# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os from tensorflow.core.protobuf.saved_model_pb2 import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py REPO_PATH = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) INTERNAL_OPS = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def onnx_compliancy(saved_model_path, strict, opset): saved_model = SavedModel() onnx_ops = [] with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f: onnx_opsets = json.load(f)["opsets"] for i in range(1, opset + 1): onnx_ops.extend(onnx_opsets[str(i)]) with open(saved_model_path, "rb") as f: saved_model.ParseFromString(f.read()) model_op_names = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def) # Convert to list, sorted if you want model_op_names = sorted(model_op_names) incompatible_ops = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(op) if strict and len(incompatible_ops) > 0: raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops) elif len(incompatible_ops) > 0: print(f"Found the following incompatible ops for the opset {opset}:") print(*incompatible_ops, sep="\n") else: print(f"The saved model {saved_model_path} can properly be converted with ONNX.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) args = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
transformers/utils/check_tf_ops.py/0
{ "file_path": "transformers/utils/check_tf_ops.py", "repo_id": "transformers", "token_count": 1302 }
412
## w/ and w/o gradient accumulation python benchmark/benchmark.py \ --command "python examples/scripts/ppo.py --exp_name ppo_step_grad_accu --mini_batch_size 1 --gradient_accumulation_steps 128 --log_with wandb" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template ## w/ different models (gpt2, gpt2-xl, falcon, llama2) python benchmark/benchmark.py \ --command "python examples/scripts/ppo.py --exp_name ppo_gpt2 --log_with wandb" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template python benchmark/benchmark.py \ --command "python examples/scripts/ppo.py --exp_name ppo_falcon_rw_1b --model_name tiiuae/falcon-rw-1b --log_with wandb" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template ## w/ and w/o PEFT python benchmark/benchmark.py \ --command "python examples/scripts/ppo.py --exp_name ppo_peft --use_peft --log_with wandb" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template
trl/benchmark/benchmark_level3.sh/0
{ "file_path": "trl/benchmark/benchmark_level3.sh", "repo_id": "trl", "token_count": 689 }
413
# DPO Trainer TRL supports the DPO Trainer for training language models from preference data, as described in the paper [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://arxiv.org/abs/2305.18290) by Rafailov et al., 2023. For a full example have a look at [`examples/scripts/dpo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/dpo.py). The first step as always is to train your SFT model, to ensure the data we train on is in-distribution for the DPO algorithm. ## Expected dataset format The DPO trainer expects a very specific format for the dataset. Since the model will be trained to directly optimize the preference of which sentence is the most relevant, given two sentences. We provide an example from the [`Anthropic/hh-rlhf`](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset below: <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/rlhf-antropic-example.png", width="50%"> </div> Therefore the final dataset object should contain these 3 entries if you use the default `DPODataCollatorWithPadding` data collator. The entries should be named: - `prompt` - `chosen` - `rejected` for example: ```py dpo_dataset_dict = { "prompt": [ "hello", "how are you", "What is your name?", "What is your name?", "Which is the best programming language?", "Which is the best programming language?", "Which is the best programming language?", ], "chosen": [ "hi nice to meet you", "I am fine", "My name is Mary", "My name is Mary", "Python", "Python", "Java", ], "rejected": [ "leave me alone", "I am not fine", "Whats it to you?", "I dont have a name", "Javascript", "C++", "C++", ], } ``` where the `prompt` contains the context inputs, `chosen` contains the corresponding chosen responses and `rejected` contains the corresponding negative (rejected) responses. As can be seen a prompt can have multiple responses and this is reflected in the entries being repeated in the dictionary's value arrays. ## Expected model format The DPO trainer expects a model of `AutoModelForCausalLM`, compared to PPO that expects `AutoModelForCausalLMWithValueHead` for the value function. ## Using the `DPOTrainer` For a detailed example have a look at the `examples/scripts/dpo.py` script. At a high level we need to initialize the `DPOTrainer` with a `model` we wish to train, a reference `ref_model` which we will use to calculate the implicit rewards of the preferred and rejected response, the `beta` refers to the hyperparameter of the implicit reward, and the dataset contains the 3 entries listed above. Note that the `model` and `ref_model` need to have the same architecture (ie decoder only or encoder-decoder). ```py dpo_trainer = DPOTrainer( model, model_ref, args=training_args, beta=0.1, train_dataset=train_dataset, tokenizer=tokenizer, ) ``` After this one can then call: ```py dpo_trainer.train() ``` Note that the `beta` is the temperature parameter for the DPO loss, typically something in the range of `0.1` to `0.5`. We ignore the reference model as `beta` -> 0. ## Loss functions Given the preference data, we can fit a binary classifier according to the Bradley-Terry model and in fact the DPO authors propose the sigmoid loss on the normalized likelihood via the `logsigmoid` to fit a logistic regression. The [RSO](https://arxiv.org/abs/2309.06657) authors propose to use a hinge loss on the normalized likelihood from the [SLiC](https://arxiv.org/abs/2305.10425) paper. 
The `DPOTrainer` can be switched to this loss via the `loss_type="hinge"` argument and the `beta` in this case is the reciprocal of the margin. The [IPO](https://arxiv.org/abs/2310.12036) authors provide a deeper theoretical understanding of the DPO algorithms and identify an issue with overfitting and propose an alternative loss which can be used via the `loss_type="ipo"` argument to the trainer. Note that the `beta` parameter is the reciprocal of the gap between the log-likelihood ratios of the chosen vs the rejected completion pair and thus the smaller the `beta` the larger this gaps is. As per the paper the loss is averaged over log-likelihoods of the completion (unlike DPO which is summed only). The [cDPO](https://ericmitchell.ai/cdpo.pdf) is a tweak on the DPO loss where we assume that the preference labels are noisy with some probability that can be passed to the `DPOTrainer` via `label_smoothing` argument (between 0 and 0.5) and then a conservative DPO loss is used. Use the `loss_type="cdpo"` argument to the trainer to use it. The [KTO](https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf) loss is derived to directly maximize the utility of LLM generations instead of the log-likelihood of preferences. Thus the dataset are not necessarily preferences but rather desirable vs undesirable completions. For paired preference data as required by the `DPOTrainer`, use the `loss_type="kto_pair"` argument to the trainer to utilize this loss, while for the more general case of desired and undesirable data, use the as of yet unimplemented `KTOTrainer`. ## Logging While training and evaluating we record the following reward metrics: * `rewards/chosen`: the mean difference between the log probabilities of the policy model and the reference model for the chosen responses scaled by beta * `rewards/rejected`: the mean difference between the log probabilities of the policy model and the reference model for the rejected responses scaled by beta * `rewards/accuracies`: mean of how often the chosen rewards are > than the corresponding rejected rewards * `rewards/margins`: the mean difference between the chosen and corresponding rejected rewards ## Accelerate DPO fine-tuning using `unsloth` You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek etc) and Mistral architectures. Some benchmarks for DPO listed below: | GPU | Model | Dataset | πŸ€— | πŸ€— + Flash Attention 2 | πŸ¦₯ Unsloth | πŸ¦₯ VRAM saved | |----------|-----------------|-----------|------|------------------------|-----------------|----------------| | A100 40G | Zephyr 7b | Ultra Chat| 1x | 1.24x | **1.88x** | -11.6% | | Tesla T4 | Zephyr 7b | Ultra Chat| 1x | 1.09x | **1.55x** | -18.6% | First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows: ```python import torch from transformers import TrainingArguments from trl import DPOTrainer from unsloth import FastLanguageModel max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number. 
# Load model model, tokenizer = FastLanguageModel.from_pretrained( model_name = "unsloth/zephyr-sft", max_seq_length = max_seq_length, dtype = None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ load_in_4bit = True, # Use 4bit quantization to reduce memory usage. Can be False. # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf ) # Do model patching and add fast LoRA weights model = FastLanguageModel.get_peft_model( model, r = 16, target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",], lora_alpha = 16, lora_dropout = 0, # Dropout = 0 is currently optimized bias = "none", # Bias = "none" is currently optimized use_gradient_checkpointing = True, random_state = 3407, ) training_args = TrainingArguments(output_dir="./output") dpo_trainer = DPOTrainer( model, model_ref=None, args=training_args, beta=0.1, train_dataset=train_dataset, tokenizer=tokenizer, ) dpo_trainer.train() ``` The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth). ## Reference model considerations with PEFT You have three main options (plus several variants) for how the reference model works when using PEFT, assuming the model that you would like to further enhance with DPO was tuned using (Q)LoRA. 1. Simply create two instances of the model, each loading your adapter - works fine but is very inefficient. 2. Merge the adapter into the base model, create another adapter on top, then leave the `model_ref` param null, in which case DPOTrainer will unload the adapter for reference inference - efficient, but has potential downsides discussed below. 3. Load the adapter twice with different names, then use `set_adapter` during training to swap between the adapter being DPO'd and the reference adapter - slightly less efficient compared to 2 (~adapter size VRAM overhead), but avoids the pitfalls. ### Downsides to merging QLoRA before DPO (approach 2) As suggested by [Tim Dettmers](https://twitter.com/Tim_Dettmers/status/1694654191325573456), the best option for merging QLoRA adapters is to first quantize the base model, merge the adapter, then convert back to bf16. Something similar to [this script](https://github.com/jondurbin/qlora/blob/main/qmerge.py) You can also just merge the adapters the standard way without quantizing the base model, but then you have 1-2% reduced performance (and evidently, more issues with empty responses). If you use the recommended approach, which quantizes the model, you're now in a situation where to use QLoRA for DPO, you will need to re-quantize the merged model again or use an unquantized merge with lower overall performance. ### Using option 3 - load the adapter twice To avoid the downsides with option 2, at the expense of slightly increased VRAM, you can load your fine-tuned adapter into the model twice, with different names, and set the model/ref adapter names in DPOTrainer. For example: ```python # Load the base model. 
bnb_config = BitsAndBytesConfig( load_in_4bit=True, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) model = AutoModelForCausalLM.from_pretrained( "mistralai/mixtral-8x7b-v0.1", load_in_4bit=True, quantization_config=bnb_config, attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16, device_map="auto", ) model.config.use_cache = False # Load the adapter. model = PeftModel.from_pretrained( model, "/path/to/peft", is_trainable=True, adapter_name="train", ) # Load the adapter a second time, with a different name, which will be our reference model. model.load_adapter("/path/to/peft", adapter_name="reference") # Initialize the trainer, without a ref_model param. dpo_trainer = DPOTrainer( model, ... model_adapter_name="train", ref_adapter_name="reference", ) ``` ## DPOTrainer [[autodoc]] DPOTrainer
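To make the loss-function options described above concrete, here is a minimal, hedged sketch of passing a non-default `loss_type` to `DPOTrainer`. It reuses the `model`, `model_ref`, `training_args`, `train_dataset`, and `tokenizer` objects from the earlier example; the exact keyword set and the default loss name are assumptions that may differ between TRL versions.

```python
# Illustrative sketch: choosing one of the alternative preference losses discussed above.
# Assumes model, model_ref, training_args, train_dataset, and tokenizer exist as in the earlier example.
from trl import DPOTrainer

dpo_trainer = DPOTrainer(
    model,
    model_ref,
    args=training_args,
    beta=0.1,                # for loss_type="hinge" this acts as the reciprocal of the margin
    loss_type="ipo",         # e.g. "hinge", "ipo", "kto_pair"; the default is the sigmoid DPO loss
    # label_smoothing=0.2,   # combine with loss_type="cdpo" for noisy preference labels, per the text above
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
dpo_trainer.train()
```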
trl/docs/source/dpo_trainer.mdx/0
{ "file_path": "trl/docs/source/dpo_trainer.mdx", "repo_id": "trl", "token_count": 3648 }
414
# Text Environments

Text environments provide a learning ground for language agents. They allow a language model to use tools to accomplish a task, such as using a Python interpreter to answer math questions or using a search index for trivia questions. Having access to tools allows language models to solve tasks that would be very hard for the model itself but can be trivial for the appropriate tools. A good example is arithmetic with large numbers, which becomes a simple copy-paste task once you have access to a calculator.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/textenv.png">
</div>

Let's dive into how text environments work and start with tools!

## Tools

One of the core building blocks of text environments is the set of tools the model can use to solve tasks. In general, a tool can be any Python function that takes a string as input and returns a string. The `TextEnvironment` offers two options for tools: either use predefined tools from `transformers.Tool` or define your own function or class with a `__call__` method. Let's have a look at both!

### `transformers.Tool`

Text environments fully support tools of the class `transformers.Tool`. The advantage of building tools in that framework is that they can easily be shared.

```Python
from transformers import load_tool

# simple calculator tool that runs +-/* operations
calc_tool = load_tool("ybelkada/simple-calculator")

# python interpreter that executes program and returns outputs
py_tool = load_tool("lvwerra/python-interpreter")

# wikipedia search index that returns best search match
wiki_tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc")
```

These tools are either loaded from the Hub or from a local folder. Using a tool is as simple as calling it with a text query:

```Python
calc_tool("1/2")
>>> "0.5"
```

Note that both input and return values are strings to enable easy usage with a language model.

### Custom Tools

The following is an example of a tool that adds two integers:

```Python
def add(text):
    int_1, int_2 = text.split("+")
    result = int(int_1) + int(int_2)
    return str(result)

print(add("1+1"))
>>> "2"
```

We looked at basic examples such as a calculator, but the principle holds for more complex tools as well, such as a web search tool where you input the query and get the search results in return. Now let's look at how the model can use the tools with the call syntax.

### Call syntax

In order to have a unified way for the model to call a tool we created a simple syntax that looks as follows:

```python
"<request><TOOL_NAME>QUERY<call>TOOL_RESPONSE<response>"
```

There are a few special tokens involved, so let's decompose it. First, the model can signal that it wants to use a tool by emitting the `<request>` token. After that, we want to know the name of the tool to call, which is done by enclosing the tool name in `<>` brackets. Once we know which tool to call, the tool query follows, which is in free text form. The `<call>` token signifies the end of the query and stops the model generation. At this point the model output is parsed and the query is sent to the tool. The environment appends the tool response to the string, followed by the `<response>` token to mark the end of the tool output.
Let's look at the concrete example of the calculator and assume its name is `Calculator` (more on how the name of a tool is inferred later):

```python
"<request><Calculator>1/2<call>0.5<response>"
```

Finally, the episode is ended and generation stops when the model generates `<submit>`, which marks the interaction as completed.

Now let's have a look at how we can create a new text environment!

## Create a `TextEnvironment`

```python
prompt = """\
What is 13-3?
<request><SimpleCalculatorTool>13-3<call>10.0<response>
Result=10<submit>
"""

def reward_fn(result, answer):
    """Simplified reward function returning 1 if result matches answer and 0 otherwise."""
    result_parsed = result.split("=")[1].split("<")[0]
    return int(result_parsed == answer)

text_env = TextEnvironment(
    model=model,
    tokenizer=tokenizer,
    tools={"SimpleCalculatorTool": load_tool("ybelkada/simple-calculator")},
    reward_fn=reward_fn,
    prompt=prompt,
    max_turns=1,
    max_tool_response=100,
    generation_kwargs={"do_sample": True},
)
```

Let's decompose the settings:

| Argument | Description |
|:-------------------|:----------------|
| `model` | Language model to interact with the environment and generate requests. |
| `tokenizer` | Tokenizer of the language model handling tokenization of strings. |
| `tools` | `list` or `dict` of tools. If a list is passed, the name of each tool is inferred from its class name; if a dict is passed, the keys are used as the tool names. |
| `reward_fn` | A function that takes a string as input and returns a reward. Can have extra arguments that are passed to `.run()` such as ground truth. |
| `prompt` | Prompt to prepend to every task. Usually a few examples to demonstrate to the model how to use the tools in a few-shot fashion. |
| `max_turns` | Maximum number of interactions between model and tools before the episode ends. |
| `max_tool_response`| The tool response is truncated to this length to avoid running out of model context. |
| `max_length` | The maximum number of tokens to allow in an episode. |
| `generation_kwargs`| Generation settings used by the language model. |

You can customize the environment to your needs and add custom tools and settings. Let's see how you can use the environment to have the model interact with the available tools!

## Run an Episode

To run a set of queries through the text environment one can simply use the `run` method.

```python
queries = ["What is 1/2?"]
answers = ["0.5"]

queries, responses, masks, rewards, histories = text_env.run(queries, answers=answers)
```

This will execute the model/tool feedback loop for each query until either no tool is called anymore, the maximum number of turns is reached, or the maximum number of tokens in an episode is exceeded. The extra `kwargs` (e.g. `answers=answers` above) passed to `run` will be passed on to the reward function.

There are five objects that are returned by `run`:

- `queries`: a list of the tokenized queries
- `responses`: all tokens that have been generated within the environment, including model and tool tokens
- `masks`: mask that indicates which tokens have been generated by the model and which tokens are generated by the tool
- `rewards`: a list of rewards for each query/response
- `histories`: list of `TextHistory` objects, which are useful objects containing all of the above as well as the text equivalents

The masks are crucial for training as we don't want to optimize tokens that the model has not generated, i.e., tokens produced by the tools.

Next, we'll train a PPO step with the generated responses!
### Train

Training on episodes from the `TextEnvironment` is straightforward and simply requires forwarding all the returned variables except the `TextHistory` objects to the `step` method:

```python
train_stats = ppo_trainer.step(queries, responses, rewards, masks)
```

## `TextHistory`

The `TextHistory` object stores the interactions between the model and the text environment. It stores the tokens and text generated in each turn, their source in each turn (model or system), as well as rewards. Let's go through the class attributes and methods.

### Attributes

The following table summarises the available attributes of the `TextHistory` class:

| Attribute | Description |
|:-------------------|:----------------|
| `text` | The full string of the text generated in the text environment with both model and system generated text. |
| `text_spans` | A list of tuples with the spans for each model or system generated text segment. |
| `system_spans` | A list of boolean values indicating if the segment is model or system generated. |
| `tokens` | All tokens generated in the text environment with both model and system generated tokens. |
| `token_spans` | Similar to `text_spans`, the `token_spans` indicate the boundaries of model and system generated tokens. |
| `token_masks` | The token masks can be used to ignore system generated tokens by masking them. |
| `completed` | Indicates if the interaction with the environment has completed. |
| `truncated` | Indicates if the interaction with the environment has completed because the max length was reached. |

With these attributes you can reconstruct every interaction of the model with the `TextEnvironment`. The `TextHistory` also lets you visualize the text history. Let's have a look!

### Visualization

When the model interacts inside the `TextEnvironment` it can be useful to visualize and separate which parts of the text outputs were generated by the model and which parts come from the system and tools. For that purpose there are the two methods [`TextHistory.show_text`] and [`TextHistory.show_tokens`]. They print the text and tokens respectively and highlight the various segments using the [`rich` library](https://github.com/Textualize/rich) (make sure to install it before using these methods).

You can see that the prompt is highlighted in gray, whereas system segments such as query and tool responses are highlighted in green. All segments generated by the model are highlighted in blue, and in addition to the pure text output the reward is displayed as additional text in plum. Here is an example of `show_text`:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/textenv_show_text.png" width=600>
</div>

Sometimes there can be tricky tokenization-related issues that are hidden when showing the decoded text. Thus `TextHistory` also offers an option to display the same highlighting on the tokens directly with `show_tokens`:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/textenv_show_tokens.png" width=800>
</div>

Note that you can turn on the colour legend by passing `show_legend=True`.

## API Documentation

[[autodoc]] TextEnvironment

[[autodoc]] TextHistory
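Putting the `run` call and the PPO update from the sections above together, here is a compact, hedged sketch of a full episode-and-update loop. It assumes `text_env` and a TRL `ppo_trainer` have been constructed as described earlier; the queries, answers, and number of updates are illustrative.

```python
# Illustrative sketch: combining TextEnvironment.run with a PPO update step.
# Assumes `text_env` and `ppo_trainer` already exist as described in this document.
task_queries = ["What is 1/2?", "What is 13-3?"]
task_answers = ["0.5", "10"]

for _ in range(10):  # number of PPO updates, chosen arbitrarily for illustration
    queries, responses, masks, rewards, histories = text_env.run(task_queries, answers=task_answers)
    # Everything except the TextHistory objects is forwarded to the PPO step
    train_stats = ppo_trainer.step(queries, responses, rewards, masks)
```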
trl/docs/source/text_environments.md/0
{ "file_path": "trl/docs/source/text_environments.md", "repo_id": "trl", "token_count": 2826 }
415
# RLHF pipeline for the creation of StackLLaMa: a Stack Exchange llama-7b model.

There were three main steps to the training process:

1. Supervised fine-tuning of the base llama-7b model to create llama-7b-se:
    - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/supervised_finetuning.py --model_path=<LLAMA_MODEL_PATH> --streaming --learning_rate 1e-5 --max_steps 5000 --output_dir ./llama-se`
2. Reward modeling using dialog pairs from the SE dataset with llama-7b-se to create llama-7b-se-rm:
    - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/reward_modeling.py --model_name=<LLAMA_SE_MODEL>`
3. RL fine-tuning of llama-7b-se with the llama-7b-se-rm reward model:
    - `accelerate launch --multi_gpu --num_machines 1 --num_processes 8 examples/research_projects/stack_llama/scripts/rl_training.py --log_with=wandb --model_name=<LLAMA_SE_MODEL> --reward_model_name=<LLAMA_SE_RM_MODEL> --adafactor=False --tokenizer_name=<LLAMA_TOKENIZER> --save_freq=100 --output_max_length=128 --batch_size=8 --gradient_accumulation_steps=8 --batched_gen=True --ppo_epochs=4 --seed=0 --learning_rate=1.4e-5 --early_stopping=True --output_dir=llama-se-rl-finetune-128-8-8-1.4e-5_adam`

LoRA layers were used at all stages to reduce memory requirements. At each stage the PEFT adapter layers were merged with the base model, using:

```shell
python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --adapter_model_name=XXX --base_model_name=YYY --output_name=ZZZ
```

Note that this script requires `peft>=0.3.0`.

For access to the base llama-7b model, please see Meta's [release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) and [request form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform).
trl/examples/research_projects/stack_llama/scripts/README.md/0
{ "file_path": "trl/examples/research_projects/stack_llama/scripts/README.md", "repo_id": "trl", "token_count": 696 }
416
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # regular: python examples/scripts/dpo.py \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 1e-3 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="dpo_anthropic_hh" \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns # peft: python examples/scripts/dpo.py \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 1e-3 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="dpo_anthropic_hh" \ --optim rmsprop \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns \ --use_peft \ --lora_r=16 \ --lora_alpha=16 """ from dataclasses import dataclass, field from typing import Dict, Optional import torch from datasets import Dataset, load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments from trl import DPOTrainer, ModelConfig, get_kbit_device_map, get_peft_config, get_quantization_config @dataclass class ScriptArguments: beta: float = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"}) max_length: int = field(default=512, metadata={"help": "max length of each sample"}) max_prompt_length: int = field(default=128, metadata={"help": "max length of each sample's prompt"}) max_target_length: int = field( default=128, metadata={"help": "Only used for encoder decoder model. Max target of each sample's prompt"} ) sanity_check: bool = field(default=True, metadata={"help": "only train on 1000 samples"}) ignore_bias_buffers: bool = field( default=False, metadata={ "help": "debug argument for distributed training;" "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See" "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992" }, ) generate_during_eval: bool = field(default=False, metadata={"help": "Generate during evaluation"}) def extract_anthropic_prompt(prompt_and_response): """Extract the anthropic prompt from a prompt and response pair.""" search_term = "\n\nAssistant:" search_term_idx = prompt_and_response.rfind(search_term) assert search_term_idx != -1, f"Prompt and response does not contain '{search_term}'" return prompt_and_response[: search_term_idx + len(search_term)] def get_hh(split: str, sanity_check: bool = False, silent: bool = False, cache_dir: Optional[str] = None) -> Dataset: """Load the Anthropic Helpful-Harmless dataset from Hugging Face and convert it to the necessary format. 
The dataset is converted to a dictionary with the following structure: { 'prompt': List[str], 'chosen': List[str], 'rejected': List[str], } Prompts should be structured as follows: \n\nHuman: <prompt>\n\nAssistant: Multiple turns are allowed, but the prompt should always start with \n\nHuman: and end with \n\nAssistant:. """ dataset = load_dataset("Anthropic/hh-rlhf", split=split, cache_dir=cache_dir) if sanity_check: dataset = dataset.select(range(min(len(dataset), 1000))) def split_prompt_and_responses(sample) -> Dict[str, str]: prompt = extract_anthropic_prompt(sample["chosen"]) return { "prompt": prompt, "chosen": sample["chosen"][len(prompt) :], "rejected": sample["rejected"][len(prompt) :], } return dataset.map(split_prompt_and_responses) if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, TrainingArguments, ModelConfig)) args, training_args, model_config = parser.parse_args_into_dataclasses() ################ # Model & Tokenizer ################ torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, trust_remote_code=model_config.trust_remote_code, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs) peft_config = get_peft_config(model_config) if peft_config is None: model_ref = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs) else: model_ref = None tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token if args.ignore_bias_buffers: # torch distributed hack model._ddp_params_and_buffers_to_ignore = [ name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool ] ################ # Dataset ################ train_dataset = get_hh("train", sanity_check=args.sanity_check) eval_dataset = get_hh("test", sanity_check=args.sanity_check) ################ # Training ################ trainer = DPOTrainer( model, model_ref, args=training_args, beta=args.beta, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, max_length=args.max_length, max_target_length=args.max_target_length, max_prompt_length=args.max_prompt_length, generate_during_eval=args.generate_during_eval, peft_config=get_peft_config(model_config), ) trainer.train() trainer.save_model(training_args.output_dir)
trl/examples/scripts/dpo.py/0
{ "file_path": "trl/examples/scripts/dpo.py", "repo_id": "trl", "token_count": 2629 }
417
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import PyTorchModelHubMixin class BaseTrainer(PyTorchModelHubMixin): r""" Base class for all trainers - this base class implements the basic functions that we need for a trainer. The trainer needs to have the following functions: - step: takes in a batch of data and performs a step of training - loss: takes in a batch of data and returns the loss - compute_rewards: takes in a batch of data and returns the rewards - _build_models_and_tokenizer: builds the models and tokenizer - _build_dataset: builds the dataset Each user is expected to implement their own trainer class that inherits from this base if they want to use a new training algorithm. """ def __init__(self, config): self.config = config def step(self, *args): raise NotImplementedError("Not implemented") def loss(self, *args): raise NotImplementedError("Not implemented") def compute_rewards(self, *args): raise NotImplementedError("Not implemented") def _save_pretrained(self, save_directory): raise NotImplementedError("Not implemented")
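Since the docstring above says each user is expected to implement their own trainer that inherits from this base class, here is a minimal, hedged sketch of what such a subclass could look like. The class name, constructor signature, and method bodies are illustrative placeholders rather than an existing TRL trainer.

```python
# Illustrative sketch of a custom trainer built on BaseTrainer; the bodies are placeholders.
from trl.trainer.base import BaseTrainer


class MyCustomTrainer(BaseTrainer):
    def __init__(self, config, model, tokenizer):
        super().__init__(config)
        self.model = model
        self.tokenizer = tokenizer

    def step(self, queries, responses, rewards):
        # One optimization step on a batch of data; returns training statistics.
        loss_value = self.loss(queries, responses, rewards)
        return {"loss": loss_value}

    def loss(self, queries, responses, rewards):
        # Compute the training loss for a batch (placeholder value).
        return 0.0

    def compute_rewards(self, queries, responses):
        # Score a batch of generations (placeholder values).
        return [0.0 for _ in responses]

    def _save_pretrained(self, save_directory):
        # Persist weights and tokenizer so PyTorchModelHubMixin can push them to the Hub.
        self.model.save_pretrained(save_directory)
        self.tokenizer.save_pretrained(save_directory)
```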
trl/trl/trainer/base.py/0
{ "file_path": "trl/trl/trainer/base.py", "repo_id": "trl", "token_count": 538 }
418
# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [email protected]. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. 
No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
accelerate/CODE_OF_CONDUCT.md/0
{ "file_path": "accelerate/CODE_OF_CONDUCT.md", "repo_id": "accelerate", "token_count": 1107 }
0
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Launching Multi-GPU Training from a Jupyter Environment This tutorial teaches you how to fine tune a computer vision model with πŸ€— Accelerate from a Jupyter Notebook on a distributed system. You will also learn how to setup a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training. <Tip> This tutorial is also available as a Jupyter Notebook [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb) </Tip> ## Configuring the Environment Before any training can be performed, a πŸ€— Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts: ```bash accelerate config ``` However, if general defaults are fine and you are *not* running on a TPU, πŸ€—Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`]. The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this. <Tip warning={true}> CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train a full cleanup and restart will need to be performed. </Tip> ```python import os from accelerate.utils import write_basic_config write_basic_config() # Write a config file os._exit(00) # Restart the notebook ``` ## Preparing the Dataset and Model Next you should prepare your dataset. As mentioned at earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU. If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later. Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examples#simple-vision-example) ```python import os, re, torch, PIL import numpy as np from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator from accelerate.utils import set_seed from timm import create_model ``` First you need to create a function to extract the class name based on a filename: ```python import os data_dir = "../../images" fnames = os.listdir(data_dir) fname = fnames[0] print(fname) ``` ```python out beagle_32.jpg ``` In the case here, the label is `beagle`. 
Using regex you can extract the label from the filename: ```python import re def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] ``` ```python extract_label(fname) ``` And you can see it properly returned the right name for our file: ```python out "beagle" ``` Next a `Dataset` class should be made to handle grabbing the image and the label: ```python class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} ``` Now to build the dataset. Outside the training function you can find and declare all the filenames and labels and use them as references inside the launched function: ```python fnames = [os.path.join("../../images", fname) for fname in fnames if fname.endswith(".jpg")] ``` Next gather all the labels: ```python all_labels = [extract_label(fname) for fname in fnames] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} ``` Next, you should make a `get_dataloaders` function that will return your built dataloaders for you. As mentioned earlier, if data is automatically sent to the GPU or a TPU device when building your `DataLoaders`, they must be built using this method. ```python def get_dataloaders(batch_size: int = 64): "Builds a set of dataloaders with a batch_size" random_perm = np.random.permutation(len(fnames)) cut = int(0.8 * len(fnames)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training a simple RandomResizedCrop will be used train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset([fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id) # For evaluation a deterministic Resize will be used eval_tfm = Compose([Resize((224, 224)), ToTensor()]) eval_dataset = PetsDataset([fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size * 2, num_workers=4) return train_dataloader, eval_dataloader ``` Finally, you should import the scheduler to be used later: ```python from torch.optim.lr_scheduler import CosineAnnealingLR ``` ## Writing the Training Function Now you can build the training loop. [`notebook_launcher`] works by passing in a function to call that will be ran across the distributed system. Here is a basic training loop for the animal classification problem: <Tip> The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end </Tip> ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) accelerator = Accelerator(mixed_precision=mixed_precision) ``` First you should set the seed and create an [`Accelerator`] object as early in the training loop as possible. 
<Tip warning={true}> If training on the TPU, your training loop should take in the model as a parameter and it should be instantiated outside of the training loop function. See the [TPU best practices](../concept_guides/training_tpu) to learn why </Tip> Next you should build your dataloaders and create your model: ```python train_dataloader, eval_dataloader = get_dataloaders(batch_size) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) ``` <Tip> You build the model here so that the seed also controls the new weight initialization </Tip> As you are performing transfer learning in this example, the encoder of the model starts out frozen so the head of the model can be trained only initially: ```python for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True ``` Normalizing the batches of images will make training a little faster: ```python mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] ``` To make these constants available on the active device, you should set it to the Accelerator's device: ```python mean = mean.to(accelerator.device) std = std.to(accelerator.device) ``` Next instantiate the rest of the PyTorch classes used for training: ```python optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) ``` Before passing everything to [`~Accelerator.prepare`]. <Tip> There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the prepare method. </Tip> ```python model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) ``` Now train the model: ```python for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() ``` The evaluation loop will look slightly different compared to the training loop. The number of elements passed as well as the overall total accuracy of each batch will be added to two constants: ```python model.eval() accurate = 0 num_elems = 0 ``` Next you have the rest of your standard PyTorch loop: ```python for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) ``` Before finally the last major difference. 
When performing distributed evaluation, the predictions and labels need to be passed through [`~Accelerator.gather`] so that all of the data is available on the current device and a properly calculated metric can be achieved: ```python accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() ``` Now you just need to calculate the actual metric for this problem, and you can print it on the main process using [`~Accelerator.print`]: ```python eval_metric = accurate.item() / num_elems accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` A full version of this training loop is available below: ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) # Initialize accelerator accelerator = Accelerator(mixed_precision=mixed_precision) # Build dataloaders train_dataloader, eval_dataloader = get_dataloaders(batch_size) # Instantiate the model (you build the model here so that the seed also controls new weight initaliziations) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # Freeze the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # You can normalize the batches of images to be a bit faster mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] # To make these constants available on the active device, set it to the accelerator device mean = mean.to(accelerator.device) std = std.to(accelerator.device) # Instantiate the optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) # Instantiate the learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now you train the model for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` ## Using the notebook_launcher All that's left is to use the [`notebook_launcher`]. You pass in the function, the arguments (as a tuple), and the number of processes to train on. 
(See the [documentation](../package_reference/launchers) for more information)

```python
from accelerate import notebook_launcher
```

```python
args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```

In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time.

For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
```

And in the second Jupyter session on the other machine:

<Tip>
Notice how the `node_rank` has changed
</Tip>

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
```

In the case of running on the TPU, it would look like so:

```python
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

args = (model, "fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=8)
```

As it's running it will print the progress as well as state how many devices you ran on. This tutorial was run on two GPUs:

```python out
Launching training on 2 GPUs.
epoch 0: 88.12
epoch 1: 91.73
epoch 2: 92.58
epoch 3: 93.90
epoch 4: 94.71
```

And that's it!

## Debugging

A common issue when running the `notebook_launcher` is receiving a "CUDA has already been initialized" error. This usually stems from an import or prior code in the notebook that makes a call to the PyTorch `torch.cuda` sublibrary. To help narrow down what went wrong, you can launch the `notebook_launcher` with `ACCELERATE_DEBUG_MODE=yes` in your environment and an additional check will be made when spawning that a regular process can be created and utilize CUDA without issue. (Your CUDA code can still be run afterwards.)

## Conclusion

This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:

- Make sure to save any code that uses CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]
- Set the `num_processes` to be the number of devices used for training (such as number of GPUs, CPUs, TPUs, etc)
- If using the TPU, declare your model outside the training loop function
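As a complement to the Debugging note above, here is a small, hedged sketch of enabling that check from inside the notebook itself. Setting the variable through `os.environ` before the launch is an assumption about your preferred workflow; exporting it in the shell that starts Jupyter works just as well.

```python
# Hedged sketch: turn on Accelerate's debug check before launching from the notebook.
# Assumes `training_loop` is defined as in this tutorial.
import os

os.environ["ACCELERATE_DEBUG_MODE"] = "yes"  # extra spawn-time CUDA sanity check

from accelerate import notebook_launcher

args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```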
accelerate/docs/source/basic_tutorials/notebook.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/notebook.md", "repo_id": "accelerate", "token_count": 5538 }
1
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Fully Sharded Data Parallel

To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.
This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.
To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).
We have integrated the latest PyTorch Fully Sharded Data Parallel (FSDP) training feature.
All you need to do is enable it through the config.

## How it works out of the box

On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. This will generate a config file that will be used automatically to properly set the
default options when doing

```bash
accelerate launch my_script.py --args_to_my_script
```

For instance, here is how you would run `examples/nlp_example.py` (from the root of the repo) with FSDP enabled:

```yaml
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_backward_prefetch_policy: BACKWARD_PRE
  fsdp_forward_prefetch: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_offload_params: false
  fsdp_sharding_strategy: FULL_SHARD
  fsdp_state_dict_type: SHARDED_STATE_DICT
  fsdp_sync_module_states: true
  fsdp_transformer_layer_cls_to_wrap: BertLayer
  fsdp_use_orig_params: true
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```

```bash
accelerate launch examples/nlp_example.py
```

Currently, `Accelerate` supports the following config through the CLI:

`fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)

`fsdp_offload_params`: Whether to offload parameters and gradients to CPU

`fsdp_auto_wrap_policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP

`fsdp_transformer_layer_cls_to_wrap`: Only applicable for 🤗 Transformers. When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`.
This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers, including the shared embeddings, are conveniently wrapped in the same outermost FSDP unit. Therefore, use this for transformer-based models. For 🤗 Transformers models, you can answer `yes` to the question `Do you want to use the model's _no_split_modules to wrap the model?`, and the wrapping policy will then try to use `model._no_split_modules` when possible.

`fsdp_min_num_params`: minimum number of parameters when using `fsdp_auto_wrap_policy=SIZE_BASED_WRAP`.

`fsdp_backward_prefetch_policy`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH

`fsdp_forward_prefetch`: if True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iteration's execution order, i.e., do not enable this feature if the sub-modules' order changes dynamically during the model's execution.

`fsdp_state_dict_type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT

`fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning, as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP.

`fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via the `from_pretrained` method. When this setting is True, `fsdp_sync_module_states` must also be True, otherwise all processes except the main process would have random weights, leading to unexpected behaviour during training.

`fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0.

For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`. When creating a `FullyShardedDataParallelPlugin` object, pass it the parameters that weren't part of the accelerate config, or the ones you want to override. The FSDP parameters will be picked based on the accelerate config file or launch command arguments, and the parameters you pass directly through the `FullyShardedDataParallelPlugin` object will override them.
Below is an example:

```py
from accelerate import Accelerator, FullyShardedDataParallelPlugin
from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig

fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
)

accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```

## Saving and loading

The new recommended way of checkpointing when using FSDP models is to use `SHARDED_STATE_DICT` as `StateDictType` when setting up the accelerate config.
Below is the code snippet to save using the `save_state` utility of accelerate:

```py
accelerator.save_state("ckpt")
```

Inspect the checkpoint folder to see the model and optimizer as shards per process:

```
ls ckpt
# optimizer_0  pytorch_model_0  random_states_0.pkl  random_states_1.pkl  scheduler.bin

cd ckpt

ls optimizer_0
# __0_0.distcp  __1_0.distcp

ls pytorch_model_0
# __0_0.distcp  __1_0.distcp
```

To load them back for resuming the training, use the `load_state` utility of accelerate:

```py
accelerator.load_state("ckpt")
```

When using transformers' `save_pretrained`, pass `state_dict=accelerator.get_state_dict(model)` to save the model state dict.
Below is an example:

```diff
unwrapped_model.save_pretrained(
    args.output_dir,
    is_main_process=accelerator.is_main_process,
    save_function=accelerator.save,
+   state_dict=accelerator.get_state_dict(model),
)
```

### State Dict

`accelerator.get_state_dict` will call the underlying `model.state_dict` implementation using the `FullStateDictConfig(offload_to_cpu=True, rank0_only=True)` context manager to get the state dict only for rank 0, and it will be offloaded to CPU. You can then pass the resulting state dict into the `save_pretrained` method.

There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).

## A few caveats to be aware of

- In case of multiple models, pass the optimizers to the prepare call in the same order as the corresponding models, else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of the 🤗 `Transformers` library.

For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
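For instance, to force CPU offloading of parameters and a specific sharding/prefetching behaviour from code rather than from the config file, a minimal sketch could look like the one below. It assumes a recent PyTorch that exposes `ShardingStrategy`, `CPUOffload` and `BackwardPrefetch` under `torch.distributed.fsdp`; any option not set here is still picked up from the accelerate config.

```py
from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, ShardingStrategy

from accelerate import Accelerator, FullyShardedDataParallelPlugin

# Override a few FSDP knobs in code; the remaining options still come from
# the accelerate config file or the launch command arguments.
fsdp_plugin = FullyShardedDataParallelPlugin(
    sharding_strategy=ShardingStrategy.FULL_SHARD,
    cpu_offload=CPUOffload(offload_params=True),
    backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
    limit_all_gathers=True,
)

accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```

As with the state-dict example above, anything you set explicitly on the plugin takes precedence over the values read from `accelerate config`.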
accelerate/docs/source/usage_guides/fsdp.md/0
{ "file_path": "accelerate/docs/source/usage_guides/fsdp.md", "repo_id": "accelerate", "token_count": 2785 }
2
{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "overlap_comm": true, "contiguous_gradients": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "sub_group_size": 1e9, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
accelerate/examples/deepspeed_config_templates/zero_stage3_config.json/0
{ "file_path": "accelerate/examples/deepspeed_config_templates/zero_stage3_config.json", "repo_id": "accelerate", "token_count": 657 }
3
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import contextlib import functools import json import math import os import re import shutil import sys import warnings from collections import OrderedDict from contextlib import contextmanager from functools import partial from types import MethodType from typing import Any, Callable, Union import torch import torch.utils.hooks as hooks from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches from .hooks import AlignDevicesHook from .logging import get_logger from .optimizer import AcceleratedOptimizer from .scheduler import AcceleratedScheduler from .state import AcceleratorState, GradientState, PartialState from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers from .utils import ( MODEL_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, AutocastKwargs, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FP8RecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, TorchDynamoPlugin, check_os_kernel, clean_state_dict_for_safetensors, compare_versions, convert_model, convert_outputs_to_fp32, extract_model_from_parallel, gather, gather_object, get_mixed_precision_context_manager, get_pretty_name, has_transformer_engine_layers, is_bf16_available, is_deepspeed_available, is_fp8_available, is_ipex_available, is_megatron_lm_available, is_msamp_available, is_npu_available, is_torch_version, is_tpu_available, is_xpu_available, load_fsdp_model, load_fsdp_optimizer, pad_across_processes, parse_choice_from_env, recursively_apply, reduce, release_memory, save, save_fsdp_model, save_fsdp_optimizer, shard_checkpoint, wait_for_everyone, ) from .utils.constants import FSDP_PYTORCH_VERSION from .utils.modeling import get_state_dict_offloaded_model from .utils.other import is_compiled_module if is_deepspeed_available(): from .utils import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, ) if is_fp8_available(): import transformer_engine.common.recipe as te_recipe from transformer_engine.pytorch import fp8_autocast if is_megatron_lm_available(): from .utils import ( MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, megatron_lm_initialize, megatron_lm_prepare_data_loader, megatron_lm_prepare_model, megatron_lm_prepare_optimizer, megatron_lm_prepare_scheduler, ) from torch.distributed.algorithms.join import Join if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.distributed.xla_multiprocessing as xmp if is_npu_available(check_device=False): 
import torch_npu # noqa: F401 try: from torch.optim.lr_scheduler import LRScheduler except ImportError: from torch.optim.lr_scheduler import _LRScheduler as LRScheduler logger = get_logger(__name__) class Accelerator: """ Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training. Args: device_placement (`bool`, *optional*, defaults to `True`): Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model, etc...). split_batches (`bool`, *optional*, defaults to `False`): Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set in your script multiplied by the number of processes. mixed_precision (`str`, *optional*): Whether or not to use mixed precision training. Choose from 'no','fp16','bf16 or 'fp8'. Will default to the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8' requires the installation of transformers-engine. gradient_accumulation_steps (`int`, *optional*, default to 1): The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`. If not passed, will default to the value in the environment variable `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a `GradientAccumulationPlugin`. cpu (`bool`, *optional*): Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only. deepspeed_plugin (`DeepSpeedPlugin`, *optional*): Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured directly using *accelerate config* fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*): Tweak your FSDP related args using this argument. This argument is optional and can be configured directly using *accelerate config* megatron_lm_plugin (`MegatronLMPlugin`, *optional*): Tweak your MegatronLM related args using this argument. This argument is optional and can be configured directly using *accelerate config* rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration in your prepared dataloaders. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6. log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): A list of loggers to be setup for experiment tracking. Should be one or several of: - `"all"` - `"tensorboard"` - `"wandb"` - `"comet_ml"` If `"all"` is selected, will pick up all available trackers in the environment and initialize them. 
Can also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. project_config (`ProjectConfiguration`, *optional*): A configuration for how saving the state can be handled. project_dir (`str`, `os.PathLike`, *optional*): A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved checkpoints. dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. use_seedable_sampler (`bool`, *optional*, defaults to `False`): Whether or not use a fully seedable random sampler ([`~data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducable using a different sampling technique. While seed-to-seed results may differ, on average the differences are neglible when using multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results. step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`): Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only done under certain circumstances (at the end of each epoch, for instance). kwargs_handlers (`list[KwargHandler]`, *optional*) A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision are created. See [kwargs](kwargs) for more information. dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `"no"`): Set to one of the possible dynamo backends to optimize your training with torch dynamo. gradient_accumulation_plugin (`GradientAccumulationPlugin`, *optional*): A configuration for how gradient accumulation should be handled, if more tweaking than just the `gradient_accumulation_steps` is needed. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration. - **local_process_index** (`int`) -- The process index on the current machine. - **mixed_precision** (`str`) -- The configured mixed precision mode. - **num_processes** (`int`) -- The total number of processes used for training. - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which case the learning rate should not be changed. - **process_index** (`int`) -- The overall index of the current process among all processes. - **state** ([`~state.AcceleratorState`]) -- The distributed setup state. - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes. - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training. 
""" def __init__( self, device_placement: bool = True, split_batches: bool = False, mixed_precision: PrecisionType | str | None = None, gradient_accumulation_steps: int = 1, cpu: bool = False, deepspeed_plugin: DeepSpeedPlugin | None = None, fsdp_plugin: FullyShardedDataParallelPlugin | None = None, megatron_lm_plugin: MegatronLMPlugin | None = None, rng_types: list[str | RNGType] | None = None, log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None = None, project_dir: str | os.PathLike | None = None, project_config: ProjectConfiguration | None = None, gradient_accumulation_plugin: GradientAccumulationPlugin | None = None, dispatch_batches: bool | None = None, even_batches: bool = True, use_seedable_sampler: bool = False, step_scheduler_with_optimizer: bool = True, kwargs_handlers: list[KwargsHandler] | None = None, dynamo_backend: DynamoBackend | str | None = None, ): self.trackers = [] if project_config is not None: self.project_configuration = project_config else: self.project_configuration = ProjectConfiguration(project_dir=project_dir) if project_dir is not None and self.project_dir is None: self.project_configuration.set_directories(project_dir) if mixed_precision is not None: mixed_precision = str(mixed_precision) if mixed_precision not in PrecisionType: raise ValueError( f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}" ) dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend) if deepspeed_plugin is None: # init from env variables deepspeed_plugin = ( DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None ) else: assert isinstance( deepspeed_plugin, DeepSpeedPlugin ), "`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object." os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided if deepspeed_plugin: if not is_deepspeed_available(): raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.") if compare_versions("deepspeed", "<", "0.9.3"): raise ImportError("DeepSpeed version must be >= 0.9.3. 
Please update DeepSpeed.") mixed_precision = ( os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision ) deepspeed_plugin.set_mixed_precision(mixed_precision) deepspeed_plugin.set_deepspeed_weakref() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance( fsdp_plugin, FullyShardedDataParallelPlugin ): if is_torch_version("<", FSDP_PYTORCH_VERSION): raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}") if fsdp_plugin is None: # init from env variables fsdp_plugin = ( FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None ) else: if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin): raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.") os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided if megatron_lm_plugin is None: # init from env variables megatron_lm_plugin = ( MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None ) else: if not isinstance(megatron_lm_plugin, MegatronLMPlugin): raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.") os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided if megatron_lm_plugin: if not is_megatron_lm_available(): raise ImportError("Megatron is not installed. please build it from source.") # Kwargs handlers self.ddp_handler = None self.scaler_handler = None self.init_handler = None self.fp8_recipe_handler = None self.autocast_handler = None if kwargs_handlers is not None: for handler in kwargs_handlers: assert isinstance( handler, KwargsHandler ), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`." 
if isinstance(handler, DistributedDataParallelKwargs): if self.ddp_handler is not None: raise ValueError("You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.") else: self.ddp_handler = handler elif isinstance(handler, GradScalerKwargs): if self.scaler_handler is not None: raise ValueError("You can only pass one `GradScalerKwargs` in `kwargs_handler`.") else: self.scaler_handler = handler elif isinstance(handler, InitProcessGroupKwargs): if self.init_handler is not None: raise ValueError("You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.") else: self.init_handler = handler elif isinstance(handler, FP8RecipeKwargs): if self.fp8_recipe_handler is not None: raise ValueError("You can only pass one `FP8RecipeKwargs` in `kwargs_handler`.") else: self.fp8_recipe_handler = handler elif isinstance(handler, AutocastKwargs): if self.autocast_handler is not None: raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.") else: self.autocast_handler = handler if self.fp8_recipe_handler is None and mixed_precision == "fp8": self.fp8_recipe_handler = FP8RecipeKwargs() kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {} self.state = AcceleratorState( mixed_precision=mixed_precision, cpu=cpu, dynamo_plugin=dynamo_plugin, deepspeed_plugin=deepspeed_plugin, fsdp_plugin=fsdp_plugin, megatron_lm_plugin=megatron_lm_plugin, _from_accelerator=True, **kwargs, ) trackers = filter_trackers(log_with, self.logging_dir) if len(trackers) < 1 and log_with is not None: warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.") self.log_with = trackers if ( (mixed_precision != "bf16") and getattr(self.state, "downcast_bfloat", False) and (self.state.distributedType != DistributedType.TPU) ): raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU") if gradient_accumulation_plugin is not None: if gradient_accumulation_steps != 1: raise ValueError( "You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object." ) else: gradient_accumulation_steps = int( parse_choice_from_env("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", gradient_accumulation_steps) ) gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps) self.gradient_state = GradientState( gradient_accumulation_plugin=gradient_accumulation_plugin, ) if self.state.distributed_type == DistributedType.TPU: if self.gradient_state.num_steps != 1: raise ValueError( "Gradient accumulation is not supported on TPU. Please set `gradient_accumulation_steps` to 1 and don't pass in a `GradientAccumulationPlugin` object." 
) self.device_placement = device_placement self.split_batches = split_batches self.dispatch_batches = dispatch_batches self.even_batches = even_batches self.use_seedable_sampler = use_seedable_sampler self.step_scheduler_with_optimizer = step_scheduler_with_optimizer # Mixed precision attributes self.scaler = None self.native_amp = False err = "{mode} mixed precision requires {requirement}" if ( self.state.mixed_precision == "fp16" and self.device.type != "cpu" and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM) ): self.native_amp = True if self.device.type not in ("xpu", "cuda", "mps", "npu"): raise ValueError(err.format(mode="fp16", requirement="a GPU")) kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {} if self.distributed_type == DistributedType.FSDP: from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler self.scaler = ShardedGradScaler(**kwargs) elif is_npu_available(): self.scaler = torch.npu.amp.GradScaler(**kwargs) else: self.scaler = torch.cuda.amp.GradScaler(**kwargs) elif self.state.mixed_precision == "bf16" and self.distributed_type not in ( DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM, ): if self.device.type in ["cpu", "xpu"]: self.native_amp = True else: self.native_amp = is_bf16_available(True) if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available(): raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device.")) # Start of internal step tracking self.step = 0 # Internal references to the training objects self._optimizers = [] self._models = [] self._schedulers = [] self._dataloaders = [] self._custom_objects = [] # Hooks self._load_model_state_pre_hook = OrderedDict() self._save_model_state_pre_hook = OrderedDict() # RNG Types self.rng_types = rng_types if self.rng_types is None: self.rng_types = ["generator"] # Set a flag tensor for early stopping and other breakpoints self.flag_tensor = None check_os_kernel() @property def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.state.use_distributed @property def distributed_type(self): return self.state.distributed_type @property def num_processes(self): return self.state.num_processes @property def process_index(self): return self.state.process_index @property def local_process_index(self): return self.state.local_process_index @property def device(self): return self.state.device @property def project_dir(self): return self.project_configuration.project_dir @property def logging_dir(self): return self.project_configuration.logging_dir @property def save_iteration(self): return self.project_configuration.iteration @property def is_main_process(self): """True for one process only.""" return self.state.is_main_process @property def is_local_main_process(self): """True for one process per server.""" return self.state.is_local_main_process @property def use_fp16(self): warnings.warn( "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " "`Accelerator.mixed_precision == 'fp16'` instead.", FutureWarning, ) return self.mixed_precision != "no" @property def is_last_process(self): return self.process_index == self.num_processes - 1 @property def mixed_precision(self): return self.state.mixed_precision @contextmanager def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be 
then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import Accelerator accelerator = Accelerator() with accelerator.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: yield inputs def on_main_process(self, function: Callable[..., Any] = None): """ A decorator that will run the decorated function on the main process only. Can also be called using the `PartialState` class. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> @accelerator.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_main_process(function)(*args, **kwargs) return _inner def on_local_main_process(self, function: Callable[..., Any] = None): """ A decorator that will run the decorated function on the local main process only. Can also be called using the `PartialState` class. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_local_main_process def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_local_main_process(function)(*args, **kwargs) return _inner def on_last_process(self, function: Callable[..., Any]): """ A decorator that will run the decorated function on the last process only. Can also be called using the `PartialState` class. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. 
from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_last_process def print_something(): print(f"Printed on process {accelerator.process_index}") print_something() "Printed on process 3" ``` """ # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_last_process(function)(*args, **kwargs) return _inner def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ A decorator that will run the decorated function on a given process index only. Can also be called using the `PartialState` class. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_process(process_index=2) def print_something(): print(f"Printed on process {accelerator.process_index}") print_something() "Printed on process 2" ``` """ # Initial construction of the decorator. if (self is not None) and (process_index is not None) and (function is None): return partial(self.on_process, process_index=process_index) # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_process(function, process_index)(*args, **kwargs) return _inner def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ A decorator that will run the decorated function on a given local process index only. Can also be called using the `PartialState` class. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_local_process(local_process_index=2) def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ # Initial construction of the decorator. if (self is not None) and (local_process_index is not None) and (function is None): return partial(self.on_local_process, local_process_index=local_process_index) # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_local_process(function, local_process_index)(*args, **kwargs) return _inner @contextmanager def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ with self.state.main_process_first(): yield @contextmanager def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.local_process_index}") ``` """ with self.state.local_main_process_first(): yield @contextmanager def no_sync(self, model): """ A context manager to disable gradient synchronizations across DDP processes by calling `torch.nn.parallel.DistributedDataParallel.no_sync`. If `model` is not in DDP, this context manager does nothing Args: model (`torch.nn.Module`): PyTorch Module that was prepared with `Accelerator.prepare` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) >>> input_a = next(iter(dataloader)) >>> input_b = next(iter(dataloader)) >>> with accelerator.no_sync(): ... outputs = model(input_a) ... loss = loss_func(outputs) ... accelerator.backward(loss) ... # No synchronization across processes, only accumulate gradients >>> outputs = model(input_b) >>> accelerator.backward(loss) >>> # Synchronization across all processes >>> optimizer.step() >>> optimizer.zero_grad() ``` """ context = contextlib.nullcontext if self.use_distributed: context = getattr(model, "no_sync", context) with context(): yield @staticmethod @contextmanager def trigger_sync_in_backward(model): """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under `Accelerator.no_sync` (only applicable in multi-GPU scenarios). If the script is not launched in distributed mode, this context manager does nothing. Args: model (`torch.nn.Module`): The model for which to trigger the gradient synchronization. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) >>> with accelerator.no_sync(): ... loss_a = loss_func(model(input_a)) # first forward pass ... loss_b = loss_func(model(input_b)) # second forward pass >>> accelerator.backward(loss_a) # No synchronization across processes, only accumulate gradients >>> with accelerator.trigger_sync_in_backward(model): ... accelerator.backward(loss_b) # Synchronization across all processes >>> optimizer.step() >>> optimizer.zero_grad() ``` """ if not isinstance(model, torch.nn.parallel.DistributedDataParallel): yield return old_require_backward_grad_sync = model.require_backward_grad_sync old_require_forward_param_sync = model.require_forward_param_sync # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features. 
# https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466 model.require_backward_grad_sync = True model.require_forward_param_sync = True # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/csrc/distributed/c10d/reducer.cpp#L1371-L1402 model.reducer.prepare_for_backward([]) try: yield finally: model.require_backward_grad_sync = old_require_backward_grad_sync model.require_forward_param_sync = old_require_forward_param_sync def _do_sync(self): "Sets the right `sync_gradients` context and either resets or increases `self.step`" if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader: self.step = 0 self.gradient_state._set_sync_gradients(True) else: self.step += 1 self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0) @property def sync_gradients(self): return self.gradient_state.sync_gradients @sync_gradients.setter def sync_gradients(self, sync_gradients): self.gradient_state.sync_gradients = sync_gradients @property def gradient_accumulation_steps(self): return self.gradient_state.num_steps @gradient_accumulation_steps.setter def gradient_accumulation_steps(self, gradient_accumulation_steps): self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps}) @contextmanager def accumulate(self, *models): """ A context manager that will lightly wrap around and perform gradient accumulation automatically Args: *models (list of `torch.nn.Module`): PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will skip gradient syncing during backward pass in distributed training Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=1) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for input, output in dataloader: ... with accelerator.accumulate(model): ... outputs = model(input) ... loss = loss_func(outputs) ... loss.backward() ... optimizer.step() ... scheduler.step() ... optimizer.zero_grad() ``` """ self._do_sync() with contextlib.ExitStack() as cm_stack: for m in models: cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m)) yield @contextmanager def join_uneven_inputs(self, joinables, even_batches=None): """ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the length of the dataset. Args: joinables (`list[torch.distributed.algorithms.Joinable]`): A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training. even_batches (`bool`, *optional*) If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided, the default `Accelerator` value wil be used. <Tip warning={true}> `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other configuration, this method will have no effect. </Tip> <Tip warning={true}> Overidding `even_batches` will not affect iterable-style data loaders. 
</Tip> Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(even_batches=True) >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader) >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False): ... for input, output in dataloader: ... outputs = model(input) ... loss = loss_func(outputs) ... loss.backward() ... optimizer.step() ... optimizer.zero_grad() ``` """ if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU): dl_even_batches_values = [] if even_batches is not None: iterable_dl_seen = False # override value in batch sampler for map-style datasets for dl_idx, dl in enumerate(self._dataloaders): if isinstance(dl, DataLoaderDispatcher): iterable_dl_seen = True continue dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches)) dl.batch_sampler.even_batches = even_batches if iterable_dl_seen: warnings.warn( "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable" ) else: even_batches = self.even_batches enable_join = False if even_batches else True try: with Join(joinables, enable=enable_join, throw_on_early_termination=False): yield finally: # reset any batch samplers that have been modified for dl_idx, even_batches_value in dl_even_batches_values: self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value else: # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs if self.distributed_type != DistributedType.NO: warnings.warn( "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect." ) with contextlib.nullcontext(joinables): yield def print(self, *args, **kwargs): """ Drop in replacement of `print()` to only print once per server. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> accelerator.print("Hello world!") ``` """ self.state.print(*args, **kwargs) def _prepare_one(self, obj, first_pass=False, device_placement=None): # First pass of preparation: DataLoader, model, optimizer if first_pass: if isinstance(obj, torch.utils.data.DataLoader): return self.prepare_data_loader(obj, device_placement=device_placement) elif isinstance(obj, torch.nn.Module): return self.prepare_model(obj, device_placement=device_placement) elif isinstance(obj, torch.optim.Optimizer): optimizer = self.prepare_optimizer(obj, device_placement=device_placement) return optimizer # Second pass of preparation: LR scheduler (which need the full list of optimizers) elif isinstance(obj, LRScheduler): scheduler = self.prepare_scheduler(obj) return scheduler # Return the unprocessed object if previous criteria was not met return obj def prepare(self, *args, device_placement=None): """ Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same order. Args: *args (list of objects): Any of the following type of objects: - `torch.utils.data.DataLoader`: PyTorch Dataloader - `torch.nn.Module`: PyTorch Module - `torch.optim.Optimizer`: PyTorch Optimizer - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler device_placement (`list[bool]`, *optional*): Used to customize whether automatic device placement should be performed for each object passed. Needs to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP. 
<Tip> You don't need to prepare a model if you only use it for inference without any kind of mixed precision </Tip> Examples: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume a model, optimizer, data_loader and scheduler are defined >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler) ``` ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume a model, optimizer, data_loader and scheduler are defined >>> device_placement = [True, True, False, False] >>> # Will place the first to items passed in automatically to the right device but not the last two. >>> model, optimizer, data_loader, scheduler = accelerator.prepare( ... model, optimizer, data_loader, scheduler, device_placement=device_placement ... ) ``` """ if device_placement is None: device_placement = [None for _ in args] elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM): raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.") elif len(device_placement) != len(args): raise ValueError( f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)." ) for obj in args: # TODO: Look at enabling native TP training directly with a proper config if ( isinstance(obj, torch.nn.Module) and self.verify_device_map(obj) and self.distributed_type != DistributedType.NO and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true" ): raise ValueError( "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode." " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`." ) if self.distributed_type == DistributedType.DEEPSPEED: model_count = 0 for obj in args: if isinstance(obj, torch.nn.Module): model_count += 1 if model_count > 1: raise AssertionError( "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed" ) # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will # have parameters disconnected from the model (so no training :-( ). # If the model and optimizer have parameters on different devices we raise an error. if self.distributed_type == DistributedType.TPU: model_device, optimizer_device = self._get_devices() if model_device is not None and optimizer_device is not None and model_device != optimizer_device: raise ValueError( "The model and the optimizer parameters are not on the same device, which probably means you " "created an optimizer around your model **before** putting on the device. Make sure the line " "model.to(device) is before the optimizer creation in your script or remove it entirely and use " "the flag default value for `device_placement` in your `Accelerator` to let it handle that " "part for you." ) # If we're dealing with device placement, this deals with that by... tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.TPU if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"): # 1. 
grabbing old model parameters old_named_params = self._get_named_parameters(*args) if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: if self.device.type == "cpu" and self.state.use_ipex: args = self._prepare_ipex(*args) elif self.device.type == "xpu" and is_xpu_available(): args = self._prepare_ipex(*args) if self.distributed_type == DistributedType.DEEPSPEED: result = self._prepare_deepspeed(*args) elif self.distributed_type == DistributedType.MEGATRON_LM: result = self._prepare_megatron_lm(*args) else: if self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "MSAMP": args = self._prepare_msamp(*args) # MS-AMP will handle the device placement device_placement = [False for _ in args] result = tuple( self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement) ) result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement)) if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"): # 2. grabbing new model parameters new_named_params = self._get_named_parameters(*result) # 3. building a map from the first to the second mapping = {p: new_named_params[n] for n, p in old_named_params.items()} # 4. using that map to update the parameters of the optimizer for obj in result: if isinstance(obj, torch.optim.Optimizer): obj._switch_parameters(mapping) for item in result: if any( item in container for container in (self._dataloaders, self._models, self._optimizers, self._schedulers) ): setattr(item, "_is_accelerate_prepared", True) return result if len(result) > 1 else result[0] def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False): """ Prepares a PyTorch model for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: model (`torch.nn.Module`): A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without any kind of mixed precision device_placement (`bool`, *optional*): Whether or not to place the model on the proper device. Will default to `self.device_placement`. evaluation_mode (`bool`, *optional*, defaults to `False`): Whether or not to set the model for evaluation only, by just applying mixed precision and `torch.compile` (if configured in the `Accelerator` object). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume a model is defined >>> model = accelerator.prepare_model(model) ``` """ if device_placement is None: device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP self._models.append(model) # TODO: Look at enabling native TP training directly with a proper config if ( self.verify_device_map(model) and self.distributed_type != DistributedType.NO and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true" ): raise ValueError( "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode." " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`." 
) if self.native_amp: model._original_forward = model.forward model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler) new_forward = autocast_context(model_forward_func) if hasattr(model.forward, "__func__"): model.forward = MethodType(new_forward, model) model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model) else: model.forward = convert_outputs_to_fp32(new_forward) elif self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE": if not has_transformer_engine_layers(model): with torch.no_grad(): convert_model(model) model._converted_to_transformer_engine = True model._original_forward = model.forward kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {} if "fp8_format" in kwargs: kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"]) fp8_recipe = te_recipe.DelayedScaling(**kwargs) model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward) if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr( model, "hf_device_map", False ): model_devices = set(model.hf_device_map.values()) if len(model_devices) > 1 and self.distributed_type != DistributedType.NO: raise ValueError( "You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode." " In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism." " Therefore you should not specify that you are under any distributed regime in your accelerate config." ) current_device = list(model_devices)[0] current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device if torch.device(current_device_index) != self.device: # if on the first device (GPU 0) we don't care if (self.device.index is not None) or (current_device_index != 0): raise ValueError( "You can't train a model that has been loaded in 8-bit precision on a different device than the one " "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}" ) if "cpu" in model_devices or "disk" in model_devices: raise ValueError( "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload." 
) elif device_placement and not self.verify_device_map(model): model = model.to(self.device) if not evaluation_mode: if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, ): if any(p.requires_grad for p in model.parameters()): kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} # TODO: Look at enabling native TP training directly with a proper config if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true": device_ids, output_device = [self.local_process_index], self.local_process_index else: device_ids, output_device = None, None model = torch.nn.parallel.DistributedDataParallel( model, device_ids=device_ids, output_device=output_device, **kwargs ) elif self.distributed_type == DistributedType.FSDP: from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP # Check if the model is already a FSDP model due to `Manual Wrapping` and if so, # don't wrap it again # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it # is a FSDP model, don't wrap it again is_type_fsdp = isinstance(model, FSDP) or ( is_compiled_module(model) and isinstance(model._orig_mod, FSDP) ) if not is_type_fsdp: self.state.fsdp_plugin.set_auto_wrap_policy(model) fsdp_plugin = self.state.fsdp_plugin kwargs = { "sharding_strategy": fsdp_plugin.sharding_strategy, "cpu_offload": fsdp_plugin.cpu_offload, "auto_wrap_policy": fsdp_plugin.auto_wrap_policy, "mixed_precision": fsdp_plugin.mixed_precision_policy, "sync_module_states": fsdp_plugin.sync_module_states, "backward_prefetch": fsdp_plugin.backward_prefetch, "forward_prefetch": fsdp_plugin.forward_prefetch, "use_orig_params": fsdp_plugin.use_orig_params, "param_init_fn": fsdp_plugin.param_init_fn, "ignored_modules": fsdp_plugin.ignored_modules, "limit_all_gathers": fsdp_plugin.limit_all_gathers, "device_id": self.device, } model = FSDP(model, **kwargs) if fsdp_plugin.activation_checkpointing: from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper, ) apply_activation_checkpointing( model, checkpoint_wrapper_fn=functools.partial( checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT, ), auto_wrap_policy=fsdp_plugin.auto_wrap_policy, ) # if the previous and current models are same, delete the previous one if len(self._models) > 1 and (self._models[-2] is self._models[-1]): del self._models[-2] self._models[-1] = model elif self.distributed_type == DistributedType.MULTI_CPU: kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} model = torch.nn.parallel.DistributedDataParallel(model, **kwargs) elif self.distributed_type == DistributedType.TPU and self.state.fork_launched: model = xmp.MpModelWrapper(model).to(self.device) # torch.compile should be called last and only if the model isn't already compiled. 
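        # A minimal sketch of what the call below amounts to, assuming the default inductor backend and no
        # extra plugin options:
        #     model = torch.compile(model, backend="inductor")
        # The actual keyword arguments are taken from `self.state.dynamo_plugin.to_kwargs()`.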
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model): if not is_torch_version(">=", "2.0"): raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.") model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs()) return model def _prepare_deepspeed(self, *args): import deepspeed deepspeed_plugin = self.state.deepspeed_plugin is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args) result = [ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args ] if deepspeed_plugin.is_auto("train_micro_batch_size_per_gpu"): if is_dataloader_present: batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] if any(bs is None for bs in batch_sizes): raise ValueError( "At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. " "Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." ) if self.split_batches: batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes] batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes) if len(batch_sizes) > 1: logger.info( "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " f"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device})." ) else: raise ValueError( "When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders " "with `batch_size` attribute returning an integer value " "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." ) else: batch_size_per_device = deepspeed_plugin.get_value("train_micro_batch_size_per_gpu") # handle `gradient_accumulation_steps` when the value is `auto` deepspeed_plugin.fill_match( "gradient_accumulation_steps", must_match=False, gradient_accumulation_steps=self.gradient_accumulation_steps, ) config_kwargs = { "train_micro_batch_size_per_gpu": batch_size_per_device, "train_batch_size": batch_size_per_device * deepspeed_plugin.get_value("gradient_accumulation_steps") * self.num_processes, "gradient_clipping": 1.0, "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, } model = None optimizer = None scheduler = None for obj in result: if isinstance(obj, torch.nn.Module): model = obj elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)): optimizer = obj elif (isinstance(obj, (LRScheduler, DummyScheduler))) or ( type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES ): scheduler = obj if optimizer is not None: if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)): raise ValueError( "You cannot specify an optimizer in the config file and in the code at the same time. " "Please remove the optimizer from the config file or " "create `accelerate.utils.DummyOptim` in the code." ) elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)): raise ValueError( "You cannot create a `DummyOptim` without specifying an optimizer in the config file." 
) if isinstance(optimizer, (torch.optim.Optimizer)): deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True if scheduler is not None: if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)): raise ValueError( "You cannot specify a scheduler in the config file and in the code at the same time. " "Please remove the scheduler from the config file or " "create `accelerate.utils.DummyScheduler` in the code." ) elif ( "scheduler" not in deepspeed_plugin.deepspeed_config and isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None ): raise ValueError( "Either specify a scheduler in the config file or " "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`." ) if optimizer is not None and scheduler is not None: if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)): raise ValueError( "You can only specify `accelerate.utils.DummyScheduler` in the code when using " "`accelerate.utils.DummyOptim`." ) if model is not None: # deal with config keys that use `auto` value and rely on model's hidden_size hidden_size_based_keys = [ "zero_optimization.reduce_bucket_size", "zero_optimization.stage3_prefetch_bucket_size", "zero_optimization.stage3_param_persistence_threshold", ] hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)] if len(hidden_size_auto_keys) > 0: reasoning = ( "therefore it's not possible to automatically fill out the following `auto` entries " + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " + "`auto` values for these keys with an integer value of your choice." ) if not hasattr(model, "config"): raise ValueError("Can't find `model.config` entry, " + reasoning) if hasattr(model.config, "hidden_size"): hidden_size = model.config.hidden_size elif hasattr(model.config, "hidden_sizes"): # if there are many hidden sizes pick the largest one hidden_size = max(model.config.hidden_sizes) else: raise ValueError( "Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, " + reasoning ) config_kwargs.update( { "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, } ) if isinstance(optimizer, (DummyOptim)): config_kwargs.update( {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay} ) if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None: max_lr = ( getattr(scheduler.optimizer, "lr", None) if getattr(scheduler.optimizer, "defaults", None) is None else scheduler.optimizer.defaults["lr"] ) config_kwargs.update( { "scheduler.params.warmup_min_lr": 0, "scheduler.params.warmup_max_lr": max_lr, "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps, } ) if scheduler.total_num_steps is not None: config_kwargs["scheduler.params.total_num_steps"] = ( math.ceil(scheduler.total_num_steps / self.num_processes) if not self.split_batches else scheduler.total_num_steps ) deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs) self.deepspeed_config = deepspeed_plugin.deepspeed_config kwargs = dict(model=model, config_params=self.deepspeed_config) if optimizer is not None: if isinstance(optimizer, (DummyOptim)): kwargs["model_parameters"] = optimizer.params if isinstance(scheduler, 
(DummyScheduler)) and scheduler.lr_scheduler_callable is not None: kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable else: if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get( "device", "none" ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True): from deepspeed.ops.adam import DeepSpeedCPUAdam defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]} optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults) kwargs["optimizer"] = optimizer if scheduler is not None: if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES: kwargs["lr_scheduler"] = scheduler engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs) if optimizer is not None: optimizer = DeepSpeedOptimizerWrapper(optimizer) if scheduler is not None: if lr_scheduler is None: scheduler = AcceleratedScheduler( scheduler, optimizer, step_with_optimizer=self.step_scheduler_with_optimizer, split_batches=self.split_batches, ) else: scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer) for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = engine elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)): result[i] = optimizer elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or ( type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES ): result[i] = scheduler # pointing for deepspeed_engine_wrapped.backward() self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine) self._models.append(engine) if optimizer is not None: self._optimizers.append(optimizer) if scheduler is not None: self._schedulers.append(scheduler) if len(self._models) > 1: raise AssertionError( "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed" ) return tuple(result) def _prepare_megatron_lm(self, *args): megatron_lm_plugin = self.state.megatron_lm_plugin if not megatron_lm_plugin.megatron_dataset_flag: batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] if len(batch_sizes) == 0: raise ValueError( "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM." ) micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes) if len(batch_sizes) > 1: logger.info( "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " f"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size})." 
) else: for obj in args: if isinstance(obj, MegatronLMDummyDataLoader): micro_batch_size = obj.dataset_args["micro_batch_size"] break dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree) megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree) model = None optimizer = None scheduler = None is_dummy_scheduler = False batch_data = None for obj in args: if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None: batch_data = next(iter(obj)) if isinstance(obj, torch.nn.Module): model = obj elif isinstance(obj, (torch.optim.Optimizer)): optimizer = obj elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)): scheduler = obj if model is not None: megatron_lm_plugin.set_network_size_args(model, batch_data) if optimizer is not None: megatron_lm_plugin.set_optimizer_type(optimizer) if scheduler is not None: is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler) if not is_dummy_scheduler: raise ValueError( "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead." ) megatron_lm_plugin.set_scheduler_args(scheduler) # initialize megatron-lm megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args) counter = 0 result = [] for obj in args: if isinstance(obj, torch.utils.data.DataLoader): result.append(megatron_lm_prepare_data_loader(self, obj)) counter += 1 elif isinstance(obj, MegatronLMDummyDataLoader): if counter == 0: obj.set_megatron_data_args() dataloaders = megatron_lm_prepare_data_loader(self, obj) result.append(dataloaders[counter]) counter += 1 else: result.append(obj) if model is not None: model = megatron_lm_prepare_model(self) if optimizer is not None: optimizer = megatron_lm_prepare_optimizer(self, model) if scheduler is not None: scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler) if model is not None: model = MegatronEngine(self, model, optimizer, scheduler) if optimizer is not None: optimizer = MegatronLMOptimizerWrapper(optimizer) if scheduler is not None: scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer) for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = model elif isinstance(result[i], torch.optim.Optimizer): result[i] = optimizer elif isinstance(result[i], MegatronLMDummyScheduler): result[i] = scheduler if model is not None: self._models.append(model) if optimizer is not None: self._optimizers.append(optimizer) if scheduler is not None: self._schedulers.append(scheduler) if len(self._models) > 1: raise AssertionError( "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM" ) return tuple(result) def _prepare_ipex(self, *args): if not is_ipex_available(): raise ImportError( "IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer" " to https://github.com/intel/intel-extension-for-pytorch." 
) else: import intel_extension_for_pytorch as ipex model = None optimizer = None result = [obj for obj in args] for obj in result: if isinstance(obj, torch.nn.Module): model = obj elif isinstance(obj, (torch.optim.Optimizer)): optimizer = obj if optimizer is not None and model is not None: dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else torch.float32 if self.device.type == "xpu" and is_xpu_available(): model = model.to(self.device) model, optimizer = torch.xpu.optimize( model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1" ) else: model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1") for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = model elif isinstance(result[i], (torch.optim.Optimizer)): result[i] = optimizer return tuple(result) def _prepare_msamp(self, *args): if not is_msamp_available(): raise ImportError( "MS-AMP was not found on your system. Please ensure that MS-AMP is available " " or choose `'te'` as the backend for FP8 mixed precision training." ) else: import msamp model, optimizer = None, None num_models, num_optimizers = 0, 0 result = [obj for obj in args] for obj in result: if isinstance(obj, torch.nn.Module): model = obj num_models += 1 elif isinstance(obj, (torch.optim.Optimizer)): optimizer = obj num_optimizers += 1 if optimizer is None or model is None: raise ValueError( "You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP." ) elif num_models > 1 or num_optimizers > 1: raise ValueError( f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with MS-AMP." ) else: model, optimizer = msamp.initialize(model, optimizer, opt_level=self.fp8_recipe_handler.opt_level) for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = model elif isinstance(result[i], (torch.optim.Optimizer)): result[i] = optimizer return tuple(result) def prepare_data_loader( self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None ): """ Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: data_loader (`torch.utils.data.DataLoader`): A vanilla PyTorch DataLoader to prepare device_placement (`bool`, *optional*): Whether or not to place the batches on the proper device in the prepared dataloader. Will default to `self.device_placement`. slice_fn_for_dispatch (`Callable`, *optional*`): If passed, this function will be used to slice tensors across `num_processes`. Will default to [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be ignored otherwise. Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> data_loader = torch.utils.data.DataLoader(...) 
>>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True) ``` """ # Ensure we can't double wrap a DataLoader due to `find_batch_size` if getattr(data_loader, "_is_accelerate_prepared", False): if data_loader not in self._dataloaders: self._dataloaders.append(data_loader) return data_loader if device_placement is None: device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False prepared_data_loader = prepare_data_loader( data_loader, self.device, num_processes=self.num_processes, process_index=self.process_index, split_batches=self.split_batches, put_on_device=device_placement, rng_types=self.rng_types.copy(), dispatch_batches=self.dispatch_batches, even_batches=self.even_batches, slice_fn_for_dispatch=slice_fn_for_dispatch, use_seedable_sampler=self.use_seedable_sampler, ) self._dataloaders.append(prepared_data_loader) return prepared_data_loader def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None): """ Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: optimizer (`torch.optim.Optimizer`): A vanilla PyTorch optimizer to prepare device_placement (`bool`, *optional*): Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`. Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> optimizer = torch.optim.Adam(...) >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True) ``` """ # Ensure we can't double wrap an optimizer due to `find_batch_size` if getattr(optimizer, "_is_accelerate_prepared", False): if optimizer not in self._optimizers: self._optimizers.append(optimizer) return optimizer if device_placement is None: device_placement = self.device_placement optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler) self._optimizers.append(optimizer) return optimizer def prepare_scheduler(self, scheduler: LRScheduler): """ Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: scheduler (`torch.optim.lr_scheduler.LRScheduler`): A vanilla PyTorch scheduler to prepare Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> optimizer = torch.optim.Adam(...) >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...) >>> scheduler = accelerator.prepare_scheduler(scheduler) ``` """ # Ensure we can't double wrap a scheduler due to `find_batch_size` if getattr(scheduler, "_is_accelerate_prepared", False): if scheduler not in self._schedulers: self._schedulers.append(scheduler) return scheduler # We try to find the optimizer associated with `scheduler`, the default is the full list. optimizer = self._optimizers for opt in self._optimizers: if getattr(scheduler, "optimizer", None) == opt.optimizer: optimizer = opt break scheduler = AcceleratedScheduler( scheduler, optimizer, step_with_optimizer=self.step_scheduler_with_optimizer, split_batches=self.split_batches, ) self._schedulers.append(scheduler) return scheduler def backward(self, loss, **kwargs): """ Scales the gradients in accordance to the `GradientAccumulationPlugin` and calls the correct `backward()` based on the configuration. Should be used in lieu of `loss.backward()`. 
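        When gradient accumulation is enabled and DeepSpeed is not in use, the loss passed in is divided by
        `gradient_accumulation_steps` before `backward()` is called, so gradients accumulated over several
        micro-batches add up to a single effective update; DeepSpeed applies this scaling internally instead.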
Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> outputs = model(inputs) >>> loss = loss_fn(outputs, labels) >>> accelerator.backward(loss) ``` """ if self.distributed_type != DistributedType.DEEPSPEED: # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` loss = loss / self.gradient_accumulation_steps if self.distributed_type == DistributedType.DEEPSPEED: self.deepspeed_engine_wrapped.backward(loss, **kwargs) elif self.distributed_type == DistributedType.MEGATRON_LM: return elif self.scaler is not None: self.scaler.scale(loss).backward(**kwargs) else: loss.backward(**kwargs) def set_trigger(self): """ Sets the internal trigger tensor to 1 on the current process. A latter check should follow using this which will check across all processes. Note: Does not require `wait_for_everyone()` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume later in the training script >>> # `should_do_breakpoint` is a custom function to monitor when to break, >>> # e.g. when the loss is NaN >>> if should_do_breakpoint(loss): ... accelerator.set_trigger() >>> # Assume later in the training script >>> if accelerator.check_breakpoint(): ... break ``` """ self.flag_tensor = torch.tensor(1, device=self.device) def check_trigger(self): """ Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and reset the trigger tensor to 0. Note: Does not require `wait_for_everyone()` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume later in the training script >>> # `should_do_breakpoint` is a custom function to monitor when to break, >>> # e.g. when the loss is NaN >>> if should_do_breakpoint(loss): ... accelerator.set_trigger() >>> # Assume later in the training script >>> if accelerator.check_trigger(): ... break ``` """ # Now that we are outside `__init__`, we can initialize it if it is `None` on device if self.flag_tensor is None: self.flag_tensor = torch.tensor(0, device=self.device) flag_tensor = self.reduce(self.flag_tensor) if flag_tensor.item() >= 1: self.flag_tensor = torch.tensor(0, device=self.device) return True return False def unscale_gradients(self, optimizer=None): """ Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings. Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`] Args: optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*): The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers that were passed to [`~Accelerator.prepare`]. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer = accelerator.prepare(model, optimizer) >>> outputs = model(inputs) >>> loss = loss_fn(outputs, labels) >>> accelerator.backward(loss) >>> accelerator.unscale_gradients(optimizer=optimizer) ``` """ if self.native_amp and self.mixed_precision == "fp16": if optimizer is None: # TODO: this unscales all optimizers where we should only unscale the one where parameters are. 
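                # Roughly, each prepared optimizer below is unwrapped down to the raw `torch.optim.Optimizer`
                # and handed to `self.scaler.unscale_`; on TPU/XLA the gradients are reduced across processes
                # first so that unscaling operates on synchronized values.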
optimizer = self._optimizers elif not isinstance(optimizer, (tuple, list)): optimizer = [optimizer] for opt in optimizer: while isinstance(opt, AcceleratedOptimizer): opt = opt.optimizer # Reduce gradients first for XLA if self.distributed_type == DistributedType.TPU: gradients = xm._fetch_gradients(opt) self.reduce(gradients, scale=1.0 / self.num_processes) self.scaler.unscale_(opt) def clip_grad_norm_(self, parameters, max_norm, norm_type=2): """ Should be used in place of `torch.nn.utils.clip_grad_norm_`. Returns: `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for input, target in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) ... optimizer.step() ``` """ if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return model.clip_grad_norm_(max_norm, norm_type) elif self.distributed_type == DistributedType.DEEPSPEED: # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed # We cannot return the gradient norm because DeepSpeed does it. return None self.unscale_gradients() return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) def clip_grad_value_(self, parameters, clip_value): """ Should be used in place of `torch.nn.utils.clip_grad_value_`. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for input, target in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_value_(model.parameters(), clip_value) ... optimizer.step() ``` """ if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]: raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.") self.unscale_gradients() torch.nn.utils.clip_grad_value_(parameters, clip_value) def gather(self, tensor): """ Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to regroup the predictions from all processes when doing evaluation. Note: This gather happens in all processes. Args: tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): The tensors to gather across all processes. Returns: `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors. 
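        Note that all processes need to provide tensors of the same shape for the gather to succeed; when shapes
        differ across processes, [`~Accelerator.pad_across_processes`] can be applied beforehand.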
Example: ```python >>> # Assuming four processes >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> process_tensor = torch.tensor([accelerator.process_index]) >>> gathered_tensor = accelerator.gather(process_tensor) >>> gathered_tensor tensor([0, 1, 2, 3]) ``` """ return gather(tensor) def gather_for_metrics(self, input_data): """ Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be used for gathering the inputs and targets for metric calculation. Args: input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`): The tensors or objects for calculating metrics across all processes Example: ```python >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5) >>> dataloader = accelerator.prepare(dataloader) >>> batch = next(iter(dataloader)) >>> gathered_items = accelerator.gather_for_metrics(batch) >>> len(gathered_items) 9 ``` """ try: recursively_apply(lambda x: x, input_data, error_on_other_type=True) all_tensors = True except TypeError: all_tensors = False if not all_tensors: data = gather_object(input_data) else: data = self.gather(input_data) try: if self.gradient_state.end_of_dataloader: # at the end of a dataloader, `gather_for_metrics` regresses to # `gather` unless the dataset has a remainder so log. if self.gradient_state.remainder == -1: logger.info( "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself." ) return data elif self.gradient_state.remainder > 0: # Last batch needs to be truncated on distributed systems as it contains additional samples def _adjust_samples(tensor): return tensor[: self.gradient_state.remainder] return recursively_apply(_adjust_samples, data) else: # remainder is 0 # no remainder even though at end of dataloader, so nothing to do. return data else: # Not at the end of the dataloader, no need to adjust the tensors return data except Exception: # Dataset had no length or raised an error return data def reduce(self, tensor, reduction="sum", scale=1.0): """ Reduce the values in *tensor* across all processes based on *reduction*. Note: All processes get the reduced value. Args: tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): The tensors to reduce across all processes. reduction (`str`, *optional*, defaults to "sum"): A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation. scale (`float`, *optional*, defaults to 1.0): A default scaling value to be applied after the reduce, only valied on XLA. Returns: `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The reduced tensor(s). 
Example: ```python >>> # Assuming two processes >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index) >>> process_tensor = process_tensor.to(accelerator.device) >>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum") >>> reduced_tensor tensor([4, 6]) ``` """ return reduce(tensor, reduction, scale) def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False): """ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they can safely be gathered. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. dim (`int`, *optional*, defaults to 0): The dimension on which to pad. pad_index (`int`, *optional*, defaults to 0): The value with which to pad. pad_first (`bool`, *optional*, defaults to `False`): Whether to pad at the beginning or the end. Returns: `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The padded tensor(s). Example: ```python >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2 >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device) >>> padded_tensor = accelerator.pad_across_processes(process_tensor) >>> padded_tensor.shape torch.Size([2]) ``` """ return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first) def unwrap_model(self, model, keep_fp32_wrapper: bool = True): """ Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving the model. Args: model (`torch.nn.Module`): The model to unwrap. keep_fp32_wrapper (`bool`, *optional*, defaults to `True`): Whether to not remove the mixed precision hook if it was added. Returns: `torch.nn.Module`: The unwrapped model. Example: ```python >>> # Assuming two GPU processes >>> from torch.nn.parallel import DistributedDataParallel >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model = accelerator.prepare(MyModel()) >>> print(model.__class__.__name__) DistributedDataParallel >>> model = accelerator.unwrap_model(model) >>> print(model.__class__.__name__) MyModel ``` """ return extract_model_from_parallel(model, keep_fp32_wrapper) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> if accelerator.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> accelerator.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ wait_for_everyone() @on_main_process def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}): """ Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations Args: project_name (`str`): The name of the project. All trackers will save their data based on this config (`dict`, *optional*): Optional starting configuration to be logged. 
init_kwargs (`dict`, *optional*): A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be formatted like so: ```python {"wandb": {"tags": ["tag_a", "tag_b"]}} ``` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers( ... project_name="my_project", ... config={"learning_rate": 0.001, "batch_size": 32}, ... init_kwargs={"tensorboard": {"flush_secs": 60}}, ... ) ``` """ for tracker in self.log_with: if issubclass(type(tracker), GeneralTracker): # Custom trackers are already initialized self.trackers.append(tracker) else: tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)] if getattr(tracker_init, "requires_logging_directory"): # We can skip this check since it was done in `__init__` self.trackers.append( tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {})) ) else: self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {}))) if config is not None: for tracker in self.trackers: tracker.store_init_configuration(config) def get_tracker(self, name: str, unwrap: bool = False): """ Returns a `tracker` from `self.trackers` based on `name` on the main process only. Args: name (`str`): The name of a tracker, corresponding to the `.name` property. unwrap (`bool`): Whether to return the internal tracking mechanism or to return the wrapped tracker instead (recommended). Returns: `GeneralTracker`: The tracker corresponding to `name` if it exists. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers("my_project") >>> tensorboard_tracker = accelerator.get_tracker("tensorboard") ``` """ if len(self.trackers) > 0: for tracker in self.trackers: if tracker.name == name: return tracker.tracker if unwrap else tracker raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.") # Handle tracker only made on main process return GeneralTracker(_blank=True) @on_main_process def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}): """ Logs `values` to all stored trackers in `self.trackers` on the main process only. Args: values (`dict`): Values should be a dictionary-like object containing only types `int`, `float`, or `str`. step (`int`, *optional*): The run step. If included, the log will be affiliated with this step. log_kwargs (`dict`, *optional*): A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted like so: ```python {"wandb": {"tags": ["tag_a", "tag_b"]}} ``` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers("my_project") >>> accelerator.log({"loss": 0.5, "accuracy": 0.9}) ``` """ for tracker in self.trackers: tracker.log(values, step=step, **log_kwargs.get(tracker.name, {})) @on_main_process def end_training(self): """ Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be called at the end of your script if using experiment tracking. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers("my_project") >>> # Do training >>> accelerator.end_training() ``` """ for tracker in self.trackers: tracker.finish() def save(self, obj, f, safe_serialization=False): """ Save the object passed to disk once per machine. 
Use in place of `torch.save`. Args: obj (`object`): The object to save. f (`str` or `os.PathLike`): Where to save the content of `obj`. safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` Note: If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node, rather than only once on the main node. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> arr = [0, 1, 2, 3] >>> accelerator.save(arr, "array.pkl") ``` """ save( obj, f, save_on_each_node=self.project_configuration.save_on_each_node, safe_serialization=safe_serialization, ) def save_model( self, model: torch.nn.Module, save_directory: Union[str, os.PathLike], max_shard_size: Union[int, str] = "10GB", safe_serialization: bool = True, ): """ Save a model so that it can be re-loaded using load_checkpoint_in_model Arguments: model: (`torch.nn.Module`): Model to be saved. The model can be wrapped or unwraped. save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). <Tip warning={true}> If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard which will be bigger than `max_shard_size`. </Tip> safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model = ... >>> accelerator.save_model(model, save_directory) ``` """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) # get the state_dict of the model if any( [ module._hf_hook.offload for module in model.modules() if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) ] ): state_dict = get_state_dict_offloaded_model(model) else: if any(param.device == torch.device("meta") for param in model.parameters()): raise RuntimeError("You can't save the model since some parameters are on the meta device.") state_dict = self.get_state_dict(model) if safe_serialization: state_dict = clean_state_dict_for_safetensors(state_dict) weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME # Shard the model if it is too big. shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name) # Clean the folder from a previous save for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) # If we have a shard file that is not going to be replaced, we delete it, but only from the main process # in distributed settings to avoid race conditions. weights_no_suffix = weights_name.replace(".bin", "") # make sure that file to be deleted matches format of sharded file, e.g. 
pytorch_model-00001-of-00005 filename_no_suffix = filename.replace(".bin", "") reg = re.compile(r"(.*?)-\d{5}-of-\d{5}") if ( filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and filename not in shards.keys() and reg.fullmatch(filename_no_suffix) is not None and PartialState().is_main_process ): os.remove(full_filename) # Save the model for shard_file, shard in shards.items(): self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization) if index is None: path_to_weights = os.path.join(save_directory, WEIGHTS_NAME) logger.info(f"Model weights saved in {path_to_weights}") else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, save_index_file) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: """ Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`]. Args: hook (`Callable`): A function to be called in [`Accelerator.save_state`] before `save_checkpoint`. The hook should have the following signature: `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None` The `models` argument are the models as saved in the accelerator state under `accelerator._models`, `weigths` argument are the state dicts of the `models`, and the `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`]. <Tip> Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save configurations in addition to model weights. Can also be used to overwrite model saving with a customized method. In this case, make sure to remove already loaded weights from the weights list. </Tip> Returns: `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling `handle.remove()` """ handle = hooks.RemovableHandle(self._save_model_state_pre_hook) self._save_model_state_pre_hook[handle.id] = hook return handle def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs): """ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder. If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater than `total_limit` then the oldest save is deleted. Each checkpoint is saved in seperate folders named `checkpoint_<iteration>`. Otherwise they are just saved to `output_dir`. <Tip> Should only be used when wanting to save a checkpoint during training and restoring the state in the same environment. </Tip> Args: output_dir (`str` or `os.PathLike`): The name of the folder to save all relevant weights and states. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). 
save_model_func_kwargs (`dict`, *optional*): Additional keyword arguments for saving model which can be passed to the underlying save function, such as optional arguments for DeepSpeed's `save_checkpoint` function. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, lr_scheduler = ... >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) >>> accelerator.save_state(output_dir="my_checkpoint") ``` """ if self.project_configuration.automatic_checkpoint_naming: output_dir = os.path.join(self.project_dir, "checkpoints") os.makedirs(output_dir, exist_ok=True) if self.project_configuration.automatic_checkpoint_naming: folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)] if ( self.project_configuration.total_limit is not None and (len(folders) + 1 > self.project_configuration.total_limit) and self.is_main_process ): def _inner(folder): return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] folders.sort(key=_inner) logger.warning( f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint." ) for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]: shutil.rmtree(folder) output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}") if os.path.exists(output_dir): raise ValueError( f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with." ) self.wait_for_everyone() os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving current state to {output_dir}") if self.distributed_type == DistributedType.TPU: # Finish running the previous step before checkpointing xm.mark_step() # Save the models taking care of FSDP and DeepSpeed nuances weights = [] for i, model in enumerate(self._models): if self.distributed_type == DistributedType.FSDP: logger.info("Saving FSDP model") save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i) logger.info(f"FSDP Model saved to output dir {output_dir}") elif self.distributed_type == DistributedType.DEEPSPEED: logger.info("Saving DeepSpeed Model and Optimizer") ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs) logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}") elif self.distributed_type == DistributedType.MEGATRON_LM: logger.info("Saving Megatron-LM Model, Optimizer and Scheduler") model.save_checkpoint(output_dir) logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}") else: weights.append(self.get_state_dict(model, unwrap=False)) # Save the optimizers taking care of FSDP and DeepSpeed nuances optimizers = [] if self.distributed_type == DistributedType.FSDP: for i, opt in enumerate(self._optimizers): logger.info("Saving FSDP Optimizer") save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i) logger.info(f"FSDP Optimizer saved to output dir {output_dir}") elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: optimizers = self._optimizers # Save the lr schedulers taking care of DeepSpeed nuances schedulers = [] if self.distributed_type == DistributedType.DEEPSPEED: for i, scheduler in enumerate(self._schedulers): if isinstance(scheduler, DeepSpeedSchedulerWrapper): continue 
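                # Schedulers wrapped by DeepSpeed are checkpointed through `model.save_checkpoint` above, so only
                # the remaining (non-DeepSpeed) schedulers need to be collected here.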
schedulers.append(scheduler) elif self.distributed_type not in [DistributedType.MEGATRON_LM]: schedulers = self._schedulers # Save the samplers of the dataloaders dataloaders = self._dataloaders # Call model loading hooks that might have been registered with # accelerator.register_model_state_hook for hook in self._save_model_state_pre_hook.values(): hook(self._models, weights, output_dir) save_location = save_accelerator_state( output_dir, weights, optimizers, schedulers, dataloaders, self.state.process_index, self.scaler, save_on_each_node=self.project_configuration.save_on_each_node, safe_serialization=safe_serialization, ) for i, obj in enumerate(self._custom_objects): save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node) self.project_configuration.iteration += 1 return save_location def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: """ Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`]. Args: hook (`Callable`): A function to be called in [`Accelerator.load_state`] before `load_checkpoint`. The hook should have the following signature: `hook(models: list[torch.nn.Module], input_dir: str) -> None` The `models` argument are the models as saved in the accelerator state under `accelerator._models`, and the `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`]. <Tip> Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load configurations in addition to model weights. Can also be used to overwrite model loading with a customized method. In this case, make sure to remove already loaded models from the models list. </Tip> Returns: `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling `handle.remove()` """ handle = hooks.RemovableHandle(self._load_model_state_pre_hook) self._load_model_state_pre_hook[handle.id] = hook return handle def load_state(self, input_dir: str = None, **load_model_func_kwargs): """ Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects. <Tip> Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for checkpointing, it will not be loaded if stored in the directory. </Tip> Args: input_dir (`str` or `os.PathLike`): The name of the folder all relevant weights and states were saved in. Can be `None` if `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint. load_model_func_kwargs (`dict`, *optional*): Additional keyword arguments for loading model which can be passed to the underlying load function, such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the model and optimizer on. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, lr_scheduler = ... 
>>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) >>> accelerator.load_state("my_checkpoint") ``` """ if input_dir is not None: # Check if folder exists input_dir = os.path.expanduser(input_dir) if not os.path.isdir(input_dir): raise ValueError(f"Tried to find {input_dir} but folder does not exist") elif self.project_configuration.automatic_checkpoint_naming: # Pick up from automatic checkpoint naming input_dir = os.path.join(self.project_dir, "checkpoints") folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)] def _inner(folder): return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] folders.sort(key=_inner) input_dir = folders[-1] else: raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.") logger.info(f"Loading states from {input_dir}") # Load the models taking care of FSDP and DeepSpeed nuances models = [] for i, model in enumerate(self._models): if self.distributed_type == DistributedType.FSDP: logger.info("Loading FSDP model") load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i) logger.info(f"FSDP Model loaded from input dir {input_dir}") elif self.distributed_type == DistributedType.DEEPSPEED: logger.info("Loading DeepSpeed Model and Optimizer") ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs) logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}") elif self.distributed_type == DistributedType.MEGATRON_LM: logger.info("Loading Megatron-LM Model, Optimizer and Scheduler") model.load_checkpoint(input_dir) logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}") else: models.append(model) # Load the optimizers taking care of FSDP and DeepSpeed nuances optimizers = [] if self.distributed_type == DistributedType.FSDP: for i, opt in enumerate(self._optimizers): logger.info("Loading FSDP Optimizer") load_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], input_dir, i) logger.info(f"FSDP Optimizer loaded from input dir {input_dir}") elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: optimizers = self._optimizers # Load the lr schedulers taking care of DeepSpeed nuances schedulers = [] if self.distributed_type == DistributedType.DEEPSPEED: for i, scheduler in enumerate(self._schedulers): if isinstance(scheduler, DeepSpeedSchedulerWrapper): continue schedulers.append(scheduler) elif self.distributed_type not in [DistributedType.MEGATRON_LM]: schedulers = self._schedulers dataloaders = self._dataloaders # Call model loading hooks that might have been registered with # accelerator.register_model_state_hook for hook in self._load_model_state_pre_hook.values(): hook(models, input_dir) map_location = load_model_func_kwargs.pop("map_location", None) if map_location is None: if self.num_processes > 1 and self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, ): map_location = "on_device" else: map_location = "cpu" load_accelerator_state( input_dir, models, optimizers, schedulers, dataloaders, self.state.process_index, self.scaler, map_location, **load_model_func_kwargs, ) custom_checkpoints = [ f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None ] if len(custom_checkpoints) != len(self._custom_objects): err = "Number of custom checkpoints in folder {input_dir} does not match the number of 
registered objects:" err += f"\n\tFound checkpoints: {len(custom_checkpoints)}" err += f"\n\tRegistered objects: {len(self._custom_objects)}\n" err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects," err += "or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually." raise RuntimeError(err) else: logger.info(f"Loading in {len(custom_checkpoints)} custom states") for index, obj in enumerate(self._custom_objects): load_custom_state(obj, input_dir, index) def free_memory(self): """ Will release all references to the internal objects stored and call the garbage collector. You should call this method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, scheduler = ... >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) >>> accelerator.free_memory() >>> del model, optimizer, scheduler ``` """ self._schedulers = [] self._optimizers = [] self._models = [] self._dataloaders = [] self.deepspeed_engine_wrapped = None self.step = 0 release_memory() def clear(self): """ Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the garbage collector. You should call this method between two trainings with different models/optimizers. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, scheduler = ... >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) >>> accelerator.free_memory() >>> del model, optimizer, scheduler ``` """ self.free_memory() def _get_named_parameters(self, *args): named_parameters = {} for obj in args: if isinstance(obj, torch.nn.Module): obj = extract_model_from_parallel(obj) named_parameters.update({n: p for n, p in obj.named_parameters()}) return named_parameters def _get_devices(self, *args): model_device = None optimizer_device = None for obj in args: # Loop through model parameters and stop at the first once we have its device. if isinstance(obj, torch.nn.Module): for param in obj.parameters(): model_device = param.device break # Loop through optimizer parameters groups and stop at the first once we have its device. if isinstance(obj, torch.optim.Optimizer): for param_group in obj.param_groups: if len(param_group["params"]) > 0: optimizer_device = param_group["params"][0].device break return (model_device, optimizer_device) def get_state_dict(self, model, unwrap=True): """ Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full precision. Args: model (`torch.nn.Module`): A PyTorch model sent through [`Accelerator.prepare`] unwrap (`bool`, *optional*, defaults to `True`): Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict Returns: `dict`: The state dictionary of the model potentially without full precision. 
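        Under DeepSpeed ZeRO-3 the 16-bit weights are consolidated across processes (which requires
        `stage3_gather_16bit_weights_on_model_save` to be enabled in the DeepSpeed config), and under FSDP a full
        state dict is gathered with CPU offload and materialized on rank 0 only.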
Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> net = torch.nn.Linear(2, 2) >>> net = accelerator.prepare(net) >>> state_dict = accelerator.get_state_dict(net) ``` """ if self.distributed_type == DistributedType.DEEPSPEED: if self.deepspeed_config["zero_optimization"]["stage"] == 3: if model.zero_gather_16bit_weights_on_model_save(): state_dict = model._zero3_consolidated_16bit_state_dict() else: raise ValueError( "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. " "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or " "set `zero3_save_16bit_model` to True when using `accelerate config`. " "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights." ) else: from deepspeed.checkpoint.utils import clone_tensors_for_torch_save state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict()) elif self.distributed_type == DistributedType.FSDP: from torch.distributed.fsdp import FullStateDictConfig, StateDictType from torch.distributed.fsdp import FullyShardedDataParallel as FSDP full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config): state_dict = model.state_dict() else: if unwrap: model = self.unwrap_model(model) state_dict = model.state_dict() return state_dict def register_for_checkpointing(self, *objects): """ Makes note of `objects` and will save or load them in during `save_state` or `load_state`. These should be utilized when the state is being loaded or saved in the same script. It is not designed to be used in different scripts. <Tip> Every `object` must have a `load_state_dict` and `state_dict` function to be stored. </Tip> Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function. >>> obj = CustomObject() >>> accelerator.register_for_checkpointing(obj) >>> accelerator.save_state("checkpoint.pt") ``` """ invalid_objects = [] for obj in objects: if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"): invalid_objects.append(obj) if len(invalid_objects) > 0: err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:" for index, obj in enumerate(invalid_objects): err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`" raise ValueError(err) self._custom_objects.extend(objects) @contextmanager def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None): """ Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing different will happen otherwise. A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is useful in blocks under `autocast` where you want to revert to fp32. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(mixed_precision="fp16") >>> with accelerator.autocast(): ... train() ``` """ if cache_enabled: warnings.warn( "Passing `cache_enabled=True` to `accelerator.autocast` is deprecated and will be removed in v0.23.0. 
" "Please use the `AutocastKwargs` class instead and pass it to the `Accelerator` as a `kwarg_handler`.", FutureWarning, ) if self.autocast_handler is not None: self.autocast_handler.cache_enabled = True else: self.autocast_handler = AutocastKwargs(cache_enabled=True) if autocast_handler is None: autocast_handler = self.autocast_handler autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler) autocast_context.__enter__() yield autocast_context.__exit__(*sys.exc_info()) @property def optimizer_step_was_skipped(self): """ Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which case the learning rate should not be changed. """ for optimizer in self._optimizers: if optimizer.step_was_skipped: return True return False def skip_first_batches(self, dataloader, num_batches: int = 0): """ Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. Args: dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches. num_batches (`int`, *optional*, defaults to 0): The number of batches to skip Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2) >>> # for the first epoch only >>> for input, target in skipped_dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... optimizer.step() >>> # subsequent epochs >>> for input, target in dataloader: ... optimizer.zero_grad() ... ... ``` """ return skip_first_batches(dataloader, num_batches=num_batches) def __deepcopy__(self, memo): logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.") return self def verify_device_map(self, model: torch.nn.Module) -> bool: """ Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`. """ # Checks if any of the child modules has the attribute `hf_device_map` and this map has more than one entry. for m in model.modules(): if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1: return True return False
accelerate/src/accelerate/accelerator.py/0
{ "file_path": "accelerate/src/accelerate/accelerator.py", "repo_id": "accelerate", "token_count": 61941 }
4
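A minimal sketch of the checkpointing API documented in the record above, assuming a single-process run; the `StepCounter` class and the `ckpt` directory are made-up names for illustration only.

```python
# Illustrative: register a custom object so it is saved/restored with save_state/load_state.
import torch
from accelerate import Accelerator


class StepCounter:
    """Any object exposing `state_dict` and `load_state_dict` can be registered for checkpointing."""

    def __init__(self):
        self.steps = 0

    def state_dict(self):
        return {"steps": self.steps}

    def load_state_dict(self, state):
        self.steps = state["steps"]


accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

counter = StepCounter()
accelerator.register_for_checkpointing(counter)

accelerator.save_state("ckpt")  # writes model, optimizer and the registered custom object
counter.steps += 1
accelerator.load_state("ckpt")  # restores everything, including counter.steps == 0
```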
from .selection_menu import BulletMenu
accelerate/src/accelerate/commands/menu/__init__.py/0
{ "file_path": "accelerate/src/accelerate/commands/menu/__init__.py", "repo_id": "accelerate", "token_count": 9 }
5
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging import math import os import threading import warnings from contextlib import contextmanager from functools import partial from typing import Any, Callable, Optional import torch from .utils import ( DistributedType, DynamoBackend, GradientAccumulationPlugin, check_cuda_p2p_ib_support, check_fp8_capability, get_ccl_version, get_int_from_env, is_ccl_available, is_deepspeed_available, is_fp8_available, is_ipex_available, is_mps_available, is_npu_available, is_tpu_available, is_xpu_available, parse_choice_from_env, parse_flag_from_env, ) from .utils.dataclasses import SageMakerDistributedType if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm if is_npu_available(check_device=False): import torch_npu # noqa: F401 logger = logging.getLogger(__name__) def is_initialized() -> bool: """ Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`, but works as a module method. """ return AcceleratorState._shared_state != {} # Lambda function that does nothing def do_nothing(*args, **kwargs): return None class ThreadLocalSharedDict(threading.local): """ Descriptor that holds a dict shared between instances of a class in the same thread. Note: Descriptors have slightly different semantics than just a dict field on its own. `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor object with a dict instead Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`). See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3). See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3 """ def __init__(self, thread_local: bool = False): self._storage = {} def __get__(self, obj, objtype=None): return self._storage def __set__(self, obj, value): self._storage = value # Prefer global shared dictionary, except when using TPU. SharedDict = dict if not is_tpu_available(check_device=False) else ThreadLocalSharedDict # Inspired by Alex Martelli's 'Borg'. class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. 
- **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_tpu_available() and not cpu: self.distributed_type = DistributedType.TPU self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() self.local_process_index = xm.get_local_ordinal() self.device = xm.xla_device() elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if 
self.device is None: if is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1: if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. 
if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) @staticmethod def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() @property def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} @property def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 @property def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 @property def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) @property def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... 
print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.TPU: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() @contextmanager def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: return inputs yield _split_values(inputs, start_index, end_index) @contextmanager def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) @contextmanager def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() @state.on_local_main_process def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() @state.on_last_process def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. 
from accelerate.state import PartialState state = PartialState() @state.on_process(process_index=2) def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_local_process(local_process_index=2) def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ if function is None: return partial(self.on_local_process, local_process_index=local_process_index) if (self.local_process_index == local_process_index) or (not self.use_distributed): return function return do_nothing def print(self, *args, **kwargs): if self.is_local_main_process: print(*args, **kwargs) @property def default_device(self) -> torch.device: """ Returns the default device which is: - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. - CUDA if `torch.cuda.is_available()` - NPU if `is_npu_available()` - CPU otherwise """ if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" return torch.device("mps") elif torch.cuda.is_available(): return torch.device("cuda") elif is_xpu_available(): return torch.device("xpu:0") elif is_npu_available(): return torch.device("npu") else: return torch.device("cpu") class AcceleratorState: """ Singleton class that has information about the current training environment. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
""" _shared_state = SharedDict() def __init__( self, mixed_precision: str = None, cpu: bool = False, dynamo_plugin=None, deepspeed_plugin=None, fsdp_plugin=None, megatron_lm_plugin=None, _from_accelerator: bool = False, **kwargs, ): self.__dict__ = self._shared_state if parse_flag_from_env("ACCELERATE_USE_CPU"): cpu = True if PartialState._shared_state == {}: PartialState(cpu, **kwargs) self.__dict__.update(PartialState._shared_state) self._check_initialized(mixed_precision, cpu) if not self.initialized: self.deepspeed_plugin = None self.use_ipex = None mixed_precision = ( parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision.lower() ) if mixed_precision == "fp8": if not is_fp8_available(): raise ValueError( "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." ) elif not check_fp8_capability(): logger.warning( f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " "or higher, compute capability of 8.9 or higher). Will use FP16 instead." ) mixed_precision = "fp16" self.dynamo_plugin = dynamo_plugin if not _from_accelerator: raise ValueError( "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " "before using any functionality from the `accelerate` library." ) # deepspeed handles mixed_precision using deepspeed_config self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision if self.distributed_type == DistributedType.TPU: if mixed_precision == "bf16": if os.environ.get("ACCELERATE_DOWNCAST_BF16"): os.environ["XLA_USE_BF16"] = str(0) os.environ["XLA_DOWNCAST_BF16"] = str(1) self.downcast_bfloat = True else: os.environ["XLA_USE_BF16"] = str(1) os.environ["XLA_DOWNCAST_BF16"] = str(0) self.downcast_bfloat = False elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: self.deepspeed_plugin = deepspeed_plugin elif self.distributed_type == DistributedType.MULTI_GPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true": self.distributed_type = DistributedType.MEGATRON_LM megatron_lm_plugin.set_mixed_precision(self._mixed_precision) self.megatron_lm_plugin = megatron_lm_plugin elif self.distributed_type == DistributedType.MULTI_NPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: if is_ipex_available(): "check if user disables it explicitly" self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) else: self.use_ipex = False if self.distributed_type == DistributedType.MULTI_XPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if ( self.dynamo_plugin.backend != DynamoBackend.NO and self._mixed_precision == "no" and self.device.type == "cuda" ): torch.backends.cuda.matmul.allow_tf32 = 
True PartialState._shared_state["distributed_type"] = self.distributed_type @property def initialized(self) -> bool: return self._shared_state != PartialState._shared_state def __repr__(self): repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" if self.distributed_type == DistributedType.DEEPSPEED: repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" return repr def _check_initialized(self, mixed_precision=None, cpu=None): "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" if self.initialized: err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." if cpu and self.device.type != "cpu": raise ValueError(err.format(flag="cpu=True")) if ( mixed_precision is not None and mixed_precision != self._mixed_precision and self.distributed_type != DistributedType.DEEPSPEED ): raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) # For backward compatibility @property def use_fp16(self): warnings.warn( "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " "`AcceleratorState.mixed_precision == 'fp16'` instead.", FutureWarning, ) return self._mixed_precision != "no" @property def mixed_precision(self): if self.distributed_type == DistributedType.DEEPSPEED: config = self.deepspeed_plugin.deepspeed_config if config.get("fp16", {}).get("enabled", False): mixed_precision = "fp16" elif config.get("bf16", {}).get("enabled", False): mixed_precision = "bf16" else: mixed_precision = "no" else: mixed_precision = self._mixed_precision return mixed_precision @staticmethod def _reset_state(reset_partial_state: bool = False): "Resets `_shared_state`, is used internally and should not be called" AcceleratorState._shared_state.clear() if reset_partial_state: PartialState._reset_state() @property def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return PartialState().use_distributed @property def is_last_process(self) -> bool: "Returns whether the current process is the last one" return PartialState().is_last_process @property def is_main_process(self) -> bool: "Returns whether the current process is the main process" return PartialState().is_main_process @property def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return PartialState().is_local_main_process def wait_for_everyone(self): PartialState().wait_for_everyone() @contextmanager def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. 
Example: ```python # Assume there are two processes from accelerate.state import AcceleratorState state = AcceleratorState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: yield inputs @contextmanager def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. """ with PartialState().main_process_first(): yield @contextmanager def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. """ with PartialState().local_main_process_first(): yield def print(self, *args, **kwargs): PartialState().print(*args, **kwargs) class GradientState: """ Singleton class that has information related to gradient synchronization for gradient accumulation **Available attributes:** - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are being iterated over - **num_steps** (`int`) -- The number of steps to accumulate over - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient accumulation - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset """ _shared_state = SharedDict() def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None): self.__dict__ = self._shared_state if not self.initialized: self.sync_gradients = True self.active_dataloader = None self.dataloader_references = [None] self.plugin_kwargs = ( gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {} ) # Plugin args are different and can be updated if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs(): self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs() @property def num_steps(self) -> int: "Returns the number of steps to accumulate over" return self.plugin_kwargs.get("num_steps", 1) @property def adjust_scheduler(self) -> bool: "Returns whether the scheduler should be adjusted" return self.plugin_kwargs.get("adjust_scheduler", False) @property def sync_with_dataloader(self) -> bool: "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset" return self.plugin_kwargs.get("sync_with_dataloader", True) @property def initialized(self) -> bool: "Returns whether the `GradientState` has been initialized" return GradientState._shared_state != {} @property def end_of_dataloader(self) -> bool: "Returns whether we have reached the end of the current dataloader" if not self.in_dataloader: return False return self.active_dataloader.end_of_dataloader @property def 
remainder(self) -> int: "Returns the number of extra samples that were added from padding the dataloader" if not self.in_dataloader: return -1 return self.active_dataloader.remainder def __repr__(self): return ( f"Sync Gradients: {self.sync_gradients}\n" f"At end of current dataloader: {self.end_of_dataloader}\n" f"Extra samples added: {self.remainder}\n" f"Gradient accumulation plugin: {self.plugin_kwargs}\n" ) def _set_sync_gradients(self, sync_gradients): "Private function that sets whether gradients should be synchronized. Users should not have to call this." self.sync_gradients = sync_gradients def _add_dataloader(self, dataloader): "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this." self.active_dataloader = dataloader self.dataloader_references.append(self.active_dataloader) def _remove_dataloader(self, dataloader): "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this." self.dataloader_references.remove(dataloader) self.active_dataloader = self.dataloader_references[-1] @property def in_dataloader(self) -> bool: "Returns whether the current process is in a dataloader" return self.active_dataloader is not None @staticmethod def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" GradientState._shared_state.clear()
accelerate/src/accelerate/state.py/0
{ "file_path": "accelerate/src/accelerate/state.py", "repo_id": "accelerate", "token_count": 21388 }
6
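A short sketch assembled from the `PartialState` docstrings in the record above; the prompt strings are placeholders, and running under `accelerate launch` (or a single process) is assumed.

```python
# Each process receives only its own slice of the inputs; padding keeps the counts equal.
from accelerate import PartialState

state = PartialState()

prompts = ["a photo of a cat", "a photo of a dog", "a photo of a bird"]
with state.split_between_processes(prompts, apply_padding=True) as subset:
    print(f"process {state.process_index}: {subset}")

state.wait_for_everyone()
```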
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class RegressionDataset: def __init__(self, a=2, b=3, length=64, seed=None): rng = np.random.default_rng(seed) self.length = length self.x = rng.normal(size=(length,)).astype(np.float32) self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32) def __len__(self): return self.length def __getitem__(self, i): return {"x": self.x[i], "y": self.y[i]} class RegressionModel4XPU(torch.nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = torch.nn.Parameter(torch.tensor([2, 3]).float()) self.b = torch.nn.Parameter(torch.tensor([2, 3]).float()) self.first_batch = True def forward(self, x=None): if self.first_batch: print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}") self.first_batch = False return x * self.a[0] + self.b[0] class RegressionModel(torch.nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = torch.nn.Parameter(torch.tensor(a).float()) self.b = torch.nn.Parameter(torch.tensor(b).float()) self.first_batch = True def forward(self, x=None): if self.first_batch: print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}") self.first_batch = False return x * self.a + self.b def mocked_dataloaders(accelerator, batch_size: int = 16): from datasets import load_dataset from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} datasets = load_dataset("csv", data_files=data_files) label_list = datasets["train"].unique("label") label_to_id = {v: i for i, v in enumerate(label_list)} def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer( examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length" ) if "label" in examples: outputs["labels"] = [label_to_id[l] for l in examples["label"]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"], ) def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. 
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2) eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1) return train_dataloader, eval_dataloader
accelerate/src/accelerate/test_utils/training.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/training.py", "repo_id": "accelerate", "token_count": 1572 }
7
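The toy regression helpers above can be exercised directly; a sketch, assuming `torch` and `accelerate` are installed and importing from the module path shown in the record.

```python
# Fit the toy linear model on its synthetic dataset; defaults match the classes above.
import torch
from torch.utils.data import DataLoader

from accelerate.test_utils.training import RegressionDataset, RegressionModel

dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
loader = DataLoader(dataset, batch_size=16, shuffle=True)

model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for _ in range(20):
    for batch in loader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
        loss.backward()
        optimizer.step()

print(model.a.item(), model.b.item())  # should converge toward a=2, b=3
```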
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config # noqa: F401 from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_tpu_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version logger = get_logger(__name__) if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def is_compiled_module(module): """ Check whether the module was compiled with torch.compile() """ if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): return False return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True): """ Extract a model from its distributed containers. Args: model (`torch.nn.Module`): The model to extract. keep_fp32_wrapper (`bool`, *optional*): Whether to remove mixed precision hooks from the model. Returns: `torch.nn.Module`: The extracted model. """ options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) is_compiled = is_compiled_module(model) if is_compiled: compiled_model = model model = model._orig_mod if is_deepspeed_available(): from deepspeed import DeepSpeedEngine options += (DeepSpeedEngine,) if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available(): from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP options += (FSDP,) while isinstance(model, options): model = model.module if not keep_fp32_wrapper: forward = getattr(model, "forward") original_forward = model.__dict__.pop("_original_forward", None) if original_forward is not None: while hasattr(forward, "__wrapped__"): forward = forward.__wrapped__ if forward == original_forward: break model.forward = MethodType(forward, model) if getattr(model, "_converted_to_transformer_engine", False): convert_model(model, to_transformer_engine=False) if is_compiled: compiled_model._orig_mod = model model = compiled_model return model def wait_for_everyone(): """ Introduces a blocking point in the script, making sure all processes have reached this point before continuing. <Tip warning={true}> Make sure all processes will reach this instruction otherwise one of your processes will hang forever. </Tip> """ PartialState().wait_for_everyone() def clean_state_dict_for_safetensors(state_dict: dict): """ Cleans the state dictionary from a model and removes tensor aliasing if present. 
Args: state_dict (`dict`): The state dictionary from a model """ ptrs = collections.defaultdict(list) # When bnb serialization is used, weights in state dict can be strings for name, tensor in state_dict.items(): if not isinstance(tensor, str): ptrs[id_tensor_storage(tensor)].append(name) # These are all pointers of tensors with shared memory shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} warn_names = set() for names in shared_ptrs.values(): # When not all duplicates have been cleaned, we still remove those keys but put a clear warning. # If the link between tensors was done at runtime then `from_pretrained` will not get # the key back leading to random tensor. A proper warning will be shown # during reload (if applicable), but since the file is not necessarily compatible with # the config, better show a proper warning. found_names = [name for name in names if name in state_dict] warn_names.update(found_names[1:]) for name in found_names[1:]: del state_dict[name] if len(warn_names) > 0: logger.warning( f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading", ) state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()} return state_dict def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False): """ Save the data to disk. Use in place of `torch.save()`. Args: obj: The data to save f: The file (or file-like object) to use to save the data save_on_each_node (`bool`, *optional*, defaults to `False`): Whether to only save on the global main process safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`). """ # Check if it's a model and remove duplicates if safe_serialization: save_func = partial(safe_save_file, metadata={"format": "pt"}) if isinstance(obj, OrderedDict): obj = clean_state_dict_for_safetensors(obj) else: save_func = torch.save if PartialState().distributed_type == DistributedType.TPU: xm.save(obj, f) elif PartialState().is_main_process and not save_on_each_node: save_func(obj, f) elif PartialState().is_local_main_process and save_on_each_node: save_func(obj, f) @contextmanager def clear_environment(): """ A context manager that will cache origin `os.environ` and replace it with a empty dictionary in this context. When this context exits, the cached `os.environ` will be back. Example: ```python >>> import os >>> from accelerate.utils import clear_environment >>> os.environ["FOO"] = "bar" >>> with clear_environment(): ... print(os.environ) ... os.environ["FOO"] = "new_bar" ... print(os.environ["FOO"]) {} new_bar >>> print(os.environ["FOO"]) bar ``` """ _old_os_environ = os.environ os.environ = dict() yield os.environ = _old_os_environ @contextmanager def patch_environment(**kwargs): """ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. Will convert the values in `kwargs` to strings and upper-case all the keys. Example: ```python >>> import os >>> from accelerate.utils import patch_environment >>> with patch_environment(FOO="bar"): ... 
print(os.environ["FOO"]) # prints "bar" >>> print(os.environ["FOO"]) # raises KeyError ``` """ existing_vars = {} for key, value in kwargs.items(): key = key.upper() if key in os.environ: existing_vars[key] = os.environ[key] os.environ[key] = str(value) yield for key in kwargs: key = key.upper() if key in existing_vars: # restore previous value os.environ[key] = existing_vars[key] else: os.environ.pop(key, None) def get_pretty_name(obj): """ Gets a pretty name from `obj`. """ if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"): obj = getattr(obj, "__class__", obj) if hasattr(obj, "__qualname__"): return obj.__qualname__ if hasattr(obj, "__name__"): return obj.__name__ return str(obj) def merge_dicts(source, destination): """ Recursively merges two dictionaries. Args: source (`dict`): The dictionary to merge into `destination`. destination (`dict`): The dictionary to merge `source` into. """ for key, value in source.items(): if isinstance(value, dict): node = destination.setdefault(key, {}) merge_dicts(value, node) else: destination[key] = value return destination def is_port_in_use(port: int = None) -> bool: """ Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been run and need to see if the port is already in use. """ if port is None: port = 29500 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: return s.connect_ex(("localhost", port)) == 0 def convert_bytes(size): "Converts `size` from bytes to the largest possible unit" for x in ["bytes", "KB", "MB", "GB", "TB"]: if size < 1024.0: return f"{round(size, 2)} {x}" size /= 1024.0 return f"{round(size, 2)} PB" def check_os_kernel(): """Warns if the kernel version is below the recommended minimum on Linux.""" # see issue #1929 info = platform.uname() system = info.system if system != "Linux": return _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release) min_version = "5.5.0" if Version(version) < Version(min_version): msg = ( f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can " "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher." ) logger.warning(msg, main_process_only=True) def recursive_getattr(obj, attr: str): """ Recursive `getattr`. Args: obj: A class instance holding the attribute. attr (`str`): The attribute that is to be retrieved, e.g. 'attribute1.attribute2'. """ def _getattr(obj, attr): return getattr(obj, attr) return reduce(_getattr, [obj] + attr.split("."))
accelerate/src/accelerate/utils/other.py/0
{ "file_path": "accelerate/src/accelerate/utils/other.py", "repo_id": "accelerate", "token_count": 4102 }
8
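Two of the helpers above in isolation; a sketch following the file's own docstrings, with an arbitrary port number chosen for the example.

```python
# `patch_environment` upper-cases the keys, stringifies values, and restores os.environ on exit.
import os

from accelerate.utils import patch_environment
from accelerate.utils.other import convert_bytes

with patch_environment(master_addr="127.0.0.1", master_port=29501):
    print(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])  # 127.0.0.1 29501

print("MASTER_ADDR" in os.environ)  # False, unless it was already set before the block

print(convert_bytes(3 * 1024**3))  # "3.0 GB"
```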
import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class MockLaunchConfig(SageMakerConfig): compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER fp16 = True ec2_instance_type = "ml.p3.2xlarge" iam_role_name = "accelerate_sagemaker_execution_role" profile = "hf-sm" region = "us-east-1" num_machines = 1 base_job_name = "accelerate-sagemaker-1" pytorch_version = "1.6" transformers_version = "4.4" training_script = "train.py" success_training_script_args = [ "--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5", ] fail_training_script_args = [ "--model_name_or_path", "bert", "--do_train", "--do_test", "False", "--do_predict", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5", ] class SageMakerLaunch(unittest.TestCase): def test_args_convert(self): # If no defaults are changed, `to_kwargs` returns an empty dict. converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args) assert isinstance(converted_args["model_name_or_path"], str) assert isinstance(converted_args["do_train"], bool) assert isinstance(converted_args["epochs"], int) assert isinstance(converted_args["learning_rate"], float) assert isinstance(converted_args["max_steps"], float) with pytest.raises(ValueError): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
accelerate/tests/test_sagemaker.py/0
{ "file_path": "accelerate/tests/test_sagemaker.py", "repo_id": "accelerate", "token_count": 851 }
9
# Welcome to the RLHF Handbook! Stay tuned for more details πŸ€—
alignment-handbook/chapters/en/chapter0/introduction.mdx/0
{ "file_path": "alignment-handbook/chapters/en/chapter0/introduction.mdx", "repo_id": "alignment-handbook", "token_count": 18 }
10
# Model arguments model_name_or_path: mistralai/Mistral-7B-v0.1 model_revision: main torch_dtype: float16 # LoRA arguments load_in_4bit: true use_peft: true lora_r: 16 lora_alpha: 16 lora_dropout: 0.05 lora_target_modules: - q_proj - k_proj - v_proj - o_proj - gate_proj - up_proj - down_proj # Data training arguments chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" dataset_mixer: HuggingFaceH4/ultrachat_200k: 1.0 dataset_splits: - train_sft - test_sft preprocessing_num_workers: 12 # SFT trainer config bf16: true do_eval: true evaluation_strategy: epoch gradient_accumulation_steps: 2 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: false hub_model_id: zephyr-7b-sft-qlora hub_strategy: every_save learning_rate: 2.0e-04 log_level: info logging_steps: 5 logging_strategy: steps lr_scheduler_type: cosine max_seq_length: 2048 max_steps: -1 num_train_epochs: 1 output_dir: data/zephyr-7b-sft-qlora overwrite_output_dir: true per_device_eval_batch_size: 8 per_device_train_batch_size: 4 push_to_hub: true report_to: - tensorboard save_strategy: "steps" save_steps: 100 save_total_limit: 1 seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/zephyr-7b-beta/sft/config_qlora.yaml/0
{ "file_path": "alignment-handbook/recipes/zephyr-7b-beta/sft/config_qlora.yaml", "repo_id": "alignment-handbook", "token_count": 646 }
11
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from transformers import AutoTokenizer from alignment import ( DataArguments, ModelArguments, get_peft_config, get_quantization_config, get_tokenizer, is_adapter_model, ) from alignment.data import DEFAULT_CHAT_TEMPLATE class GetQuantizationConfigTest(unittest.TestCase): def test_4bit(self): model_args = ModelArguments(load_in_4bit=True) quantization_config = get_quantization_config(model_args) self.assertTrue(quantization_config.load_in_4bit) self.assertEqual(quantization_config.bnb_4bit_compute_dtype, torch.float16) self.assertEqual(quantization_config.bnb_4bit_quant_type, "nf4") self.assertFalse(quantization_config.bnb_4bit_use_double_quant) def test_8bit(self): model_args = ModelArguments(load_in_8bit=True) quantization_config = get_quantization_config(model_args) self.assertTrue(quantization_config.load_in_8bit) def test_no_quantization(self): model_args = ModelArguments() quantization_config = get_quantization_config(model_args) self.assertIsNone(quantization_config) class GetTokenizerTest(unittest.TestCase): def setUp(self) -> None: self.model_args = ModelArguments(model_name_or_path="HuggingFaceH4/zephyr-7b-alpha") def test_right_truncation_side(self): tokenizer = get_tokenizer(self.model_args, DataArguments(truncation_side="right")) self.assertEqual(tokenizer.truncation_side, "right") def test_left_truncation_side(self): tokenizer = get_tokenizer(self.model_args, DataArguments(truncation_side="left")) self.assertEqual(tokenizer.truncation_side, "left") def test_default_chat_template(self): tokenizer = get_tokenizer(self.model_args, DataArguments()) self.assertEqual(tokenizer.chat_template, DEFAULT_CHAT_TEMPLATE) def test_default_chat_template_no_overwrite(self): """ If no chat template is passed explicitly in the config, then for models with a `default_chat_template` but no `chat_template` we do not set a `chat_template`, and that we do not change `default_chat_template` """ model_args = ModelArguments(model_name_or_path="codellama/CodeLlama-7b-Instruct-hf") base_tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-Instruct-hf") processed_tokenizer = get_tokenizer(model_args, DataArguments()) assert getattr(processed_tokenizer, "chat_template") is None self.assertEqual(base_tokenizer.default_chat_template, processed_tokenizer.default_chat_template) def test_chatml_chat_template(self): chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" tokenizer = get_tokenizer(self.model_args, DataArguments(chat_template=chat_template)) self.assertEqual(tokenizer.chat_template, chat_template) class GetPeftConfigTest(unittest.TestCase): def test_peft_config(self): model_args = 
ModelArguments(use_peft=True, lora_r=42, lora_alpha=0.66, lora_dropout=0.99) peft_config = get_peft_config(model_args) self.assertEqual(peft_config.r, 42) self.assertEqual(peft_config.lora_alpha, 0.66) self.assertEqual(peft_config.lora_dropout, 0.99) def test_no_peft_config(self): model_args = ModelArguments(use_peft=False) peft_config = get_peft_config(model_args) self.assertIsNone(peft_config) class IsAdapterModelTest(unittest.TestCase): def test_is_adapter_model_calls_listdir(self): # Assert that for an invalid repo name it gets to the point where it calls os.listdir, # which is expected to raise a FileNotFoundError self.assertRaises(FileNotFoundError, is_adapter_model, "nonexistent/model")
alignment-handbook/tests/test_model_utils.py/0
{ "file_path": "alignment-handbook/tests/test_model_utils.py", "repo_id": "alignment-handbook", "token_count": 1782 }
12
# Summary [Introduction](README.md) # User Guide - [Installation](guide/installation.md) - [Hello World - MNIST](guide/hello_world.md) - [PyTorch cheatsheet](guide/cheatsheet.md) # Reference Guide - [Running a model](inference/inference.md) - [Using the hub](inference/hub.md) - [Error management](error_manage.md) - [Training](training/training.md) - [Simplified](training/simplified.md) - [MNIST](training/mnist.md) - [Fine-tuning]() - [Serialization]() - [Advanced Cuda usage]() - [Writing a custom kernel]() - [Porting a custom kernel]() - [Using MKL]() - [Creating apps]() - [Creating a WASM app]() - [Creating a REST api webserver]() - [Creating a desktop Tauri app]()
candle/candle-book/src/SUMMARY.md/0
{ "file_path": "candle/candle-book/src/SUMMARY.md", "repo_id": "candle", "token_count": 274 }
13
# Writing a custom kernel
candle/candle-book/src/inference/cuda/writing.md/0
{ "file_path": "candle/candle-book/src/inference/cuda/writing.md", "repo_id": "candle", "token_count": 6 }
14
pub(crate) mod affine; pub(crate) mod matmul; pub(crate) mod random; pub(crate) mod where_cond; use candle_core::{Device, Result}; pub(crate) trait BenchDevice { fn sync(&self) -> Result<()>; fn bench_name<S: Into<String>>(&self, name: S) -> String; } impl BenchDevice for Device { fn sync(&self) -> Result<()> { match self { Device::Cpu => Ok(()), Device::Cuda(device) => { #[cfg(feature = "cuda")] return Ok(device.synchronize()?); #[cfg(not(feature = "cuda"))] panic!("Cuda device without cuda feature enabled: {:?}", device) } Device::Metal(device) => { #[cfg(feature = "metal")] return Ok(device.wait_until_completed()?); #[cfg(not(feature = "metal"))] panic!("Metal device without metal feature enabled: {:?}", device) } } } fn bench_name<S: Into<String>>(&self, name: S) -> String { match self { Device::Cpu => { let cpu_type = if cfg!(feature = "accelerate") { "accelerate" } else if cfg!(feature = "mkl") { "mkl" } else { "cpu" }; format!("{}_{}", cpu_type, name.into()) } Device::Cuda(_) => format!("cuda_{}", name.into()), Device::Metal(_) => format!("metal_{}", name.into()), } } } struct BenchDeviceHandler { devices: Vec<Device>, } impl BenchDeviceHandler { pub fn new() -> Result<Self> { let mut devices = Vec::new(); if cfg!(feature = "metal") { devices.push(Device::new_metal(0)?); } else if cfg!(feature = "cuda") { devices.push(Device::new_cuda(0)?); } devices.push(Device::Cpu); Ok(Self { devices }) } }
candle/candle-core/benches/benchmarks/mod.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/mod.rs", "repo_id": "candle", "token_count": 1019 }
15
use super::Cpu; #[cfg(target_arch = "arm")] use core::arch::arm::*; #[cfg(target_arch = "aarch64")] use core::arch::aarch64::*; pub struct CurrentCpu {} const STEP: usize = 16; const EPR: usize = 4; const ARR: usize = STEP / EPR; impl CurrentCpu { #[cfg(target_arch = "aarch64")] unsafe fn reduce_one(x: float32x4_t) -> f32 { vaddvq_f32(x) } #[cfg(target_arch = "arm")] unsafe fn reduce_one(x: float32x4_t) -> f32 { vgetq_lane_f32(x, 0) + vgetq_lane_f32(x, 1) + vgetq_lane_f32(x, 2) + vgetq_lane_f32(x, 3) } } impl Cpu<ARR> for CurrentCpu { type Unit = float32x4_t; type Array = [float32x4_t; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { vdupq_n_f32(0.0) } unsafe fn from_f32(x: f32) -> Self::Unit { vdupq_n_f32(x) } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn load(mem_addr: *const f32) -> Self::Unit { vld1q_f32(mem_addr) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { vaddq_f32(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { vfmaq_f32(a, b, c) } unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) { vst1q_f32(mem_addr, a); } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { for i in 0..ARR / 2 { x[2 * i] = vaddq_f32(x[2 * i], x[2 * i + 1]); } for i in 0..ARR / 4 { x[4 * i] = vaddq_f32(x[4 * i], x[4 * i + 2]); } *y = Self::reduce_one(x[0]); } }
candle/candle-core/src/cpu/neon.rs/0
{ "file_path": "candle/candle-core/src/cpu/neon.rs", "repo_id": "candle", "token_count": 897 }
16
//! Numpy support for tensors.
//!
//! The spec for the npy format can be found in
//! [npy-format](https://docs.scipy.org/doc/numpy-1.14.2/neps/npy-format.html).
//! The functions from this module can be used to read tensors from npy/npz files
//! or write tensors to these files. A npy file contains a single tensor (unnamed)
//! whereas a npz file can contain multiple named tensors. npz files are also compressed.
//!
//! These two formats are easy to use in Python using the numpy library.
//!
//! ```python
//! import numpy as np
//! x = np.arange(10)
//!
//! # Write a npy file.
//! np.save("test.npy", x)
//!
//! # Read a value from the npy file.
//! x = np.load("test.npy")
//!
//! # Write multiple values to a npz file.
//! values = { "x": x, "x_plus_one": x + 1 }
//! np.savez("test.npz", **values)
//!
//! # Load multiple values from a npz file.
//! values = np.load("test.npz")
//! ```
use crate::{DType, Device, Error, Result, Shape, Tensor};
use byteorder::{LittleEndian, ReadBytesExt};
use half::{bf16, f16, slice::HalfFloatSliceExt};
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read, Write};
use std::path::Path;

const NPY_MAGIC_STRING: &[u8] = b"\x93NUMPY";
const NPY_SUFFIX: &str = ".npy";

fn read_header<R: Read>(reader: &mut R) -> Result<String> {
    let mut magic_string = vec![0u8; NPY_MAGIC_STRING.len()];
    reader.read_exact(&mut magic_string)?;
    if magic_string != NPY_MAGIC_STRING {
        return Err(Error::Npy("magic string mismatch".to_string()));
    }
    let mut version = [0u8; 2];
    reader.read_exact(&mut version)?;
    let header_len_len = match version[0] {
        1 => 2,
        2 => 4,
        otherwise => return Err(Error::Npy(format!("unsupported version {otherwise}"))),
    };
    let mut header_len = vec![0u8; header_len_len];
    reader.read_exact(&mut header_len)?;
    let header_len = header_len
        .iter()
        .rev()
        .fold(0_usize, |acc, &v| 256 * acc + v as usize);
    let mut header = vec![0u8; header_len];
    reader.read_exact(&mut header)?;
    Ok(String::from_utf8_lossy(&header).to_string())
}

#[derive(Debug, PartialEq)]
struct Header {
    descr: DType,
    fortran_order: bool,
    shape: Vec<usize>,
}

impl Header {
    fn shape(&self) -> Shape {
        Shape::from(self.shape.as_slice())
    }

    fn to_string(&self) -> Result<String> {
        let fortran_order = if self.fortran_order { "True" } else { "False" };
        let mut shape = self
            .shape
            .iter()
            .map(|x| x.to_string())
            .collect::<Vec<_>>()
            .join(",");
        let descr = match self.descr {
            DType::BF16 => Err(Error::Npy("bf16 is not supported".into()))?,
            DType::F16 => "f2",
            DType::F32 => "f4",
            DType::F64 => "f8",
            DType::I64 => "i8",
            DType::U32 => "u4",
            DType::U8 => "u1",
        };
        if !shape.is_empty() {
            shape.push(',')
        }
        Ok(format!(
            "{{'descr': '<{descr}', 'fortran_order': {fortran_order}, 'shape': ({shape}), }}"
        ))
    }

    // Hacky parser for the npy header, a typical example would be:
    // {'descr': '<f8', 'fortran_order': False, 'shape': (128,), }
    fn parse(header: &str) -> Result<Header> {
        let header =
            header.trim_matches(|c: char| c == '{' || c == '}' || c == ',' || c.is_whitespace());

        let mut parts: Vec<String> = vec![];
        let mut start_index = 0usize;
        let mut cnt_parenthesis = 0i64;
        for (index, c) in header.chars().enumerate() {
            match c {
                '(' => cnt_parenthesis += 1,
                ')' => cnt_parenthesis -= 1,
                ',' => {
                    if cnt_parenthesis == 0 {
                        parts.push(header[start_index..index].to_owned());
                        start_index = index + 1;
                    }
                }
                _ => {}
            }
        }
        parts.push(header[start_index..].to_owned());
        let mut part_map: HashMap<String, String> = HashMap::new();
        for part in parts.iter() {
            let part = part.trim();
            if !part.is_empty() {
                match
part.split(':').collect::<Vec<_>>().as_slice() { [key, value] => { let key = key.trim_matches(|c: char| c == '\'' || c.is_whitespace()); let value = value.trim_matches(|c: char| c == '\'' || c.is_whitespace()); let _ = part_map.insert(key.to_owned(), value.to_owned()); } _ => return Err(Error::Npy(format!("unable to parse header {header}"))), } } } let fortran_order = match part_map.get("fortran_order") { None => false, Some(fortran_order) => match fortran_order.as_ref() { "False" => false, "True" => true, _ => return Err(Error::Npy(format!("unknown fortran_order {fortran_order}"))), }, }; let descr = match part_map.get("descr") { None => return Err(Error::Npy("no descr in header".to_string())), Some(descr) => { if descr.is_empty() { return Err(Error::Npy("empty descr".to_string())); } if descr.starts_with('>') { return Err(Error::Npy(format!("little-endian descr {descr}"))); } // the only supported types in tensor are: // float64, float32, float16, // complex64, complex128, // int64, int32, int16, int8, // uint8, and bool. match descr.trim_matches(|c: char| c == '=' || c == '<' || c == '|') { "e" | "f2" => DType::F16, "f" | "f4" => DType::F32, "d" | "f8" => DType::F64, // "i" | "i4" => DType::S32, "q" | "i8" => DType::I64, // "h" | "i2" => DType::S16, // "b" | "i1" => DType::S8, "B" | "u1" => DType::U8, "I" | "u4" => DType::U32, "?" | "b1" => DType::U8, // "F" | "F4" => DType::C64, // "D" | "F8" => DType::C128, descr => return Err(Error::Npy(format!("unrecognized descr {descr}"))), } } }; let shape = match part_map.get("shape") { None => return Err(Error::Npy("no shape in header".to_string())), Some(shape) => { let shape = shape.trim_matches(|c: char| c == '(' || c == ')' || c == ','); if shape.is_empty() { vec![] } else { shape .split(',') .map(|v| v.trim().parse::<usize>()) .collect::<std::result::Result<Vec<_>, _>>()? } } }; Ok(Header { descr, fortran_order, shape, }) } } impl Tensor { // TODO: Add the possibility to read directly to a device? pub(crate) fn from_reader<R: std::io::Read>( shape: Shape, dtype: DType, reader: &mut R, ) -> Result<Self> { let elem_count = shape.elem_count(); match dtype { DType::BF16 => { let mut data_t = vec![bf16::ZERO; elem_count]; reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F16 => { let mut data_t = vec![f16::ZERO; elem_count]; reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F32 => { let mut data_t = vec![0f32; elem_count]; reader.read_f32_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F64 => { let mut data_t = vec![0f64; elem_count]; reader.read_f64_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::U8 => { let mut data_t = vec![0u8; elem_count]; reader.read_exact(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::U32 => { let mut data_t = vec![0u32; elem_count]; reader.read_u32_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::I64 => { let mut data_t = vec![0i64; elem_count]; reader.read_i64_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } } } /// Reads a npy file and return the stored multi-dimensional array as a tensor. 
pub fn read_npy<T: AsRef<Path>>(path: T) -> Result<Self> { let mut reader = File::open(path.as_ref())?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } Self::from_reader(header.shape(), header.descr, &mut reader) } /// Reads a npz file and returns the stored multi-dimensional arrays together with their names. pub fn read_npz<T: AsRef<Path>>(path: T) -> Result<Vec<(String, Self)>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for i in 0..zip.len() { let mut reader = zip.by_index(i)?; let name = { let name = reader.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push((name, s)) } Ok(result) } /// Reads a npz file and returns the stored multi-dimensional arrays for some specified names. pub fn read_npz_by_name<T: AsRef<Path>>(path: T, names: &[&str]) -> Result<Vec<Self>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for name in names.iter() { let mut reader = match zip.by_name(&format!("{name}{NPY_SUFFIX}")) { Ok(reader) => reader, Err(_) => Err(Error::Npy(format!( "no array for {name} in {:?}", path.as_ref() )))?, }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push(s) } Ok(result) } fn write<T: Write>(&self, f: &mut T) -> Result<()> { f.write_all(NPY_MAGIC_STRING)?; f.write_all(&[1u8, 0u8])?; let header = Header { descr: self.dtype(), fortran_order: false, shape: self.dims().to_vec(), }; let mut header = header.to_string()?; let pad = 16 - (NPY_MAGIC_STRING.len() + 5 + header.len()) % 16; for _ in 0..pad % 16 { header.push(' ') } header.push('\n'); f.write_all(&[(header.len() % 256) as u8, (header.len() / 256) as u8])?; f.write_all(header.as_bytes())?; self.write_bytes(f) } /// Writes a multi-dimensional array in the npy format. pub fn write_npy<T: AsRef<Path>>(&self, path: T) -> Result<()> { let mut f = File::create(path.as_ref())?; self.write(&mut f) } /// Writes multiple multi-dimensional arrays using the npz format. pub fn write_npz<S: AsRef<str>, T: AsRef<Tensor>, P: AsRef<Path>>( ts: &[(S, T)], path: P, ) -> Result<()> { let mut zip = zip::ZipWriter::new(File::create(path.as_ref())?); let options = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored); for (name, tensor) in ts.iter() { zip.start_file(format!("{}.npy", name.as_ref()), options)?; tensor.as_ref().write(&mut zip)? } Ok(()) } } /// Lazy tensor loader. pub struct NpzTensors { index_per_name: HashMap<String, usize>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. 
} impl NpzTensors { pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> { let path = path.as_ref().to_owned(); let zip_reader = BufReader::new(File::open(&path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut index_per_name = HashMap::new(); for i in 0..zip.len() { let file = zip.by_index(i)?; let name = { let name = file.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; index_per_name.insert(name, i); } Ok(Self { index_per_name, path, }) } pub fn names(&self) -> Vec<&String> { self.index_per_name.keys().collect() } /// This only returns the shape and dtype for a named tensor. Compared to `get`, this avoids /// reading the whole tensor data. pub fn get_shape_and_dtype(&self, name: &str) -> Result<(Shape, DType)> { let index = match self.index_per_name.get(name) { None => crate::bail!("cannot find tensor {name}"), Some(index) => *index, }; let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; Ok((header.shape(), header.descr)) } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { let index = match self.index_per_name.get(name) { None => return Ok(None), Some(index) => *index, }; // We hope that the file has not changed since first reading it. let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let tensor = Tensor::from_reader(header.shape(), header.descr, &mut reader)?; Ok(Some(tensor)) } } #[cfg(test)] mod tests { use super::Header; #[test] fn parse() { let h = "{'descr': '<f8', 'fortran_order': False, 'shape': (128,), }"; assert_eq!( Header::parse(h).unwrap(), Header { descr: crate::DType::F64, fortran_order: false, shape: vec![128] } ); let h = "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128), }"; let h = Header::parse(h).unwrap(); assert_eq!( h, Header { descr: crate::DType::F32, fortran_order: true, shape: vec![256, 1, 128] } ); assert_eq!( h.to_string().unwrap(), "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128,), }" ); let h = Header { descr: crate::DType::U32, fortran_order: false, shape: vec![], }; assert_eq!( h.to_string().unwrap(), "{'descr': '<u4', 'fortran_order': False, 'shape': (), }" ); } }
candle/candle-core/src/npy.rs/0
{ "file_path": "candle/candle-core/src/npy.rs", "repo_id": "candle", "token_count": 8717 }
17
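The `npy.rs` module above exposes `write_npy`, `read_npy`, `read_npz`, `write_npz` and the lazy `NpzTensors` loader. The following is a minimal usage sketch, not part of the original file: the file names are illustrative and it assumes the `candle` (candle-core) crate is in scope.

```rust
use candle::{Device, Result, Tensor};

fn npy_roundtrip() -> Result<()> {
    // A 2x3 f32 tensor written to / read back from a single-tensor .npy file.
    let t = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?;
    t.write_npy("test.npy")?;
    let t2 = Tensor::read_npy("test.npy")?;
    assert_eq!(t2.dims(), &[2, 3]);

    // An .npz archive stores several named tensors; the same archive could also be
    // produced from Python with np.savez as shown in the module documentation.
    let u = t.affine(1.0, 1.0)?; // t + 1
    Tensor::write_npz(&[("x", &t), ("x_plus_one", &u)], "test.npz")?;
    for (name, tensor) in Tensor::read_npz("test.npz")? {
        println!("{name}: {:?}", tensor.shape());
    }
    Ok(())
}
```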
use crate::Layout; /// An iterator over offset position for items of an N-dimensional arrays stored in a /// flat buffer using some potential strides. #[derive(Debug)] pub struct StridedIndex<'a> { next_storage_index: Option<usize>, multi_index: Vec<usize>, dims: &'a [usize], stride: &'a [usize], } impl<'a> StridedIndex<'a> { pub(crate) fn new(dims: &'a [usize], stride: &'a [usize], start_offset: usize) -> Self { let elem_count: usize = dims.iter().product(); let next_storage_index = if elem_count == 0 { None } else { // This applies to the scalar case. Some(start_offset) }; StridedIndex { next_storage_index, multi_index: vec![0; dims.len()], dims, stride, } } pub(crate) fn from_layout(l: &'a Layout) -> Self { Self::new(l.dims(), l.stride(), l.start_offset()) } } impl<'a> Iterator for StridedIndex<'a> { type Item = usize; fn next(&mut self) -> Option<Self::Item> { let storage_index = match self.next_storage_index { None => return None, Some(storage_index) => storage_index, }; let mut updated = false; let mut next_storage_index = storage_index; for ((multi_i, max_i), stride_i) in self .multi_index .iter_mut() .zip(self.dims.iter()) .zip(self.stride.iter()) .rev() { let next_i = *multi_i + 1; if next_i < *max_i { *multi_i = next_i; updated = true; next_storage_index += stride_i; break; } else { next_storage_index -= *multi_i * stride_i; *multi_i = 0 } } self.next_storage_index = if updated { Some(next_storage_index) } else { None }; Some(storage_index) } } #[derive(Debug)] pub enum StridedBlocks<'a> { SingleBlock { start_offset: usize, len: usize, }, MultipleBlocks { block_start_index: StridedIndex<'a>, block_len: usize, }, }
candle/candle-core/src/strided_index.rs/0
{ "file_path": "candle/candle-core/src/strided_index.rs", "repo_id": "candle", "token_count": 1148 }
18
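To make the traversal order of `StridedIndex` above concrete, here is a small test-style sketch; since the type is `pub(crate)`, such a test would have to live inside `candle-core` itself, and the dims/strides values are illustrative only. For `dims = [3, 2]` with `stride = [1, 3]` (a transposed 2x3 buffer), element `(i, j)` sits at offset `i * 1 + j * 3`, and the iterator yields offsets in row-major order of the logical shape.

```rust
#[test]
fn transposed_offsets() {
    let dims = [3usize, 2];
    let stride = [1usize, 3];
    // Walk the logical [3, 2] view over the flat buffer, starting at offset 0.
    let offsets: Vec<usize> = StridedIndex::new(&dims, &stride, 0).collect();
    assert_eq!(offsets, vec![0, 3, 1, 4, 2, 5]);
}
```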
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle_transformers::models::bert::{BertModel, Config, HiddenAct, DTYPE}; use anyhow::{Error as E, Result}; use candle::Tensor; use candle_nn::VarBuilder; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::{PaddingParams, Tokenizer}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model to use, check out available models: https://huggingface.co/models?library=sentence-transformers&sort=trending #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, /// When set, compute embeddings for this prompt. #[arg(long)] prompt: Option<String>, /// Use the pytorch weights rather than the safetensors ones #[arg(long)] use_pth: bool, /// The number of times to run the prompt. #[arg(long, default_value = "1")] n: usize, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, /// Use tanh based approximation for Gelu instead of erf implementation. #[arg(long, default_value = "false")] approximate_gelu: bool, } impl Args { fn build_model_and_tokenizer(&self) -> Result<(BertModel, Tokenizer)> { let device = candle_examples::device(self.cpu)?; let default_model = "sentence-transformers/all-MiniLM-L6-v2".to_string(); let default_revision = "refs/pr/21".to_string(); let (model_id, revision) = match (self.model_id.to_owned(), self.revision.to_owned()) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let repo = Repo::with_revision(model_id, RepoType::Model, revision); let (config_filename, tokenizer_filename, weights_filename) = { let api = Api::new()?; let api = api.repo(repo); let config = api.get("config.json")?; let tokenizer = api.get("tokenizer.json")?; let weights = if self.use_pth { api.get("pytorch_model.bin")? } else { api.get("model.safetensors")? }; (config, tokenizer, weights) }; let config = std::fs::read_to_string(config_filename)?; let mut config: Config = serde_json::from_str(&config)?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let vb = if self.use_pth { VarBuilder::from_pth(&weights_filename, DTYPE, &device)? } else { unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], DTYPE, &device)? } }; if self.approximate_gelu { config.hidden_act = HiddenAct::GeluApproximate; } let model = BertModel::load(vb, &config)?; Ok((model, tokenizer)) } } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { println!("tracing..."); let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let start = std::time::Instant::now(); let (model, mut tokenizer) = args.build_model_and_tokenizer()?; let device = &model.device; if let Some(prompt) = args.prompt { let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let token_type_ids = token_ids.zeros_like()?; println!("Loaded and encoded {:?}", start.elapsed()); for idx in 0..args.n { let start = std::time::Instant::now(); let ys = model.forward(&token_ids, &token_type_ids)?; if idx == 0 { println!("{ys}"); } println!("Took {:?}", start.elapsed()); } } else { let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); if let Some(pp) = tokenizer.get_padding_mut() { pp.strategy = tokenizers::PaddingStrategy::BatchLongest } else { let pp = PaddingParams { strategy: tokenizers::PaddingStrategy::BatchLongest, ..Default::default() }; tokenizer.with_padding(Some(pp)); } let tokens = tokenizer .encode_batch(sentences.to_vec(), true) .map_err(E::msg)?; let token_ids = tokens .iter() .map(|tokens| { let tokens = tokens.get_ids().to_vec(); Ok(Tensor::new(tokens.as_slice(), device)?) }) .collect::<Result<Vec<_>>>()?; let token_ids = Tensor::stack(&token_ids, 0)?; let token_type_ids = token_ids.zeros_like()?; println!("running inference on batch {:?}", token_ids.shape()); let embeddings = model.forward(&token_ids, &token_type_ids)?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? } else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); let mut similarities = vec![]; for i in 0..n_sentences { let e_i = embeddings.get(i)?; for j in (i + 1)..n_sentences { let e_j = embeddings.get(j)?; let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> Result<Tensor> { Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?) }
candle/candle-examples/examples/bert/main.rs/0
{ "file_path": "candle/candle-examples/examples/bert/main.rs", "repo_id": "candle", "token_count": 3527 }
19
// TODO: Add an offline mode. #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; use candle_transformers::models::falcon::{Config, Falcon}; struct TextGeneration { model: Falcon, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } struct GenerationOptions { temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { fn new( model: Falcon, tokenizer: Tokenizer, generation_options: GenerationOptions, seed: u64, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, generation_options.temp, generation_options.top_p); let repeat_penalty = generation_options.repeat_penalty; let repeat_last_n = generation_options.repeat_last_n; Self { model, tokenizer, logits_processor, device: device.clone(), repeat_penalty, repeat_last_n, } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { println!("starting the inference loop"); let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let start_gen = std::time::Instant::now(); let context_size = if self.model.config().use_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); println!("> {:?}", start_gen.elapsed()); println!( "{} token: {} '{}'", index + 1, next_token, self.tokenizer.decode(&[next_token], true).map_err(E::msg)? ); } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({} token/s)\n----\n{}\n----", sample_len as f64 / dt.as_secs_f64(), self.tokenizer.decode(&new_tokens, true).map_err(E::msg)? ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// Use f32 computations rather than bf16. #[arg(long)] use_f32: bool, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "tiiuae/falcon-7b")] model_id: String, #[arg(long, default_value = "refs/pr/43")] revision: String, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.0)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let dtype = if args.use_f32 { DType::F32 } else { DType::BF16 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let config = Config::falcon7b(); config.validate()?; let model = Falcon::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let generation_options = GenerationOptions { temp: args.temperature, top_p: args.top_p, repeat_penalty: args.repeat_penalty, repeat_last_n: args.repeat_last_n, }; let mut pipeline = TextGeneration::new(model, tokenizer, generation_options, args.seed, &device); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/falcon/main.rs/0
{ "file_path": "candle/candle-examples/examples/falcon/main.rs", "repo_id": "candle", "token_count": 2723 }
20
# candle-mixtral: 8x7b LLM using a sparse mixture of experts.

Mixtral-8x7B-v0.1 is a pretrained generative LLM built from 8 experts of roughly 7B parameters each (about 47B parameters in total, of which around 13B are active per token).

- [Blog post](https://mistral.ai/news/mixtral-of-experts/) from Mistral announcing the model release.
- [Model card](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) on the HuggingFace Hub.

## Running the example

```bash
$ cargo run --example mixtral --release -- --prompt "def print_prime(n): "
def print_prime(n):
    # n is the number of prime numbers to be printed
    i = 2
    count = 0
    while (count < n):
        if (isPrime(i)):
            print(i)
            count += 1
        i += 1

def isPrime(n):
    for x in range(2, int(n**0.5)+1):
        if (n % x == 0):
            ...
```
candle/candle-examples/examples/mixtral/README.md/0
{ "file_path": "candle/candle-examples/examples/mixtral/README.md", "repo_id": "candle", "token_count": 322 }
21
# candle-quantized-llama: Fast Inference of quantized LLaMA models This example provides a quantized LLaMA model similar to [llama.cpp](https://github.com/ggerganov/llama.cpp). This is based on candle built-in quantization methods. Supported features include: - 2-bit, 3-bit, 4-bit, 5-bit, 6-bit and 8-bit integer quantization support. - SIMD optimizations on Apple Silicon and x86. - Support using the `gguf` and `ggml` file formats. The weights are automatically downloaded for you from the [HuggingFace Hub](https://huggingface.co/) on the first run. There are various command line flags to use local files instead, run with `--help` to learn about them. ![Axiom of Choice](./assets/aoc.gif) ## Running some example. ```bash cargo run --example quantized --release -- --prompt "The best thing about coding in rust is " > avx: true, neon: false, simd128: false, f16c: true > temp: 0.80 repeat-penalty: 1.10 repeat-last-n: 64 > loaded 291 tensors (3.79GB) in 2.17s > params: HParams { n_vocab: 32000, n_embd: 4096, n_mult: 256, n_head: 32, n_layer: 32, n_rot: 128, ftype: 2 } > The best thing about coding in rust is 1.) that I don’t need to worry about memory leaks, 2.) speed and 3.) my program will compile even on old machines. ``` Using the mixtral sparse mixture of expert model: ```bash $ cargo run --example quantized --release -- --which mixtral --prompt "Lebesgue's integral is superior to Riemann's because " > avx: true, neon: false, simd128: false, f16c: true > temp: 0.80 repeat-penalty: 1.10 repeat-last-n: 64 > loaded 995 tensors (26.44GB) in 0.03s Lebesgue's integral is superior to Riemann's because 1. it is defined for a wider class of functions, those which are absolutely integrable; 2. the definition does not involve limits in two variables---one being computed before the other (which makes some computations more difficult); and 3. interchange of order of integration is easier to establish than with Riemann's integral. On the other hand, Lebesgue's integral applies only for bounded functions defined on finite intervals; it does not provide numerical values for improper integrals. The latter are best evaluated using Cauchy's limit definition. The reason $f(x) = x^2$ is discontinuous at the ends of its interval of definition, and Riemann's integral requires continuity on the whole of an open interval containing it (see our earlier post), sine no such function exists with this property, is that the endpoints are infinite in measure for Lebesgue's integral. ``` ## Command-line flags Run with `--help` to see all options. - `--which`: specify the model to use, e.g. `7b`, `13-chat`, `7b-code`. - `--prompt interactive`: interactive mode where multiple prompts can be entered. - `--model mymodelfile.gguf`: use a local model file rather than getting one from the hub.
candle/candle-examples/examples/quantized/README.md/0
{ "file_path": "candle/candle-examples/examples/quantized/README.md", "repo_id": "candle", "token_count": 820 }
22
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::resnet; use clap::{Parser, ValueEnum}; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { #[value(name = "18")] Resnet18, #[value(name = "34")] Resnet34, #[value(name = "50")] Resnet50, #[value(name = "101")] Resnet101, #[value(name = "152")] Resnet152, } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Variant of the model to use. #[arg(value_enum, long, default_value_t = Which::Resnet18)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-resnet".into()); let filename = match args.which { Which::Resnet18 => "resnet18.safetensors", Which::Resnet34 => "resnet34.safetensors", Which::Resnet50 => "resnet50.safetensors", Which::Resnet101 => "resnet101.safetensors", Which::Resnet152 => "resnet152.safetensors", }; api.get(filename)? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let class_count = candle_examples::imagenet::CLASS_COUNT as usize; let model = match args.which { Which::Resnet18 => resnet::resnet18(class_count, vb)?, Which::Resnet34 => resnet::resnet34(class_count, vb)?, Which::Resnet50 => resnet::resnet50(class_count, vb)?, Which::Resnet101 => resnet::resnet101(class_count, vb)?, Which::Resnet152 => resnet::resnet152(class_count, vb)?, }; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/resnet/main.rs/0
{ "file_path": "candle/candle-examples/examples/resnet/main.rs", "repo_id": "candle", "token_count": 1281 }
23
# candle-trocr `TrOCR` is a transformer OCR Model. In this example it is used to transcribe image text. See the associated [model card](https://huggingface.co/microsoft/trocr-base-printed) for details on the model itself. ## Running an example ```bash cargo run --example trocr --release -- --which base --cpu --image candle-examples/examples/trocr/assets/trocr.png ``` ``` <s> industry , Mr. Brown commented icily . " Let us have a</s> ```
candle/candle-examples/examples/trocr/readme.md/0
{ "file_path": "candle/candle-examples/examples/trocr/readme.md", "repo_id": "candle", "token_count": 146 }
24
import numpy as np


def remove_prefix(text, prefix):
    # Strip `prefix` from `text` when present, otherwise return `text` unchanged.
    return text[text.startswith(prefix) and len(prefix):]


# `model` is assumed to be the YOLO v3 PyTorch model already loaded in this session.
nps = {}
for k, v in model.state_dict().items():
    k = remove_prefix(k, 'module_list.')
    nps[k] = v.detach().numpy()
np.savez('yolo-v3.ot', **nps)
candle/candle-examples/examples/yolo-v3/extract-weights.py/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/extract-weights.py", "repo_id": "candle", "token_count": 98 }
25
// Build script to run nvcc and generate the C glue code for launching the flash-attention kernel. // The cuda build time is very long so one can set the CANDLE_FLASH_ATTN_BUILD_DIR environment // variable in order to cache the compiled artifacts and avoid recompiling too often. use anyhow::{Context, Result}; use std::path::PathBuf; const KERNEL_FILES: [&str; 17] = [ "kernels/flash_api.cu", "kernels/flash_fwd_hdim128_fp16_sm80.cu", "kernels/flash_fwd_hdim160_fp16_sm80.cu", "kernels/flash_fwd_hdim192_fp16_sm80.cu", "kernels/flash_fwd_hdim224_fp16_sm80.cu", "kernels/flash_fwd_hdim256_fp16_sm80.cu", "kernels/flash_fwd_hdim32_fp16_sm80.cu", "kernels/flash_fwd_hdim64_fp16_sm80.cu", "kernels/flash_fwd_hdim96_fp16_sm80.cu", "kernels/flash_fwd_hdim128_bf16_sm80.cu", "kernels/flash_fwd_hdim160_bf16_sm80.cu", "kernels/flash_fwd_hdim192_bf16_sm80.cu", "kernels/flash_fwd_hdim224_bf16_sm80.cu", "kernels/flash_fwd_hdim256_bf16_sm80.cu", "kernels/flash_fwd_hdim32_bf16_sm80.cu", "kernels/flash_fwd_hdim64_bf16_sm80.cu", "kernels/flash_fwd_hdim96_bf16_sm80.cu", ]; fn main() -> Result<()> { println!("cargo:rerun-if-changed=build.rs"); for kernel_file in KERNEL_FILES.iter() { println!("cargo:rerun-if-changed={kernel_file}"); } println!("cargo:rerun-if-changed=kernels/flash_fwd_kernel.h"); println!("cargo:rerun-if-changed=kernels/flash_fwd_launch_template.h"); println!("cargo:rerun-if-changed=kernels/flash.h"); println!("cargo:rerun-if-changed=kernels/philox.cuh"); println!("cargo:rerun-if-changed=kernels/softmax.h"); println!("cargo:rerun-if-changed=kernels/utils.h"); println!("cargo:rerun-if-changed=kernels/kernel_traits.h"); println!("cargo:rerun-if-changed=kernels/block_info.h"); println!("cargo:rerun-if-changed=kernels/static_switch.h"); let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?); let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") { Err(_) => { #[allow(clippy::redundant_clone)] out_dir.clone() } Ok(build_dir) => { let path = PathBuf::from(build_dir); path.canonicalize().expect(&format!( "Directory doesn't exists: {} (the current directory is {})", &path.display(), std::env::current_dir()?.display() )) } }; let kernels = KERNEL_FILES.iter().collect(); let builder = bindgen_cuda::Builder::default() .kernel_paths(kernels) .out_dir(build_dir.clone()) .arg("-std=c++17") .arg("-O3") .arg("-U__CUDA_NO_HALF_OPERATORS__") .arg("-U__CUDA_NO_HALF_CONVERSIONS__") .arg("-U__CUDA_NO_HALF2_OPERATORS__") .arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__") .arg("-Icutlass/include") .arg("--expt-relaxed-constexpr") .arg("--expt-extended-lambda") .arg("--use_fast_math") .arg("--verbose"); let out_file = build_dir.join("libflashattention.a"); builder.build_lib(out_file); println!("cargo:rustc-link-search={}", build_dir.display()); println!("cargo:rustc-link-lib=flashattention"); println!("cargo:rustc-link-lib=dylib=cudart"); println!("cargo:rustc-link-lib=dylib=stdc++"); Ok(()) }
candle/candle-flash-attn/build.rs/0
{ "file_path": "candle/candle-flash-attn/build.rs", "repo_id": "candle", "token_count": 1604 }
26
[package] name = "candle-kernels" version = "0.3.3" edition = "2021" description = "CUDA kernels for Candle" repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [dependencies] [build-dependencies] bindgen_cuda = "0.1.1"
candle/candle-kernels/Cargo.toml/0
{ "file_path": "candle/candle-kernels/Cargo.toml", "repo_id": "candle", "token_count": 126 }
27
[package] name = "candle-metal-kernels" version = "0.3.3" edition = "2021" description = "Metal kernels for Candle" repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [dependencies] metal = { version = "0.27.0", features = ["mps"] } once_cell = "1.18.0" thiserror = "1" tracing = "0.1.37" [dev-dependencies] half = { version = "2.3.1", features = [ "num-traits", "use-intrinsics", "rand_distr", ] } rand = "0.8.5"
candle/candle-metal-kernels/Cargo.toml/0
{ "file_path": "candle/candle-metal-kernels/Cargo.toml", "repo_id": "candle", "token_count": 218 }
28
use candle_metal_kernels::{binary, call_binary_contiguous, call_binary_strided, Kernels}; use half::{bf16, f16}; use metal::objc::rc::autoreleasepool; use metal::{Device, MTLResourceOptions}; use rand; use std::any::type_name; use std::time::Instant; fn main() { let device = Device::system_default().unwrap(); let kernels = Kernels::new(); let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>(); let f32_10k = (0..10000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let f32_100k = (0..100000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let f16_map = |v: &[f32]| v.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let f16_1k = f16_map(&f32_1k); let f16_10k = f16_map(&f32_10k); let f16_100k = f16_map(&f32_100k); let bf16_map = |v: &[f32]| v.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let bf16_1k = bf16_map(&f32_1k); let bf16_10k = bf16_map(&f32_10k); let bf16_100k = bf16_map(&f32_100k); let f32_ckernels = [ binary::contiguous::add::FLOAT, binary::contiguous::sub::FLOAT, binary::contiguous::mul::FLOAT, binary::contiguous::div::FLOAT, ]; let f32_skernels = [ binary::strided::add::FLOAT, binary::strided::sub::FLOAT, binary::strided::mul::FLOAT, binary::strided::div::FLOAT, ]; let f16_ckernels = [ binary::contiguous::add::HALF, binary::contiguous::sub::HALF, binary::contiguous::mul::HALF, binary::contiguous::div::HALF, ]; let f16_skernels = [ binary::strided::add::HALF, binary::strided::sub::HALF, binary::strided::mul::HALF, binary::strided::div::HALF, ]; let bf16_ckernels = [ binary::contiguous::add::BFLOAT, binary::contiguous::sub::BFLOAT, binary::contiguous::mul::BFLOAT, binary::contiguous::div::BFLOAT, ]; let bf16_skernels = [ binary::strided::add::BFLOAT, binary::strided::sub::BFLOAT, binary::strided::mul::BFLOAT, binary::strided::div::BFLOAT, ]; println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}", "dtype", "kernel", "size", "runs", "total time", "avg time" ); // f32 run_binary_bench(&device, &kernels, &f32_1k, f32_ckernels, f32_skernels); run_binary_bench(&device, &kernels, &f32_10k, f32_ckernels, f32_skernels); run_binary_bench(&device, &kernels, &f32_100k, f32_ckernels, f32_skernels); // f16 run_binary_bench(&device, &kernels, &f16_1k, f16_ckernels, f16_skernels); run_binary_bench(&device, &kernels, &f16_10k, f16_ckernels, f16_skernels); run_binary_bench(&device, &kernels, &f16_100k, f16_ckernels, f16_skernels); // bf16 run_binary_bench(&device, &kernels, &bf16_1k, bf16_ckernels, bf16_skernels); run_binary_bench(&device, &kernels, &bf16_10k, bf16_ckernels, bf16_skernels); run_binary_bench(&device, &kernels, &bf16_100k, bf16_ckernels, bf16_skernels); } fn run_binary_bench<T: Clone>( device: &Device, kernels: &Kernels, v: &[T], contiguous: [binary::contiguous::Kernel; 4], strided: [binary::strided::Kernel; 4], ) { let command_queue = device.new_command_queue(); let options = MTLResourceOptions::StorageModeManaged; let iterations = 1000; let input = device.new_buffer_with_data( v.as_ptr() as *const core::ffi::c_void, core::mem::size_of_val(v) as u64, options, ); let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options); // Contiguous for kernel_name in contiguous { let total_time = autoreleasepool(|| { let command_buffer = command_queue.new_command_buffer(); let start = Instant::now(); for _ in 0..iterations { call_binary_contiguous( device, &command_buffer, kernels, kernel_name, v.len(), &input, &input, &mut output, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); 
start.elapsed() }); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}", type_name::<T>().split("::").last().unwrap(), kernel_name.to_string(), v.len(), iterations, total_time, total_time / iterations ); } // Strided let shape = vec![2, 5_000]; let strides = vec![2, 1]; let offset = 0; for kernel_name in strided { let total_time = autoreleasepool(|| { let command_buffer = command_queue.new_command_buffer(); let start = Instant::now(); for _ in 0..iterations { call_binary_strided( device, command_buffer, &kernels, kernel_name, &shape, &input, &strides, offset, &input, &strides, offset, &mut output, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); start.elapsed() }); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}", type_name::<T>().split("::").last().unwrap(), kernel_name.to_string(), v.len(), iterations, total_time, total_time / iterations ); } }
candle/candle-metal-kernels/tmp/binary.rs/0
{ "file_path": "candle/candle-metal-kernels/tmp/binary.rs", "repo_id": "candle", "token_count": 3149 }
29
pub mod activation; pub mod batch_norm; pub mod conv; pub mod embedding; pub mod encoding; pub mod func; pub mod group_norm; pub mod init; pub mod layer_norm; pub mod linear; pub mod loss; pub mod ops; pub mod optim; pub mod rnn; pub mod sequential; pub mod var_builder; pub mod var_map; pub use activation::{prelu, Activation, PReLU}; pub use batch_norm::{batch_norm, BatchNorm, BatchNormConfig}; pub use conv::{ conv1d, conv2d, conv2d_no_bias, conv_transpose2d, conv_transpose2d_no_bias, Conv1d, Conv1dConfig, Conv2d, Conv2dConfig, ConvTranspose2d, ConvTranspose2dConfig, }; pub use embedding::{embedding, Embedding}; pub use func::{func, func_t, Func, FuncT}; pub use group_norm::{group_norm, GroupNorm}; pub use init::Init; pub use layer_norm::{layer_norm, rms_norm, LayerNorm, LayerNormConfig, RmsNorm}; pub use linear::{linear, linear_no_bias, Linear}; pub use ops::Dropout; pub use optim::{AdamW, Optimizer, ParamsAdamW, SGD}; pub use rnn::{gru, lstm, GRUConfig, LSTMConfig, GRU, LSTM, RNN}; pub use sequential::{seq, Sequential}; pub use var_builder::VarBuilder; pub use var_map::VarMap; pub use candle::{Module, ModuleT};
candle/candle-nn/src/lib.rs/0
{ "file_path": "candle/candle-nn/src/lib.rs", "repo_id": "candle", "token_count": 421 }
30
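As a quick orientation to the API surface re-exported by `candle-nn` above, here is a hedged sketch wiring a single `Linear` layer through `VarMap`/`VarBuilder`; the prefix `"layer1"` and the shapes are arbitrary illustration choices and are not part of the original file.

```rust
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{linear, Linear, VarBuilder, VarMap};

fn main() -> Result<()> {
    let device = Device::Cpu;
    // VarMap holds the (trainable) variables; VarBuilder hands out named slots.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &device);
    // A 4 -> 2 affine layer registered under the "layer1" prefix.
    let layer: Linear = linear(4, 2, vb.pp("layer1"))?;
    let xs = Tensor::zeros((1, 4), DType::F32, &device)?;
    let ys = layer.forward(&xs)?;
    println!("{:?}", ys.shape()); // expected: [1, 2]
    Ok(())
}
```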
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{test_utils::to_vec2_round, DType, Device, Result, Tensor}; use candle_nn::RNN; /* The following test can be verified against PyTorch using the following snippet. import torch from torch import nn lstm = nn.LSTM(2, 3, 1) lstm.weight_ih_l0 = torch.nn.Parameter(torch.arange(0., 24.).reshape(12, 2).cos()) lstm.weight_hh_l0 = torch.nn.Parameter(torch.arange(0., 36.).reshape(12, 3).sin()) lstm.bias_ih_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1, 1, -0.5, 2])) lstm.bias_hh_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1, 1, -0.5, 2]).cos()) state = torch.zeros((1, 3)), torch.zeros((1, 3)) for inp in [3., 1., 4., 1., 5., 9., 2.]: inp = torch.tensor([[inp, inp * 0.5]]) _out, state = lstm(inp, state) print(state) # (tensor([[ 0.9919, 0.1738, -0.1451]], grad_fn=...), tensor([[ 5.7250, 0.4458, -0.2908]], grad_fn=...)) */ #[test] fn lstm() -> Result<()> { let cpu = &Device::Cpu; let w_ih = Tensor::arange(0f32, 24f32, cpu)?.reshape((12, 2))?; let w_ih = w_ih.cos()?; let w_hh = Tensor::arange(0f32, 36f32, cpu)?.reshape((12, 3))?; let w_hh = w_hh.sin()?; let b_ih = Tensor::new( &[-1f32, 1., -0.5, 2., -1., 1., -0.5, 2., -1., 1., -0.5, 2.], cpu, )?; let b_hh = b_ih.cos()?; let tensors: std::collections::HashMap<_, _> = [ ("weight_ih_l0".to_string(), w_ih), ("weight_hh_l0".to_string(), w_hh), ("bias_ih_l0".to_string(), b_ih), ("bias_hh_l0".to_string(), b_hh), ] .into_iter() .collect(); let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, cpu); let lstm = candle_nn::lstm(2, 3, Default::default(), vb)?; let mut state = lstm.zero_state(1)?; for inp in [3f32, 1., 4., 1., 5., 9., 2.] { let inp = Tensor::new(&[[inp, inp * 0.5]], cpu)?; state = lstm.step(&inp, &state)? } let h = state.h(); let c = state.c(); assert_eq!(to_vec2_round(h, 4)?, &[[0.9919, 0.1738, -0.1451]]); assert_eq!(to_vec2_round(c, 4)?, &[[5.725, 0.4458, -0.2908]]); Ok(()) } /* The following test can be verified against PyTorch using the following snippet. import torch from torch import nn gru = nn.GRU(2, 3, 1) gru.weight_ih_l0 = torch.nn.Parameter(torch.arange(0., 18.).reshape(9, 2).cos()) gru.weight_hh_l0 = torch.nn.Parameter(torch.arange(0., 27.).reshape(9, 3).sin()) gru.bias_ih_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1])) gru.bias_hh_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1]).cos()) state = torch.zeros((1, 3)) for inp in [3., 1., 4., 1., 5., 9., 2.]: inp = torch.tensor([[inp, inp * 0.5]]) _out, state = gru(inp, state) print(state) # tensor([[ 0.0579, 0.8836, -0.9991]], grad_fn=<SqueezeBackward1>) */ #[test] fn gru() -> Result<()> { let cpu = &Device::Cpu; let w_ih = Tensor::arange(0f32, 18f32, cpu)?.reshape((9, 2))?; let w_ih = w_ih.cos()?; let w_hh = Tensor::arange(0f32, 27f32, cpu)?.reshape((9, 3))?; let w_hh = w_hh.sin()?; let b_ih = Tensor::new(&[-1f32, 1., -0.5, 2., -1., 1., -0.5, 2., -1.], cpu)?; let b_hh = b_ih.cos()?; let tensors: std::collections::HashMap<_, _> = [ ("weight_ih_l0".to_string(), w_ih), ("weight_hh_l0".to_string(), w_hh), ("bias_ih_l0".to_string(), b_ih), ("bias_hh_l0".to_string(), b_hh), ] .into_iter() .collect(); let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, cpu); let gru = candle_nn::gru(2, 3, Default::default(), vb)?; let mut state = gru.zero_state(1)?; for inp in [3f32, 1., 4., 1., 5., 9., 2.] 
{ let inp = Tensor::new(&[[inp, inp * 0.5]], cpu)?; state = gru.step(&inp, &state)? } let h = state.h(); assert_eq!(to_vec2_round(h, 4)?, &[[0.0579, 0.8836, -0.9991]]); Ok(()) }
candle/candle-nn/tests/rnn.rs/0
{ "file_path": "candle/candle-nn/tests/rnn.rs", "repo_id": "candle", "token_count": 2010 }
31
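The rnn.rs tests above drive candle_nn::lstm and candle_nn::gru one timestep at a time through the RNN trait, loading fixed weights by name so the outputs can be checked against PyTorch. As a rough usage sketch (not part of the test file), the same zero_state/step pattern also works with randomly initialized parameters; the VarMap-based construction below is an assumption about typical non-test usage, and the input sequence is arbitrary.

use candle::{DType, Device, Result, Tensor};
use candle_nn::{RNN, VarBuilder, VarMap};

fn run_lstm() -> Result<()> {
    let dev = Device::Cpu;
    // Assumed: random initialization through a VarMap rather than the fixed,
    // named tensors loaded by the test above.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let lstm = candle_nn::lstm(2, 3, Default::default(), vb)?;

    // Batch size 1, fed one timestep at a time, exactly like the test.
    let mut state = lstm.zero_state(1)?;
    for t in 0..4 {
        let inp = Tensor::new(&[[t as f32, t as f32 * 0.5]], &dev)?;
        state = lstm.step(&inp, &state)?;
    }
    println!("h: {:?}, c: {:?}", state.h().shape(), state.c().shape());
    Ok(())
}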
# Generated content DO NOT EDIT from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence from os import PathLike from candle.typing import _ArrayLike, Device, Scalar, Index, Shape class bf16(DType): pass @staticmethod def cat(tensors: List[Tensor], dim: int) -> Tensor: """ Concatenate the tensors across one axis. """ pass class f16(DType): pass class f32(DType): pass class f64(DType): pass class i64(DType): pass @staticmethod def ones(*shape: Shape, dtype: Optional[DType] = None, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor filled with ones. """ pass @staticmethod def rand(*shape: Shape, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor with random values. """ pass @staticmethod def randn(*shape: Shape, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor with random values from a normal distribution. """ pass @staticmethod def stack(tensors: List[Tensor], dim: int) -> Tensor: """ Stack the tensors along a new axis. """ pass @staticmethod def tensor(data: _ArrayLike) -> Tensor: """ Creates a new tensor from a Python value. The value can be a scalar or array-like object. """ pass class u32(DType): pass class u8(DType): pass @staticmethod def zeros(*shape: Shape, dtype: Optional[DType] = None, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor filled with zeros. """ pass class DType: """ A `candle` dtype. """ class QTensor: """ A quantized tensor. """ def dequantize(self) -> Tensor: """ Dequantizes the tensor. """ pass @property def ggml_dtype(self) -> str: """ Gets the tensors quantized dtype. """ pass def matmul_t(self, lhs: Tensor) -> Tensor: """ Performs a quantized matrix multiplication, with the quantized tensor as the right hand side. """ pass @property def rank(self) -> int: """ Gets the rank of the tensor. """ pass @property def shape(self) -> Tuple[int]: """ Gets the shape of the tensor. """ pass class Tensor: """ A `candle` tensor. """ def __init__(self, data: _ArrayLike): pass def __add__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Add a scalar to a tensor or two tensors together. """ pass def __eq__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __ge__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __getitem__(self, index: Union[Index, Tensor, Sequence[Index]]) -> "Tensor": """ Return a slice of a tensor. """ pass def __gt__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __le__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __lt__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __mul__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Multiply a tensor by a scalar or one tensor by another. """ pass def __ne__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __radd__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Add a scalar to a tensor or two tensors together. """ pass def __richcmp__(self, rhs: Union[Tensor, Scalar], op) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __rmul__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Multiply a tensor by a scalar or one tensor by another. 
""" pass def __sub__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Subtract a scalar from a tensor or one tensor from another. """ pass def __truediv__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Divide a tensor by a scalar or one tensor by another. """ pass def abs(self) -> Tensor: """ Performs the `abs` operation on the tensor. """ pass def argmax_keepdim(self, dim: int) -> Tensor: """ Returns the indices of the maximum value(s) across the selected dimension. """ pass def argmin_keepdim(self, dim: int) -> Tensor: """ Returns the indices of the minimum value(s) across the selected dimension. """ pass def broadcast_add(self, rhs: Tensor) -> Tensor: """ Adds the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def broadcast_as(self, *shape: Shape) -> Tensor: """ Broadcasts the tensor to the given shape. """ pass def broadcast_div(self, rhs: Tensor) -> Tensor: """ Divides the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def broadcast_left(self, *shape: Shape) -> Tensor: """ Broadcasts the tensor to the given shape, adding new dimensions on the left. """ pass def broadcast_mul(self, rhs: Tensor) -> Tensor: """ Multiplies the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def broadcast_sub(self, rhs: Tensor) -> Tensor: """ Subtracts the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def contiguous(self) -> Tensor: """ Makes the tensor contiguous in memory. """ pass def copy(self) -> Tensor: """ Returns a copy of the tensor. """ pass def cos(self) -> Tensor: """ Performs the `cos` operation on the tensor. """ pass def detach(self) -> Tensor: """ Detach the tensor from the computation graph. """ pass @property def device(self) -> Device: """ Gets the tensor's device. """ pass @property def dtype(self) -> DType: """ Gets the tensor's dtype. """ pass def exp(self) -> Tensor: """ Performs the `exp` operation on the tensor. """ pass def flatten_all(self) -> Tensor: """ Flattens the tensor into a 1D tensor. """ pass def flatten_from(self, dim: int) -> Tensor: """ Flattens the tensor on the dimension indexes from `dim` (inclusive) to the last dimension. """ pass def flatten_to(self, dim: int) -> Tensor: """ Flattens the tensor on the dimension indexes from `0` to `dim` (inclusive). """ pass def get(self, index: int) -> Tensor: """ Gets the value at the specified index. """ pass def index_select(self, rhs: Tensor, dim: int) -> Tensor: """ Select values for the input tensor at the target indexes across the specified dimension. The `indexes` is argument is an int tensor with a single dimension. The output has the same number of dimension as the `self` input. The target dimension of the output has length the length of `indexes` and the values are taken from `self` using the index from `indexes`. Other dimensions have the same number of elements as the input tensor. """ pass def is_contiguous(self) -> bool: """ Returns true if the tensor is contiguous in C order. """ pass def is_fortran_contiguous(self) -> bool: """ Returns true if the tensor is contiguous in Fortran order. """ pass def log(self) -> Tensor: """ Performs the `log` operation on the tensor. """ pass def matmul(self, rhs: Tensor) -> Tensor: """ Performs a matrix multiplication between the two tensors. 
""" pass def max_keepdim(self, dim: int) -> Tensor: """ Gathers the maximum value across the selected dimension. """ pass def mean_all(self) -> Tensor: """ Returns the mean of the tensor. """ pass def min_keepdim(self, dim: int) -> Tensor: """ Gathers the minimum value across the selected dimension. """ pass def narrow(self, dim: int, start: int, len: int) -> Tensor: """ Returns a new tensor that is a narrowed version of the input, the dimension `dim` ranges from `start` to `start + len`. """ pass @property def nelement(self) -> int: """ Gets the tensor's element count. """ pass def powf(self, p: float) -> Tensor: """ Performs the `pow` operation on the tensor with the given exponent. """ pass def quantize(self, quantized_dtype: str) -> QTensor: """ Quantize the tensor. """ pass @property def rank(self) -> int: """ Gets the tensor's rank. """ pass def recip(self) -> Tensor: """ Get the `recip` of the tensor. """ pass def reshape(self, *shape: Shape) -> Tensor: """ Reshapes the tensor to the given shape. """ pass @property def shape(self) -> Tuple[int]: """ Gets the tensor's shape. """ pass def sin(self) -> Tensor: """ Performs the `sin` operation on the tensor. """ pass def sqr(self) -> Tensor: """ Squares the tensor. """ pass def sqrt(self) -> Tensor: """ Calculates the square root of the tensor. """ pass def squeeze(self, dim: int) -> Tensor: """ Creates a new tensor with the specified dimension removed if its size was one. """ pass @property def stride(self) -> Tuple[int]: """ Gets the tensor's strides. """ pass def sum_all(self) -> Tensor: """ Returns the sum of the tensor. """ pass def sum_keepdim(self, dim: Union[int, List[int]]) -> Tensor: """ Returns the sum of all elements in the input tensor. The sum is performed over all the input dimensions. """ pass def t(self) -> Tensor: """ Transposes the tensor. """ pass def to(self, *args, **kwargs) -> Tensor: """ Performs Tensor dtype and/or device conversion. """ pass def to_device(self, device: Union[str, Device]) -> Tensor: """ Move the tensor to a new device. """ pass def to_dtype(self, dtype: Union[str, DType]) -> Tensor: """ Convert the tensor to a new dtype. """ pass def to_torch(self) -> torch.Tensor: """ Converts candle's tensor to pytorch's tensor """ pass def transpose(self, dim1: int, dim2: int) -> Tensor: """ Returns a tensor that is a transposed version of the input, the given dimensions are swapped. """ pass def unsqueeze(self, dim: int) -> Tensor: """ Creates a new tensor with a dimension of size one inserted at the specified position. """ pass def values(self) -> _ArrayLike: """ Gets the tensor's data as a Python scalar or array-like object. """ pass def where_cond(self, on_true: Tensor, on_false: Tensor) -> Tensor: """ Returns a tensor with the same shape as the input tensor, the values are taken from `on_true` if the input tensor value is not zero, and `on_false` at the positions where the input tensor is equal to zero. """ pass
candle/candle-pyo3/py_src/candle/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/__init__.pyi", "repo_id": "candle", "token_count": 5785 }
32
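The __init__.pyi stub above documents the Python bindings for candle's tensor type. The bindings mirror the Rust candle::Tensor API, so a rough Rust sketch of the same operations (construction, matmul, transpose, dtype conversion) is shown below for comparison; the values are arbitrary and this only illustrates the correspondence, not the bindings themselves.

use candle::{DType, Device, Result, Tensor};

fn tensor_basics() -> Result<()> {
    let dev = Device::Cpu;
    // Mirrors `candle.Tensor([[1., 2., 3.], [4., 5., 6.]])` on the Python side.
    let a = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &dev)?;
    let b = Tensor::arange(0f32, 6f32, &dev)?.reshape((3, 2))?;

    let c = a.matmul(&b)?; // (2, 3) x (3, 2) -> (2, 2), `Tensor.matmul` above
    let c_t = c.t()?; // `Tensor.t`
    let total = c.sum_all()?.to_scalar::<f32>()?; // `Tensor.sum_all` + `values`
    let c_f64 = c.to_dtype(DType::F64)?; // `Tensor.to_dtype`

    println!("{:?} {total} {:?}", c_t.shape(), c_f64.dtype());
    Ok(())
}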
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor

@staticmethod
def cuda_is_available() -> bool:
    """
    Returns true if the 'cuda' backend is available.
    """
    pass

@staticmethod
def get_num_threads() -> int:
    """
    Returns the number of threads used by candle.
    """
    pass

@staticmethod
def has_accelerate() -> bool:
    """
    Returns true if candle was compiled with 'accelerate' support.
    """
    pass

@staticmethod
def has_mkl() -> bool:
    """
    Returns true if candle was compiled with MKL support.
    """
    pass

@staticmethod
def load_ggml(
    path: Union[str, PathLike], device: Optional[Device] = None
) -> Tuple[Dict[str, QTensor], Dict[str, Any], List[str]]:
    """
    Load a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors,
    a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary.
    """
    pass

@staticmethod
def load_gguf(
    path: Union[str, PathLike], device: Optional[Device] = None
) -> Tuple[Dict[str, QTensor], Dict[str, Any]]:
    """
    Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors,
    and the second maps metadata keys to metadata values.
    """
    pass

@staticmethod
def load_safetensors(path: Union[str, PathLike]) -> Dict[str, Tensor]:
    """
    Loads a safetensors file. Returns a dictionary mapping tensor names to tensors.
    """
    pass

@staticmethod
def save_gguf(path: Union[str, PathLike], tensors: Dict[str, QTensor], metadata: Dict[str, Any]):
    """
    Saves quantized tensors and metadata to a GGUF file.
    """
    pass

@staticmethod
def save_safetensors(path: Union[str, PathLike], tensors: Dict[str, Tensor]) -> None:
    """
    Saves a dictionary of tensors to a safetensors file.
    """
    pass
candle/candle-pyo3/py_src/candle/utils/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/utils/__init__.pyi", "repo_id": "candle", "token_count": 712 }
33
import candle from candle import Tensor, QTensor from candle.utils import load_safetensors, save_gguf, load_gguf, save_safetensors from pathlib import Path TEST_DIR = Path(__file__).parent.parent / "_workdir" TEST_DIR.mkdir(exist_ok=True) def test_can_roundtrip_safetensors(): tensors = { "a": candle.randn((16, 256)), "b": candle.randn((16, 16)), } file = str(TEST_DIR / "test.safetensors") save_safetensors(file, tensors) loaded_tensors = load_safetensors(file) assert set(tensors.keys()) == set(loaded_tensors.keys()) for key in tensors.keys(): assert tensors[key].values() == loaded_tensors[key].values(), "Values are not equal" assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal" assert str(tensors[key].dtype) == str(loaded_tensors[key].dtype), "Dtypes are not equal" def test_can_roundtrip_gguf(): metadata = { "a": 1, "b": "foo", "c": [1, 2, 3], "d": [[1, 2], [3, 4]], } tensors = { "a": candle.randn((16, 256)).quantize("q4_0"), "b": candle.randn((16, 16)).quantize("f32"), } file = str(TEST_DIR / "test.gguf") save_gguf(file, tensors, metadata) loaded_tensors, loaded_metadata = load_gguf(file) assert set(metadata.keys()) == set(loaded_metadata.keys()) for key in metadata.keys(): assert metadata[key] == loaded_metadata[key] assert set(tensors.keys()) == set(loaded_tensors.keys()) for key in tensors.keys(): assert tensors[key].dequantize().values() == loaded_tensors[key].dequantize().values(), "Values are not equal" assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal" assert str(tensors[key].ggml_dtype) == str(loaded_tensors[key].ggml_dtype), "Dtypes are not equal"
candle/candle-pyo3/tests/native/test_utils.py/0
{ "file_path": "candle/candle-pyo3/tests/native/test_utils.py", "repo_id": "candle", "token_count": 774 }
34
use crate::quantized_nn::{layer_norm, linear, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::Activation; pub use crate::models::mixformer::Config; const MAX_SEQ_LEN: usize = 4096; #[derive(Debug, Clone)] struct Embedding { wte: crate::quantized_nn::Embedding, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let wte = crate::quantized_nn::Embedding::new(cfg.vocab_size, cfg.n_embd, vb.pp("wte"))?; Ok(Self { wte }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.wte.forward(xs) } } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dim: usize, max_seq_len: usize, dev: &Device) -> Result<Self> { let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(DType::F32)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, qkv: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor, Tensor)> { let (_b_size, seqlen, three, _, _headdim) = qkv.dims5()?; if three != 3 { candle::bail!("unexpected shape for qkv {:?}", qkv.shape()) } let (_rotary_seqlen, rotary_dim) = self.cos.dims2()?; let rotary_dim = rotary_dim * 2; let q_rot = qkv.i((.., .., 0, .., ..rotary_dim))?; let q_pass = qkv.i((.., .., 0, .., rotary_dim..))?; let k_rot = qkv.i((.., .., 1, .., ..rotary_dim))?; let k_pass = qkv.i((.., .., 1, .., rotary_dim..))?; let q12 = q_rot.chunk(2, D::Minus1)?; let k12 = k_rot.chunk(2, D::Minus1)?; let (q1, q2) = (&q12[0], &q12[1]); let (k1, k2) = (&k12[0], &k12[1]); let c = self.cos.narrow(0, seqlen_offset, seqlen)?.unsqueeze(1)?; let s = self.sin.narrow(0, seqlen_offset, seqlen)?.unsqueeze(1)?; let q_rot = Tensor::cat( &[ (q1.broadcast_mul(&c)? - q2.broadcast_mul(&s)?)?, (q1.broadcast_mul(&s)? + q2.broadcast_mul(&c)?)?, ], D::Minus1, )?; let k_rot = Tensor::cat( &[ (k1.broadcast_mul(&c)? - k2.broadcast_mul(&s)?)?, (k1.broadcast_mul(&s)? 
+ k2.broadcast_mul(&c)?)?, ], D::Minus1, )?; let q = Tensor::cat(&[&q_rot, &q_pass], D::Minus1)?; let k = Tensor::cat(&[&k_rot, &k_pass], D::Minus1)?; let v = qkv.i((.., .., 2))?; Ok((q, k, v)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { fc1: Linear, fc2: Linear, act: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_inner = cfg.n_inner.unwrap_or(4 * cfg.n_embd); let fc1 = linear(cfg.n_embd, n_inner, vb.pp("fc1"))?; let fc2 = linear(n_inner, cfg.n_embd, vb.pp("fc2"))?; Ok(Self { fc1, fc2, act: cfg.activation_function, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2) } } #[derive(Debug, Clone)] struct CausalLMHead { ln: candle_nn::LayerNorm, linear: Linear, } impl CausalLMHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let linear = linear(cfg.n_embd, cfg.vocab_size, vb.pp("linear"))?; Ok(Self { ln, linear }) } } impl Module for CausalLMHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.ln)? .apply(&self.linear)? .to_dtype(DType::F32) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MHA { wqkv: Linear, out_proj: Linear, rotary_emb: RotaryEmbedding, kv_cache: Option<(Tensor, Tensor)>, head_dim: usize, n_head: usize, softmax_scale: f64, span: tracing::Span, } impl MHA { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let head_dim = cfg.n_embd / cfg.n_head; let op_size = cfg.n_embd; let wqkv = linear(cfg.n_embd, 3 * op_size, vb.pp("Wqkv"))?; let out_proj = linear(op_size, cfg.n_embd, vb.pp("out_proj"))?; let rotary_emb = RotaryEmbedding::new(cfg.rotary_dim, MAX_SEQ_LEN, vb.device())?; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); Ok(Self { wqkv, out_proj, head_dim, n_head: cfg.n_head, kv_cache: None, rotary_emb, softmax_scale, span: tracing::span!(tracing::Level::TRACE, "mha"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let qkv = self .wqkv .forward(xs)? .reshape((b_size, seq_len, 3, (), self.head_dim))?; let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(1)?, }; // In the python implementation, a single tensor is returned with the third axis of size 3. let (q, k, v) = self.rotary_emb.apply_rotary_emb_qkv(&qkv, seqlen_offset)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 1)?; let v = Tensor::cat(&[prev_v, &v], 1)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); // scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale) let q = q.transpose(1, 2)?.flatten_to(1)?; // b*h, t, d let k = k.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let v = v.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let attn_weights = (q.matmul(&k.t()?)? 
* self.softmax_scale)?; // b*h, t, s // causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=scores.device), 1) // scores = scores + causal_mask.to(dtype=scores.dtype) let attn_weights = match mask { None => attn_weights, Some(mask) => masked_fill( &attn_weights, &mask.broadcast_left(b_size * self.n_head)?, f32::NEG_INFINITY, )?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; // output = torch.einsum('bhts,bshd->bthd', attention_drop, v) // attn_weights: b*h,t,s, v: b*h,s,d let attn_output = attn_weights.matmul(&v)?; // b*h,t,d let attn_output = attn_output .reshape((b_size, (), seq_len, self.head_dim))? .transpose(1, 2)? .flatten_from(D::Minus2)?; attn_output.apply(&self.out_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct ParallelBlock { ln: candle_nn::LayerNorm, mixer: MHA, mlp: MLP, span: tracing::Span, } impl ParallelBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let mixer = MHA::new(cfg, vb.pp("mixer"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { ln, mixer, mlp, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = xs.apply(&self.ln)?; let attn_outputs = self.mixer.forward(&xs, mask)?; let feed_forward_hidden_states = self.mlp.forward(&xs)?; attn_outputs + feed_forward_hidden_states + residual } fn clear_kv_cache(&mut self) { self.mixer.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct MixFormerSequentialForCausalLM { embedding: Embedding, blocks: Vec<ParallelBlock>, head: CausalLMHead, span: tracing::Span, } impl MixFormerSequentialForCausalLM { pub fn new_v2(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_head = vb.pp("lm_head"); let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embd"))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp("h").pp(i))?; blocks.push(block) } let head = CausalLMHead::new(cfg, vb_head)?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, "mixformer"), }) } pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("layers"); let embedding = Embedding::new(cfg, vb.pp(0))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp(i + 1))?; blocks.push(block); } let head = CausalLMHead::new(cfg, vb.pp(cfg.n_layer + 1))?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, "mixformer"), }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.embedding)?; let mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.device())?) }; for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())?; } xs.narrow(1, seq_len - 1, 1)?.apply(&self.head)?.squeeze(1) } pub fn clear_kv_cache(&mut self) { self.blocks.iter_mut().for_each(|b| b.clear_kv_cache()) } }
candle/candle-transformers/src/models/quantized_mixformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_mixformer.rs", "repo_id": "candle", "token_count": 5892 }
35
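The quantized MixFormer module above takes its weights through the GGUF-backed VarBuilder it re-exports, and it also re-exports Config from models::mixformer. A minimal loading sketch could look as follows; the file name is a placeholder, the Config is assumed to be constructed elsewhere, and the prompt token ids are dummy values.

use candle::{Device, Result, Tensor};
use candle_transformers::models::quantized_mixformer::{
    Config, MixFormerSequentialForCausalLM, VarBuilder,
};

fn load_and_run(cfg: &Config) -> Result<()> {
    let device = Device::Cpu;
    // Placeholder file name; any phi/mixformer GGUF checkpoint with matching
    // tensor names would do.
    let vb = VarBuilder::from_gguf("model-q4k.gguf", &device)?;
    let mut model = MixFormerSequentialForCausalLM::new_v2(cfg, vb)?;

    // Dummy prompt of token ids, shape (batch = 1, seq_len = 4).
    let prompt = Tensor::new(&[[1u32, 2, 3, 4]], &device)?;
    let logits = model.forward(&prompt)?; // logits for the last position only
    println!("logits: {:?}", logits.shape());
    model.clear_kv_cache();
    Ok(())
}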
use super::schedulers::{betas_for_alpha_bar, BetaSchedule, PredictionType};
use candle::{Result, Tensor};

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DDPMVarianceType {
    FixedSmall,
    FixedSmallLog,
    FixedLarge,
    FixedLargeLog,
    Learned,
}

impl Default for DDPMVarianceType {
    fn default() -> Self {
        Self::FixedSmall
    }
}

#[derive(Debug, Clone)]
pub struct DDPMSchedulerConfig {
    /// The value of beta at the beginning of training.
    pub beta_start: f64,
    /// The value of beta at the end of training.
    pub beta_end: f64,
    /// How beta evolved during training.
    pub beta_schedule: BetaSchedule,
    /// Option to clip the predicted sample between -1 and 1 for numerical stability.
    pub clip_sample: bool,
    /// Option to clip the variance used when adding noise to the denoised sample.
    pub variance_type: DDPMVarianceType,
    /// prediction type of the scheduler function
    pub prediction_type: PredictionType,
    /// number of diffusion steps used to train the model.
    pub train_timesteps: usize,
}

impl Default for DDPMSchedulerConfig {
    fn default() -> Self {
        Self {
            beta_start: 0.00085,
            beta_end: 0.012,
            beta_schedule: BetaSchedule::ScaledLinear,
            clip_sample: false,
            variance_type: DDPMVarianceType::FixedSmall,
            prediction_type: PredictionType::Epsilon,
            train_timesteps: 1000,
        }
    }
}

pub struct DDPMScheduler {
    alphas_cumprod: Vec<f64>,
    init_noise_sigma: f64,
    timesteps: Vec<usize>,
    step_ratio: usize,
    pub config: DDPMSchedulerConfig,
}

impl DDPMScheduler {
    pub fn new(inference_steps: usize, config: DDPMSchedulerConfig) -> Result<Self> {
        let betas = match config.beta_schedule {
            BetaSchedule::ScaledLinear => super::utils::linspace(
                config.beta_start.sqrt(),
                config.beta_end.sqrt(),
                config.train_timesteps,
            )?
            .sqr()?,
            BetaSchedule::Linear => {
                super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)?
            }
            BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?,
        };

        let betas = betas.to_vec1::<f64>()?;
        let mut alphas_cumprod = Vec::with_capacity(betas.len());
        for &beta in betas.iter() {
            let alpha = 1.0 - beta;
            alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64))
        }

        // min(train_timesteps, inference_steps)
        // https://github.com/huggingface/diffusers/blob/8331da46837be40f96fbd24de6a6fb2da28acd11/src/diffusers/schedulers/scheduling_ddpm.py#L187
        let inference_steps = inference_steps.min(config.train_timesteps);
        // arange the number of the scheduler's timesteps
        let step_ratio = config.train_timesteps / inference_steps;
        let timesteps: Vec<usize> = (0..inference_steps).map(|s| s * step_ratio).rev().collect();

        Ok(Self {
            alphas_cumprod,
            init_noise_sigma: 1.0,
            timesteps,
            step_ratio,
            config,
        })
    }

    fn get_variance(&self, timestep: usize) -> f64 {
        let prev_t = timestep as isize - self.step_ratio as isize;
        let alpha_prod_t = self.alphas_cumprod[timestep];
        let alpha_prod_t_prev = if prev_t >= 0 {
            self.alphas_cumprod[prev_t as usize]
        } else {
            1.0
        };
        let current_beta_t = 1. - alpha_prod_t / alpha_prod_t_prev;

        // For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        // and sample from it to get previous sample
        // x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        let variance = (1. - alpha_prod_t_prev) / (1. - alpha_prod_t) * current_beta_t;

        // retrieve variance
        match self.config.variance_type {
            DDPMVarianceType::FixedSmall => variance.max(1e-20),
            // for rl-diffuser https://arxiv.org/abs/2205.09991
            DDPMVarianceType::FixedSmallLog => {
                let variance = variance.max(1e-20).ln();
                (variance * 0.5).exp()
            }
            DDPMVarianceType::FixedLarge => current_beta_t,
            DDPMVarianceType::FixedLargeLog => current_beta_t.ln(),
            DDPMVarianceType::Learned => variance,
        }
    }

    pub fn timesteps(&self) -> &[usize] {
        self.timesteps.as_slice()
    }

    /// Ensures interchangeability with schedulers that need to scale the denoising model input
    /// depending on the current timestep.
    pub fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Tensor {
        sample
    }

    pub fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> {
        let prev_t = timestep as isize - self.step_ratio as isize;

        // https://github.com/huggingface/diffusers/blob/df2b548e893ccb8a888467c2508756680df22821/src/diffusers/schedulers/scheduling_ddpm.py#L272
        // 1. compute alphas, betas
        let alpha_prod_t = self.alphas_cumprod[timestep];
        let alpha_prod_t_prev = if prev_t >= 0 {
            self.alphas_cumprod[prev_t as usize]
        } else {
            1.0
        };
        let beta_prod_t = 1. - alpha_prod_t;
        let beta_prod_t_prev = 1. - alpha_prod_t_prev;
        let current_alpha_t = alpha_prod_t / alpha_prod_t_prev;
        let current_beta_t = 1. - current_alpha_t;

        // 2. compute predicted original sample from predicted noise also called "predicted x_0" of formula (15)
        let mut pred_original_sample = match self.config.prediction_type {
            PredictionType::Epsilon => {
                ((sample - model_output * beta_prod_t.sqrt())? / alpha_prod_t.sqrt())?
            }
            PredictionType::Sample => model_output.clone(),
            PredictionType::VPrediction => {
                ((sample * alpha_prod_t.sqrt())? - model_output * beta_prod_t.sqrt())?
            }
        };

        // 3. clip predicted x_0
        if self.config.clip_sample {
            pred_original_sample = pred_original_sample.clamp(-1f32, 1f32)?;
        }

        // 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        // See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        let pred_original_sample_coeff = (alpha_prod_t_prev.sqrt() * current_beta_t) / beta_prod_t;
        let current_sample_coeff = current_alpha_t.sqrt() * beta_prod_t_prev / beta_prod_t;

        // 5. Compute predicted previous sample µ_t
        // See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        let pred_prev_sample = ((&pred_original_sample * pred_original_sample_coeff)?
            + sample * current_sample_coeff)?;

        // https://github.com/huggingface/diffusers/blob/df2b548e893ccb8a888467c2508756680df22821/src/diffusers/schedulers/scheduling_ddpm.py#L305
        // 6. Add noise
        let mut variance = model_output.zeros_like()?;
        if timestep > 0 {
            let variance_noise = model_output.randn_like(0., 1.)?;
            if self.config.variance_type == DDPMVarianceType::FixedSmallLog {
                variance = (variance_noise * self.get_variance(timestep))?;
            } else {
                variance = (variance_noise * self.get_variance(timestep).sqrt())?;
            }
        }
        &pred_prev_sample + variance
    }

    pub fn add_noise(
        &self,
        original_samples: &Tensor,
        noise: Tensor,
        timestep: usize,
    ) -> Result<Tensor> {
        (original_samples * self.alphas_cumprod[timestep].sqrt())?
            + noise * (1. - self.alphas_cumprod[timestep]).sqrt()
    }

    pub fn init_noise_sigma(&self) -> f64 {
        self.init_noise_sigma
    }
}
candle/candle-transformers/src/models/stable_diffusion/ddpm.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/ddpm.rs", "repo_id": "candle", "token_count": 3662 }
36
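The scheduler above exposes timesteps, step and init_noise_sigma, which is enough to sketch a denoising loop. The stand-in below replaces the UNet with a closure-like step that predicts zero noise, so it only illustrates the control flow, not a working diffusion pipeline; the sample shape is arbitrary.

use candle::{Device, Result, Tensor};
use candle_transformers::models::stable_diffusion::ddpm::{DDPMScheduler, DDPMSchedulerConfig};

fn denoise() -> Result<()> {
    let device = Device::Cpu;
    let scheduler = DDPMScheduler::new(50, DDPMSchedulerConfig::default())?;

    // Start from noise scaled by the scheduler's initial sigma (1.0 for DDPM).
    let mut sample =
        (Tensor::randn(0f32, 1f32, (1, 3, 8, 8), &device)? * scheduler.init_noise_sigma())?;

    for &t in scheduler.timesteps() {
        // A real UNet noise prediction would go here; this stand-in predicts zero noise.
        let model_output = sample.zeros_like()?;
        sample = scheduler.step(&model_output, t, &sample)?;
    }
    println!("final sample: {:?}", sample.shape());
    Ok(())
}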
pub mod audio; pub mod model; pub mod quantized_model; use serde::Deserialize; // The names in comments correspond to the original implementation: // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L17 #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub num_mel_bins: usize, // n_mels pub max_source_positions: usize, // n_audio_ctx pub d_model: usize, // n_audio_state pub encoder_attention_heads: usize, // n_audio_head pub encoder_layers: usize, // n_audio_layer pub vocab_size: usize, // n_vocab pub max_target_positions: usize, // n_text_ctx // pub n_text_state: usize, pub decoder_attention_heads: usize, // n_text_head pub decoder_layers: usize, // n_text_layer #[serde(default)] pub suppress_tokens: Vec<u32>, } pub const DTYPE: candle::DType = candle::DType::F32; // Audio parameters. pub const SAMPLE_RATE: usize = 16000; pub const N_FFT: usize = 400; pub const HOP_LENGTH: usize = 160; pub const CHUNK_LENGTH: usize = 30; pub const N_SAMPLES: usize = CHUNK_LENGTH * SAMPLE_RATE; // 480000 samples in a 30-second chunk pub const N_FRAMES: usize = N_SAMPLES / HOP_LENGTH; // 3000 frames in a mel spectrogram input pub const NO_SPEECH_THRESHOLD: f64 = 0.6; pub const LOGPROB_THRESHOLD: f64 = -1.0; pub const TEMPERATURES: [f64; 6] = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]; pub const COMPRESSION_RATIO_THRESHOLD: f64 = 2.4; // Tokenizer dependent bits. pub const SOT_TOKEN: &str = "<|startoftranscript|>"; pub const TRANSCRIBE_TOKEN: &str = "<|transcribe|>"; pub const TRANSLATE_TOKEN: &str = "<|translate|>"; pub const NO_TIMESTAMPS_TOKEN: &str = "<|notimestamps|>"; pub const EOT_TOKEN: &str = "<|endoftext|>"; pub const NO_SPEECH_TOKENS: [&str; 2] = ["<|nocaptions|>", "<|nospeech|>"];
candle/candle-transformers/src/models/whisper/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/whisper/mod.rs", "repo_id": "candle", "token_count": 812 }
37
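Since Config derives Deserialize, the Hugging Face config.json of a Whisper checkpoint can be parsed directly, and the audio constants above pin down the framing. A small sketch, assuming the usual models::whisper module path and a serde_json dependency:

use candle_transformers::models::whisper::{self, Config};

fn inspect_config(config_json: &str) -> Result<Config, serde_json::Error> {
    // `Config` derives `Deserialize`, so a checkpoint's config.json parses directly.
    let cfg: Config = serde_json::from_str(config_json)?;
    println!(
        "mel bins: {}, decoder layers: {}",
        cfg.num_mel_bins, cfg.decoder_layers
    );

    // 30 s of 16 kHz audio -> 480_000 samples -> 3_000 mel frames.
    assert_eq!(whisper::N_SAMPLES, whisper::CHUNK_LENGTH * whisper::SAMPLE_RATE);
    assert_eq!(whisper::N_FRAMES, whisper::N_SAMPLES / whisper::HOP_LENGTH);
    Ok(cfg)
}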
use candle::quantized::QTensor; use candle::{Device, Result, Shape}; use std::sync::Arc; // VarBuilder specialized for QTensors pub struct VarBuilder { data: Arc<std::collections::HashMap<String, Arc<QTensor>>>, path: Vec<String>, device: Device, } impl VarBuilder { pub fn from_gguf<P: AsRef<std::path::Path>>(p: P, device: &Device) -> Result<Self> { let mut file = std::fs::File::open(p)?; let content = candle::quantized::gguf_file::Content::read(&mut file)?; let mut data = std::collections::HashMap::new(); for tensor_name in content.tensor_infos.keys() { let tensor = content.tensor(&mut file, tensor_name, device)?; data.insert(tensor_name.to_string(), Arc::new(tensor)); } Ok(Self { data: Arc::new(data), path: Vec::new(), device: device.clone(), }) } pub fn from_gguf_buffer(buffer: &[u8], device: &Device) -> Result<Self> { let mut cursor = std::io::Cursor::new(buffer); let content = candle::quantized::gguf_file::Content::read(&mut cursor)?; let mut data = std::collections::HashMap::new(); for tensor_name in content.tensor_infos.keys() { let tensor = content.tensor(&mut cursor, tensor_name, device)?; data.insert(tensor_name.to_string(), Arc::new(tensor)); } Ok(Self { data: Arc::new(data), path: Vec::new(), device: device.clone(), }) } pub fn pp<S: ToString>(&self, s: S) -> Self { let mut path = self.path.clone(); path.push(s.to_string()); Self { data: self.data.clone(), path, device: self.device.clone(), } } fn path(&self, tensor_name: &str) -> String { if self.path.is_empty() { tensor_name.to_string() } else { [&self.path.join("."), tensor_name].join(".") } } pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Arc<QTensor>> { let path = self.path(name); match self.data.get(&path) { None => { candle::bail!("cannot find tensor {name}") } Some(qtensor) => { let shape = s.into(); if qtensor.shape() != &shape { candle::bail!( "shape mismatch for {name}, got {:?}, expected {shape:?}", qtensor.shape() ) } Ok(qtensor.clone()) } } } pub fn get_no_shape(&self, name: &str) -> Result<Arc<QTensor>> { let path = self.path(name); match self.data.get(&path) { None => { candle::bail!("cannot find tensor {name}") } Some(qtensor) => Ok(qtensor.clone()), } } pub fn device(&self) -> &Device { &self.device } pub fn contains_key(&self, key: &str) -> bool { self.data.contains_key(key) } }
candle/candle-transformers/src/quantized_var_builder.rs/0
{ "file_path": "candle/candle-transformers/src/quantized_var_builder.rs", "repo_id": "candle", "token_count": 1550 }
38
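A short sketch of how this VarBuilder is typically used from model code: open a GGUF file, push prefixes with pp, and fetch a named quantized tensor (get checks the expected shape, get_no_shape skips that check). The file name and tensor name below are placeholders, not artifacts shipped with the repo, and the module is assumed to be public (the quantized model files above re-export the same type).

use candle::{Device, Result};
use candle_transformers::quantized_var_builder::VarBuilder;

fn peek_weight() -> Result<()> {
    let device = Device::Cpu;
    // Placeholder file and tensor names; they follow the phi-style layout used by
    // the quantized mixformer above.
    let vb = VarBuilder::from_gguf("model.gguf", &device)?;

    // Prefixes compose with `pp`, exactly as the model code does.
    let embd = vb.pp("transformer").pp("embd");
    let qweight = embd.get_no_shape("wte.weight")?;
    println!("quantized tensor shape: {:?}", qweight.shape());
    Ok(())
}

Model code then wraps these Arc<QTensor> handles in the quantized_nn layers (linear, layer_norm, Embedding) rather than using them directly.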
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use candle_transformers::models::blip; use candle_transformers::models::quantized_blip; use candle_wasm_example_blip::console_log; use candle_wasm_example_blip::token_output_stream::TokenOutputStream; use js_sys::Date; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; enum SelectedModel { M(blip::BlipForConditionalGeneration), Q(quantized_blip::BlipForConditionalGeneration), } impl SelectedModel { fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor, JsError> { match self { Self::M(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), Self::Q(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), } } fn reset_kv_cache(&mut self) { match self { Self::M(m) => m.reset_kv_cache(), Self::Q(m) => m.reset_kv_cache(), } } } #[wasm_bindgen] pub struct Model { model: SelectedModel, tokenizer: TokenOutputStream, } const SEP_TOKEN_ID: u32 = 102; #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, quantized: bool, ) -> Result<Model, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let tokenizer = TokenOutputStream::new(tokenizer); let config: blip::Config = serde_json::from_slice(&config)?; let device = Device::Cpu; let start = Date::now(); let model: SelectedModel = if quantized { let vb = quantized_blip::VarBuilder::from_gguf_buffer(&weights, &device)?; let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::Q(model) } else { let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, &device)?; let model = blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::M(model) }; console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.); Ok(Self { model, tokenizer }) } #[wasm_bindgen] pub fn generate_caption_from_image(&mut self, image: Vec<u8>) -> Result<String, JsError> { self.model.reset_kv_cache(); let device = Device::Cpu; console_log!("loading image as tensor"); let start = Date::now(); let image: Tensor = self.load_image(image)?.to_device(&device)?; console_log!("image loaded in {:?}s", (Date::now() - start) / 1000.); let start = Date::now(); let image_embeds: Tensor = match &mut self.model { SelectedModel::M(m) => image.unsqueeze(0)?.apply(m.vision_model())?, SelectedModel::Q(m) => image.unsqueeze(0)?.apply(m.vision_model())?, }; console_log!("image embedded in {:?}s", (Date::now() - start) / 1000.); let mut logits_processor = LogitsProcessor::new(299792458, None, None); let mut token_ids = vec![30522u32]; let mut text: String = "".to_string(); let start = Date::now(); for index in 0..1000 { let context_size = if index > 0 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = self.model.text_decoder_forward(&input_ids, &image_embeds)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; if token == SEP_TOKEN_ID { break; } token_ids.push(token); if let Some(t) = self.tokenizer.next_token(token)? { text.push_str(&t); } } if let Some(rest) = self .tokenizer .decode_rest() .map_err(|m| JsError::new(&m.to_string()))? 
{ text.push_str(&rest); } console_log!("caption generated in {:?}s", (Date::now() - start) / 1000.); Ok(text) } } impl Model { fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> { let device = &Device::Cpu; let img = image::io::Reader::new(std::io::Cursor::new(image)) .with_guessed_format()? .decode() .map_err(|e| JsError::new(&e.to_string()))? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), device)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], device)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], device)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) .map_err(|e| JsError::new(&e.to_string())) } } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/blip/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/blip/src/bin/m.rs", "repo_id": "candle", "token_count": 2699 }
39
//load Candle Bert Module wasm module let init, ModelConditionalGeneration; async function fetchArrayBuffer(url) { const cacheName = "t5-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class ConditionalGeneration { static instance = {}; static async getInstance(weightsURL, tokenizerURL, configURL, modelID) { if (modelID.includes("quantized")) { ({ default: init, ModelConditionalGeneration } = await import( "./build/m-quantized.js" )); } else { ({ default: init, ModelConditionalGeneration } = await import( "./build/m.js" )); } if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new ModelConditionalGeneration( weightsArrayU8, tokenizerArrayU8, configArrayU8 ); } else { self.postMessage({ status: "ready", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, tokenizerURL, configURL, modelID, prompt, params } = event.data; let { temperature = 0.0, seed = 299792458, repeat_penalty = 1.1, repeat_last_n = 64, top_p = 1, } = { ...params }; try { self.postMessage({ status: "ready", message: "Starting T5 Conditional Generation", }); const model = await ConditionalGeneration.getInstance( weightsURL, tokenizerURL, configURL, modelID ); self.postMessage({ status: "decoding", message: "Decoding Prompt", }); const output = model.decode({ prompt, temperature, seed, top_p, repeat_penalty, repeat_last_n, }); self.postMessage({ status: "complete", message: "complete", output: output, }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js/0
{ "file_path": "candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js", "repo_id": "candle", "token_count": 980 }
40
fn main() { wasm_logger::init(wasm_logger::Config::new(log::Level::Trace)); yew::Renderer::<candle_wasm_example_whisper::App>::new().render(); }
candle/candle-wasm-examples/whisper/src/bin/app.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/app.rs", "repo_id": "candle", "token_count": 67 }
41
module.exports = { root: true, parser: "@typescript-eslint/parser", extends: [ "eslint:recommended", "plugin:@typescript-eslint/recommended", "plugin:svelte/recommended", "prettier", ], plugins: ["@typescript-eslint"], ignorePatterns: ["*.cjs"], overrides: [ { files: ["*.svelte"], parser: "svelte-eslint-parser", parserOptions: { parser: "@typescript-eslint/parser", }, }, ], parserOptions: { sourceType: "module", ecmaVersion: 2020, extraFileExtensions: [".svelte"], }, rules: { "no-shadow": ["error"], "@typescript-eslint/no-explicit-any": "error", "@typescript-eslint/no-non-null-assertion": "error", "@typescript-eslint/no-unused-vars": [ // prevent variables with a _ prefix from being marked as unused "error", { argsIgnorePattern: "^_", }, ], "object-shorthand": ["error", "always"], }, env: { browser: true, es2017: true, node: true, }, };
chat-ui/.eslintrc.cjs/0
{ "file_path": "chat-ui/.eslintrc.cjs", "repo_id": "chat-ui", "token_count": 419 }
42
/// <reference types="@sveltejs/kit" /> /// <reference types="unplugin-icons/types/svelte" /> import type { User } from "$lib/types/User"; // See https://kit.svelte.dev/docs/types#app // for information about these interfaces declare global { namespace App { // interface Error {} interface Locals { sessionId: string; user?: User; } // interface PageData {} // interface Platform {} } } export {};
chat-ui/src/app.d.ts/0
{ "file_path": "chat-ui/src/app.d.ts", "repo_id": "chat-ui", "token_count": 145 }
43
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/stores"; import { createEventDispatcher } from "svelte"; import CarbonCheckmark from "~icons/carbon/checkmark"; import CarbonTrashCan from "~icons/carbon/trash-can"; import CarbonClose from "~icons/carbon/close"; import CarbonEdit from "~icons/carbon/edit"; import type { ConvSidebar } from "$lib/types/ConvSidebar"; export let conv: ConvSidebar; let confirmDelete = false; const dispatch = createEventDispatcher<{ deleteConversation: string; editConversationTitle: { id: string; title: string }; }>(); </script> <a data-sveltekit-noscroll on:mouseleave={() => { confirmDelete = false; }} href="{base}/conversation/{conv.id}" class="group flex h-10 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-600 hover:bg-gray-100 dark:text-gray-300 dark:hover:bg-gray-700 {conv.id === $page.params.id ? 'bg-gray-100 dark:bg-gray-700' : ''}" > <div class="flex flex-1 items-center truncate"> {#if confirmDelete} <span class="mr-1 font-semibold"> Delete </span> {/if} {#if conv.avatarHash} <img src="{base}/settings/assistants/{conv.assistantId}/avatar.jpg?hash={conv.avatarHash}" alt="Assistant avatar" class="mr-1.5 inline size-4 flex-none rounded-full object-cover" /> {conv.title.replace(/\p{Emoji}/gu, "")} {:else if conv.assistantId} <div class="mr-1.5 flex size-4 flex-none items-center justify-center rounded-full bg-gray-300 text-xs font-bold uppercase text-gray-500" /> {conv.title.replace(/\p{Emoji}/gu, "")} {:else} {conv.title} {/if} </div> {#if confirmDelete} <button type="button" class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex" title="Confirm delete action" on:click|preventDefault={() => { confirmDelete = false; dispatch("deleteConversation", conv.id); }} > <CarbonCheckmark class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" /> </button> <button type="button" class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex" title="Cancel delete action" on:click|preventDefault={() => (confirmDelete = false)} > <CarbonClose class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" /> </button> {:else} <button type="button" class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex" title="Edit conversation title" on:click|preventDefault={() => { const newTitle = prompt("Edit this conversation title:", conv.title); if (!newTitle) return; dispatch("editConversationTitle", { id: conv.id, title: newTitle }); }} > <CarbonEdit class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" /> </button> <button type="button" class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex" title="Delete conversation" on:click|preventDefault={(event) => { if (event.shiftKey) { dispatch("deleteConversation", conv.id); } else { confirmDelete = true; } }} > <CarbonTrashCan class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" /> </button> {/if} </a>
chat-ui/src/lib/components/NavConversationItem.svelte/0
{ "file_path": "chat-ui/src/lib/components/NavConversationItem.svelte", "repo_id": "chat-ui", "token_count": 1309 }
44
<script lang="ts"> import { marked } from "marked"; import markedKatex from "marked-katex-extension"; import type { Message } from "$lib/types/Message"; import { afterUpdate, createEventDispatcher } from "svelte"; import { deepestChild } from "$lib/utils/deepestChild"; import { page } from "$app/stores"; import CodeBlock from "../CodeBlock.svelte"; import CopyToClipBoardBtn from "../CopyToClipBoardBtn.svelte"; import IconLoading from "../icons/IconLoading.svelte"; import CarbonRotate360 from "~icons/carbon/rotate-360"; import CarbonDownload from "~icons/carbon/download"; import CarbonThumbsUp from "~icons/carbon/thumbs-up"; import CarbonThumbsDown from "~icons/carbon/thumbs-down"; import { PUBLIC_SEP_TOKEN } from "$lib/constants/publicSepToken"; import type { Model } from "$lib/types/Model"; import OpenWebSearchResults from "../OpenWebSearchResults.svelte"; import type { WebSearchUpdate } from "$lib/types/MessageUpdate"; function sanitizeMd(md: string) { let ret = md .replace(/<\|[a-z]*$/, "") .replace(/<\|[a-z]+\|$/, "") .replace(/<$/, "") .replaceAll(PUBLIC_SEP_TOKEN, " ") .replaceAll(/<\|[a-z]+\|>/g, " ") .replaceAll(/<br\s?\/?>/gi, "\n") .replaceAll("<", "&lt;") .trim(); for (const stop of [...(model.parameters?.stop ?? []), "<|endoftext|>"]) { if (ret.endsWith(stop)) { ret = ret.slice(0, -stop.length).trim(); } } return ret; } function unsanitizeMd(md: string) { return md.replaceAll("&lt;", "<"); } export let model: Model; export let message: Message; export let loading = false; export let isAuthor = true; export let readOnly = false; export let isTapped = false; export let webSearchMessages: WebSearchUpdate[]; const dispatch = createEventDispatcher<{ retry: { content: string; id: Message["id"] }; vote: { score: Message["score"]; id: Message["id"] }; }>(); let contentEl: HTMLElement; let loadingEl: IconLoading; let pendingTimeout: ReturnType<typeof setTimeout>; let isCopied = false; const renderer = new marked.Renderer(); // For code blocks with simple backticks renderer.codespan = (code) => { // Unsanitize double-sanitized code return `<code>${code.replaceAll("&amp;", "&")}</code>`; }; // eslint-disable-next-line @typescript-eslint/no-unused-vars const { extensions, ...defaults } = marked.getDefaults() as marked.MarkedOptions & { // eslint-disable-next-line @typescript-eslint/no-explicit-any extensions: any; }; const options: marked.MarkedOptions = { ...defaults, gfm: true, breaks: true, renderer, }; marked.use( markedKatex({ throwOnError: false, // output: "html", }) ); $: tokens = marked.lexer(sanitizeMd(message.content)); afterUpdate(() => { loadingEl?.$destroy(); clearTimeout(pendingTimeout); // Add loading animation to the last message if update takes more than 600ms if (loading) { pendingTimeout = setTimeout(() => { if (contentEl) { loadingEl = new IconLoading({ target: deepestChild(contentEl), props: { classNames: "loading inline ml-2" }, }); } }, 600); } }); let searchUpdates: WebSearchUpdate[] = []; $: searchUpdates = ((webSearchMessages.length > 0 ? webSearchMessages : message.updates?.filter(({ type }) => type === "webSearch")) ?? []) as WebSearchUpdate[]; $: downloadLink = message.from === "user" ? 
`${$page.url.pathname}/message/${message.id}/prompt` : undefined; let webSearchIsDone = true; $: webSearchIsDone = searchUpdates.length > 0 && searchUpdates[searchUpdates.length - 1].messageType === "sources"; $: webSearchSources = searchUpdates && searchUpdates?.filter(({ messageType }) => messageType === "sources")?.[0]?.sources; $: if (isCopied) { setTimeout(() => { isCopied = false; }, 1000); } </script> {#if message.from === "assistant"} <div class="group relative -mb-8 flex items-start justify-start gap-4 pb-8 leading-relaxed" role="presentation" on:click={() => (isTapped = !isTapped)} on:keypress={() => (isTapped = !isTapped)} > <img alt="" src="https://huggingface.co/avatars/2edb18bd0206c16b433841a47f53fa8e.svg" class="mt-5 h-3 w-3 flex-none select-none rounded-full shadow-lg" /> <div class="relative min-h-[calc(2rem+theme(spacing[3.5])*2)] min-w-[60px] break-words rounded-2xl border border-gray-100 bg-gradient-to-br from-gray-50 px-5 py-3.5 text-gray-600 prose-pre:my-2 dark:border-gray-800 dark:from-gray-800/40 dark:text-gray-300" > {#if searchUpdates && searchUpdates.length > 0} <OpenWebSearchResults classNames={tokens.length ? "mb-3.5" : ""} webSearchMessages={searchUpdates} /> {/if} {#if !message.content && (webSearchIsDone || (webSearchMessages && webSearchMessages.length === 0))} <IconLoading /> {/if} <div class="prose max-w-none max-sm:prose-sm dark:prose-invert prose-headings:font-semibold prose-h1:text-lg prose-h2:text-base prose-h3:text-base prose-pre:bg-gray-800 dark:prose-pre:bg-gray-900" bind:this={contentEl} > {#each tokens as token} {#if token.type === "code"} <CodeBlock lang={token.lang} code={unsanitizeMd(token.text)} /> {:else} <!-- eslint-disable-next-line svelte/no-at-html-tags --> {@html marked.parse(token.raw, options)} {/if} {/each} </div> <!-- Web Search sources --> {#if webSearchSources?.length} <div class="mt-4 flex flex-wrap items-center gap-x-2 gap-y-1.5 text-sm"> <div class="text-gray-400">Sources:</div> {#each webSearchSources as { link, title, hostname }} <a class="flex items-center gap-2 whitespace-nowrap rounded-lg border bg-white px-2 py-1.5 leading-none hover:border-gray-300 dark:border-gray-800 dark:bg-gray-900 dark:hover:border-gray-700" href={link} target="_blank" > <img class="h-3.5 w-3.5 rounded" src="https://www.google.com/s2/favicons?sz=64&domain_url={hostname}" alt="{title} favicon" /> <div>{hostname.replace(/^www\./, "")}</div> </a> {/each} </div> {/if} </div> {#if isAuthor && !loading && message.content} <div class="absolute bottom-1 right-0 flex max-md:transition-all md:bottom-0 md:group-hover:visible md:group-hover:opacity-100 {message.score ? 'visible opacity-100' : 'invisible max-md:-translate-y-4 max-md:opacity-0'} {isTapped || isCopied ? 'max-md:visible max-md:translate-y-0 max-md:opacity-100' : ''} " > <button class="btn rounded-sm p-1 text-sm text-gray-400 focus:ring-0 hover:text-gray-500 dark:text-gray-400 dark:hover:text-gray-300 {message.score && message.score > 0 ? 'text-green-500 hover:text-green-500 dark:text-green-400 hover:dark:text-green-400' : ''}" title={message.score === 1 ? "Remove +1" : "+1"} type="button" on:click={() => dispatch("vote", { score: message.score === 1 ? 0 : 1, id: message.id })} > <CarbonThumbsUp class="h-[1.14em] w-[1.14em]" /> </button> <button class="btn rounded-sm p-1 text-sm text-gray-400 focus:ring-0 hover:text-gray-500 dark:text-gray-400 dark:hover:text-gray-300 {message.score && message.score < 0 ? 
'text-red-500 hover:text-red-500 dark:text-red-400 hover:dark:text-red-400' : ''}" title={message.score === -1 ? "Remove -1" : "-1"} type="button" on:click={() => dispatch("vote", { score: message.score === -1 ? 0 : -1, id: message.id })} > <CarbonThumbsDown class="h-[1.14em] w-[1.14em]" /> </button> <CopyToClipBoardBtn on:click={() => { isCopied = true; }} classNames="ml-1.5 !rounded-sm !p-1 !text-sm !text-gray-400 focus:!ring-0 hover:!text-gray-500 dark:!text-gray-400 dark:hover:!text-gray-300 !border-none !shadow-none" value={message.content} /> </div> {/if} </div> {/if} {#if message.from === "user"} <div class="group relative flex items-start justify-start gap-4 max-sm:text-sm"> <div class="flex flex-col"> {#if message.files && message.files.length > 0} <div class="mx-auto grid w-fit grid-cols-2 gap-5 px-5"> {#each message.files as file} <!-- handle the case where this is a hash that points to an image in the db, hash is always 64 char long --> {#if file.length === 64} <img src={$page.url.pathname + "/output/" + file} alt="input from user" class="my-2 aspect-auto max-h-48 rounded-lg shadow-lg" /> {:else} <!-- handle the case where this is a base64 encoded image --> <img src={"data:image/*;base64," + file} alt="input from user" class="my-2 aspect-auto max-h-48 rounded-lg shadow-lg" /> {/if} {/each} </div> {/if} <div class="max-w-full whitespace-break-spaces break-words rounded-2xl px-5 py-3.5 text-gray-500 dark:text-gray-400" > {message.content.trim()} </div> {#if !loading} <div class="absolute right-0 top-3.5 flex gap-2 lg:-right-2"> {#if downloadLink} <a class="rounded-lg border border-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 md:hidden dark:border-gray-800 dark:text-gray-400 dark:hover:text-gray-300" title="Download prompt and parameters" type="button" target="_blank" href={downloadLink} > <CarbonDownload /> </a> {/if} {#if !readOnly} <button class="cursor-pointer rounded-lg border border-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 md:hidden lg:-right-2 dark:border-gray-800 dark:text-gray-400 dark:hover:text-gray-300" title="Retry" type="button" on:click={() => dispatch("retry", { content: message.content, id: message.id })} > <CarbonRotate360 /> </button> {/if} </div> {/if} </div> </div> {/if}
chat-ui/src/lib/components/chat/ChatMessage.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatMessage.svelte", "repo_id": "chat-ui", "token_count": 4251 }
45
import { z } from "zod";
import {
	embeddingEndpointTei,
	embeddingEndpointTeiParametersSchema,
} from "./tei/embeddingEndpoints";
import {
	embeddingEndpointTransformersJS,
	embeddingEndpointTransformersJSParametersSchema,
} from "./transformersjs/embeddingEndpoints";

// parameters passed when generating embeddings
interface EmbeddingEndpointParameters {
	inputs: string[];
}

export type Embedding = number[];

// type signature for the endpoint
export type EmbeddingEndpoint = (params: EmbeddingEndpointParameters) => Promise<Embedding[]>;

export const embeddingEndpointSchema = z.discriminatedUnion("type", [
	embeddingEndpointTeiParametersSchema,
	embeddingEndpointTransformersJSParametersSchema,
]);

type EmbeddingEndpointTypeOptions = z.infer<typeof embeddingEndpointSchema>["type"];

// generator function that takes in the type discriminator value defining the endpoint and returns the endpoint
export type EmbeddingEndpointGenerator<T extends EmbeddingEndpointTypeOptions> = (
	inputs: Extract<z.infer<typeof embeddingEndpointSchema>, { type: T }>
) => EmbeddingEndpoint | Promise<EmbeddingEndpoint>;

// list of all endpoint generators
export const embeddingEndpoints: {
	[Key in EmbeddingEndpointTypeOptions]: EmbeddingEndpointGenerator<Key>;
} = {
	tei: embeddingEndpointTei,
	transformersjs: embeddingEndpointTransformersJS,
};

export default embeddingEndpoints;
chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 413 }
46
import { dot } from "@xenova/transformers";
import type { EmbeddingBackendModel } from "$lib/server/embeddingModels";
import type { Embedding } from "$lib/server/embeddingEndpoints/embeddingEndpoints";

// see here: https://github.com/nmslib/hnswlib/blob/359b2ba87358224963986f709e593d799064ace6/README.md?plain=1#L34
function innerProduct(embeddingA: Embedding, embeddingB: Embedding) {
	return 1.0 - dot(embeddingA, embeddingB);
}

export async function findSimilarSentences(
	embeddingModel: EmbeddingBackendModel,
	query: string,
	sentences: string[],
	{ topK = 5 }: { topK: number }
): Promise<number[]> {
	const inputs = [
		`${embeddingModel.preQuery}${query}`,
		...sentences.map((sentence) => `${embeddingModel.prePassage}${sentence}`),
	];

	const embeddingEndpoint = await embeddingModel.getEndpoint();
	const output = await embeddingEndpoint({ inputs });

	// The first embedding belongs to the query, the remaining ones to the sentences
	const queryEmbedding: Embedding = output[0];
	const sentencesEmbeddings: Embedding[] = output.slice(1);

	const distancesFromQuery: { distance: number; index: number }[] = [...sentencesEmbeddings].map(
		(sentenceEmbedding: Embedding, index: number) => {
			return {
				distance: innerProduct(queryEmbedding, sentenceEmbedding),
				index,
			};
		}
	);

	distancesFromQuery.sort((a, b) => {
		return a.distance - b.distance;
	});

	// Return the indexes of the closest topK sentences
	return distancesFromQuery.slice(0, topK).map((item) => item.index);
}
chat-ui/src/lib/server/sentenceSimilarity.ts/0
{ "file_path": "chat-ui/src/lib/server/sentenceSimilarity.ts", "repo_id": "chat-ui", "token_count": 503 }
47
import type { ObjectId } from "mongodb"; import type { User } from "./User"; import type { Timestamps } from "./Timestamps"; export interface Assistant extends Timestamps { _id: ObjectId; createdById: User["_id"] | string; // user id or session createdByName?: User["username"]; avatar?: string; name: string; description?: string; modelId: string; exampleInputs: string[]; preprompt: string; userCount?: number; featured?: boolean; }
chat-ui/src/lib/types/Assistant.ts/0
{ "file_path": "chat-ui/src/lib/types/Assistant.ts", "repo_id": "chat-ui", "token_count": 145 }
48
export interface GAEvent { hitType: "event"; eventCategory: string; eventAction: string; eventLabel?: string; eventValue?: number; } // Send a Google Analytics event export function sendAnalyticsEvent({ eventCategory, eventAction, eventLabel, eventValue, }: Omit<GAEvent, "hitType">): void { // Mandatory fields const event: GAEvent = { hitType: "event", eventCategory, eventAction, }; // Optional fields if (eventLabel) { event.eventLabel = eventLabel; } if (eventValue) { event.eventValue = eventValue; } // @ts-expect-error typescript doesn't know gtag is on the window object if (!!window?.gtag && typeof window?.gtag === "function") { // @ts-expect-error typescript doesn't know gtag is on the window object window?.gtag("event", eventAction, { event_category: event.eventCategory, event_label: event.eventLabel, value: event.eventValue, }); } }
chat-ui/src/lib/utils/analytics.ts/0
{ "file_path": "chat-ui/src/lib/utils/analytics.ts", "repo_id": "chat-ui", "token_count": 313 }
49
import type { Message } from "$lib/types/Message"; import type { LegacyParamatersTemplateInput } from "$lib/types/Template"; import Handlebars from "handlebars"; Handlebars.registerHelper("ifUser", function (this: Pick<Message, "from" | "content">, options) { if (this.from == "user") return options.fn(this); }); Handlebars.registerHelper( "ifAssistant", function (this: Pick<Message, "from" | "content">, options) { if (this.from == "assistant") return options.fn(this); } ); export function compileTemplate<T>(input: string, model: LegacyParamatersTemplateInput) { const template = Handlebars.compile<T & LegacyParamatersTemplateInput>(input, { knownHelpers: { ifUser: true, ifAssistant: true }, knownHelpersOnly: true, noEscape: true, strict: true, preventIndent: true, }); return function render(inputs: T, options?: RuntimeOptions) { return template({ ...model, ...inputs }, options); }; }
chat-ui/src/lib/utils/template.ts/0
{ "file_path": "chat-ui/src/lib/utils/template.ts", "repo_id": "chat-ui", "token_count": 290 }
50
<script lang="ts"> import type { PageData } from "./$types"; import { PUBLIC_APP_ASSETS, PUBLIC_ORIGIN } from "$env/static/public"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import { goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/stores"; import CarbonAdd from "~icons/carbon/add"; import CarbonHelpFilled from "~icons/carbon/help-filled"; export let data: PageData; let selectedModel = $page.url.searchParams.get("modelId") ?? ""; const onModelChange = (e: Event) => { const newUrl = new URL($page.url); if ((e.target as HTMLSelectElement).value === "") { newUrl.searchParams.delete("modelId"); } else { newUrl.searchParams.set("modelId", (e.target as HTMLSelectElement).value); } goto(newUrl); }; </script> <svelte:head> {#if isHuggingChat} <title>HuggingChat - Assistants</title> <meta property="og:title" content="HuggingChat - Assistants" /> <meta property="og:type" content="link" /> <meta property="og:description" content="Browse HuggingChat assistants made by the community." /> <meta property="og:image" content="{PUBLIC_ORIGIN || $page.url.origin}{base}/{PUBLIC_APP_ASSETS}/assistants-thumbnail.png" /> <meta property="og:url" content={$page.url.href} /> {/if} </svelte:head> <div class="scrollbar-custom mr-1 h-full overflow-y-auto py-12 md:py-24"> <div class="pt-42 mx-auto flex flex-col px-5 xl:w-[60rem] 2xl:w-[64rem]"> <div class="flex items-center"> <h1 class="text-2xl font-bold">Assistants</h1> {#if isHuggingChat} <div class="5 ml-1.5 rounded-lg text-xxs uppercase text-gray-500 dark:text-gray-500"> beta </div> <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/357" class="ml-auto dark:text-gray-400 dark:hover:text-gray-300" target="_blank" > <CarbonHelpFilled /> </a> {/if} </div> <h3 class="text-gray-500">Popular assistants made by the community</h3> <div class="mt-6 flex justify-between gap-2 max-sm:flex-col sm:items-center"> <select class="mt-1 h-[34px] rounded-lg border border-gray-300 bg-gray-50 px-2 text-sm text-gray-900 focus:border-blue-700 focus:ring-blue-700 dark:border-gray-600 dark:bg-gray-700 dark:text-white dark:placeholder-gray-400" bind:value={selectedModel} on:change={onModelChange} > <option value="">All models</option> {#each data.models.filter((model) => !model.unlisted) as model} <option value={model.name}>{model.name}</option> {/each} </select> <a href={`${base}/settings/assistants/new`} class="flex items-center gap-1 whitespace-nowrap rounded-lg border bg-white py-1 pl-1.5 pr-2.5 shadow-sm hover:bg-gray-50 hover:shadow-none dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-700" > <CarbonAdd />Create New assistant </a> </div> <div class="mt-10 grid grid-cols-2 gap-3 sm:gap-5 md:grid-cols-3 lg:grid-cols-4"> {#each data.assistants as assistant} <a href="{base}/assistant/{assistant._id}" class="flex flex-col items-center justify-center overflow-hidden text-balance rounded-xl border bg-gray-50/50 px-4 py-6 text-center shadow hover:bg-gray-50 hover:shadow-inner max-sm:px-4 sm:h-64 sm:pb-4 dark:border-gray-800/70 dark:bg-gray-950/20 dark:hover:bg-gray-950/40" > {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id}/avatar.jpg" alt="Avatar" class="mb-2 aspect-square size-12 flex-none rounded-full object-cover sm:mb-6 sm:size-20" /> {:else} <div class="mb-2 flex aspect-square size-12 flex-none items-center justify-center rounded-full bg-gray-300 text-2xl font-bold uppercase text-gray-500 sm:mb-6 sm:size-20 dark:bg-gray-800" > {assistant.name[0]} </div> {/if} <h3 
class="mb-2 line-clamp-2 max-w-full break-words text-center text-[.8rem] font-semibold leading-snug sm:text-sm" > {assistant.name} </h3> <p class="line-clamp-4 text-xs text-gray-700 sm:line-clamp-2 dark:text-gray-400"> {assistant.description} </p> {#if assistant.createdByName} <p class="mt-auto pt-2 text-xs text-gray-400 dark:text-gray-500"> Created by <a class="hover:underline" href="https://hf.co/{assistant.createdByName}" target="_blank" > {assistant.createdByName} </a> </p> {/if} </a> {:else} No assistants found {/each} </div> </div> </div>
chat-ui/src/routes/assistants/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistants/+page.svelte", "repo_id": "chat-ui", "token_count": 2000 }
51
<script lang="ts"> import { marked } from "marked"; import privacy from "../../../PRIVACY.md?raw"; </script> <div class="overflow-auto p-6"> <div class="prose mx-auto px-4 pb-24 pt-6 dark:prose-invert md:pt-12"> <!-- eslint-disable-next-line svelte/no-at-html-tags --> {@html marked(privacy, { gfm: true })} </div> </div>
chat-ui/src/routes/privacy/+page.svelte/0
{ "file_path": "chat-ui/src/routes/privacy/+page.svelte", "repo_id": "chat-ui", "token_count": 141 }
52
@import "highlight.js/styles/atom-one-dark";
chat-ui/src/styles/highlight-js.css/0
{ "file_path": "chat-ui/src/styles/highlight-js.css", "repo_id": "chat-ui", "token_count": 17 }
53
const defaultTheme = require("tailwindcss/defaultTheme"); const colors = require("tailwindcss/colors"); import dotenv from "dotenv"; dotenv.config({ path: "./.env" }); /** @type {import('tailwindcss').Config} */ export default { darkMode: "class", content: ["./src/**/*.{html,js,svelte,ts}"], theme: { extend: { colors: { primary: colors[process.env.PUBLIC_APP_COLOR], }, // fontFamily: { // sans: ['"Inter"', ...defaultTheme.fontFamily.sans] // }, fontSize: { xxs: "0.625rem", smd: "0.94rem", }, }, }, plugins: [ require("tailwind-scrollbar")({ nocompatible: true }), require("@tailwindcss/typography"), ], };
chat-ui/tailwind.config.cjs/0
{ "file_path": "chat-ui/tailwind.config.cjs", "repo_id": "chat-ui", "token_count": 276 }
54
repos: - repo: https://github.com/charliermarsh/ruff-pre-commit # https://github.com/charliermarsh/ruff#usage rev: 'v0.1.5' hooks: # Run the linter. - id: ruff args: [ --fix ] # Run the formatter. - id: ruff-format
datasets/.pre-commit-config.yaml/0
{ "file_path": "datasets/.pre-commit-config.yaml", "repo_id": "datasets", "token_count": 122 }
55
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration SPEED_TEST_N_EXAMPLES = 500_000 RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def map(dataset: datasets.Dataset, **kwargs): _ = dataset.map(**kwargs) @get_duration def filter(dataset: datasets.Dataset, **kwargs): _ = dataset.filter(**kwargs) def benchmark_map_filter(): times = {"num examples": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")}) dataset = generate_example_dataset( os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES ) tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True) def tokenize(examples): return tokenizer(examples["text"]) times["map identity"] = map(dataset) times["map identity batched"] = map(dataset, batched=True) times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="numpy"): times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="pandas"): times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="torch", columns="numbers"): times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="tensorflow", columns="numbers"): times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True) times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True) times["filter"] = filter(dataset) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
datasets/benchmarks/benchmark_map_filter.py/0
{ "file_path": "datasets/benchmarks/benchmark_map_filter.py", "repo_id": "datasets", "token_count": 996 }
56
# Build and load

Nearly every deep learning workflow begins with loading a dataset, which makes it one of the most important steps. With 🤗 Datasets, there are more than 900 datasets available to help you get started with your NLP task. All you have to do is call [`load_dataset`] to take your first step. This function is a true workhorse in every sense because it builds and loads every dataset you use.

## ELI5: `load_dataset`

Let's begin with a basic Explain Like I'm Five.

A dataset is a directory that contains:

- Some data files in generic formats (JSON, CSV, Parquet, text, etc.)
- A dataset card named `README.md` that contains documentation about the dataset as well as a YAML header to define the dataset's tags and configurations
- An optional dataset script if it requires some code to read the data files. This is sometimes used to load files of specific formats and structures.

The [`load_dataset`] function fetches the requested dataset locally or from the Hugging Face Hub. The Hub is a central repository where all the Hugging Face datasets and models are stored.

If the dataset only contains data files, then [`load_dataset`] automatically infers how to load the data files from their extensions (json, csv, parquet, txt, etc.). Under the hood, 🤗 Datasets will use an appropriate [`DatasetBuilder`] based on the data file format. There is one builder per data file format in 🤗 Datasets:

* [`datasets.packaged_modules.text.Text`] for text
* [`datasets.packaged_modules.csv.Csv`] for CSV and TSV
* [`datasets.packaged_modules.json.Json`] for JSON and JSONL
* [`datasets.packaged_modules.parquet.Parquet`] for Parquet
* [`datasets.packaged_modules.arrow.Arrow`] for Arrow (streaming file format)
* [`datasets.packaged_modules.sql.Sql`] for SQL databases
* [`datasets.packaged_modules.imagefolder.ImageFolder`] for image folders
* [`datasets.packaged_modules.audiofolder.AudioFolder`] for audio folders

If the dataset has a dataset script, then it downloads and imports it from the Hugging Face Hub. Code in the dataset script defines a custom [`DatasetBuilder`] with the dataset information (description, features, URL to the original files, etc.), and tells 🤗 Datasets how to generate and display examples from it.

<Tip>

Read the [Share](./upload_dataset) section to learn more about how to share a dataset. This section also provides a step-by-step guide on how to write your own dataset loading script!

</Tip>

🤗 Datasets downloads the dataset files from the original URL, generates the dataset and caches it in an Arrow table on your drive. If you've downloaded the dataset before, then 🤗 Datasets will reload it from the cache to save you the trouble of downloading it again.

Now that you have a high-level understanding about how datasets are built, let's take a closer look at the nuts and bolts of how all this works.

## Building a dataset

When you load a dataset for the first time, 🤗 Datasets takes the raw data file and builds it into a table of rows and typed columns. There are two main classes responsible for building a dataset: [`BuilderConfig`] and [`DatasetBuilder`].

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/builderconfig.png"/>
</div>

### BuilderConfig[[datasets-builderconfig]]

[`BuilderConfig`] is the configuration class of [`DatasetBuilder`].
The [`BuilderConfig`] contains the following basic attributes about a dataset:

| Attribute     | Description                                                   |
|---------------|---------------------------------------------------------------|
| `name`        | Short name of the dataset.                                    |
| `version`     | Dataset version identifier.                                   |
| `data_dir`    | Stores the path to a local folder containing the data files.  |
| `data_files`  | Stores paths to local data files.                             |
| `description` | Description of the dataset.                                   |

If you want to add additional attributes to your dataset such as the class labels, you can subclass the base [`BuilderConfig`] class. There are two ways to populate the attributes of a [`BuilderConfig`] class or subclass:

- Provide a list of predefined [`BuilderConfig`] class (or subclass) instances in the dataset's [`DatasetBuilder.BUILDER_CONFIGS`] attribute.

- When you call [`load_dataset`], any keyword arguments that are not specific to the method will be used to set the associated attributes of the [`BuilderConfig`] class. This will override the predefined attributes if a specific configuration was selected.

You can also set the [`DatasetBuilder.BUILDER_CONFIG_CLASS`] to any custom subclass of [`BuilderConfig`].

### DatasetBuilder[[datasets-datasetbuilder]]

[`DatasetBuilder`] accesses all the attributes inside [`BuilderConfig`] to build the actual dataset.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasetbuilder.png"/>
</div>

There are three main methods in [`DatasetBuilder`]:

1. [`DatasetBuilder._info`] is in charge of defining the dataset attributes. When you call `dataset.info`, 🤗 Datasets returns the information stored here. Likewise, the [`Features`] are also specified here. Remember, the [`Features`] are like the skeleton of the dataset. They provide the names and types of each column.

2. [`DatasetBuilder._split_generators`] downloads or retrieves the requested data files, organizes them into splits, and defines specific arguments for the generation process. This method has a [`DownloadManager`] that downloads files or fetches them from your local filesystem. Within the [`DownloadManager`], there is a [`DownloadManager.download_and_extract`] method that accepts a dictionary of URLs to the original data files, and downloads the requested files. Accepted inputs include: a single URL or path, or a list/dictionary of URLs or paths. Any compressed file types like TAR, GZIP and ZIP archives will be automatically extracted.

   Once the files are downloaded, [`SplitGenerator`] organizes them into splits. The [`SplitGenerator`] contains the name of the split, and any keyword arguments that are provided to the [`DatasetBuilder._generate_examples`] method. The keyword arguments can be specific to each split, and typically comprise at least the local path to the data files for each split.

3. [`DatasetBuilder._generate_examples`] reads and parses the data files for a split. Then it yields dataset examples according to the format specified in the `features` from [`DatasetBuilder._info`]. The input of [`DatasetBuilder._generate_examples`] is actually the `filepath` provided in the keyword arguments of the last method.

   The dataset is generated with a Python generator, which doesn't load all the data in memory. As a result, the generator can handle large datasets. However, before the generated samples are flushed to the dataset file on disk, they are stored in an `ArrowWriter` buffer. This means the generated samples are written by batch.
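To make these three methods concrete, here is a minimal sketch of a hypothetical generator-based builder. The class name, URLs, column names and labels below are invented for illustration only; a real builder would adapt them to its own data files.

```py
import csv

import datasets


class MyToyDataset(datasets.GeneratorBasedBuilder):
    """A hypothetical builder for a dataset distributed as one CSV file per split."""

    def _info(self):
        # Define the dataset attributes and the type of each column
        return datasets.DatasetInfo(
            description="Toy dataset used to illustrate the builder methods.",
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["neg", "pos"]),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        # Download (or locate) the data files and organize them into splits
        urls = {
            "train": "https://example.com/toy/train.csv",  # hypothetical URLs
            "test": "https://example.com/toy/test.csv",
        }
        paths = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": paths["test"]}
            ),
        ]

    def _generate_examples(self, filepath):
        # Yield (key, example) pairs that match the features declared in _info;
        # here we assume a CSV file with "text" and "label" header columns.
        with open(filepath, newline="", encoding="utf-8") as f:
            for idx, row in enumerate(csv.DictReader(f)):
                yield idx, {"text": row["text"], "label": row["label"]}
```

In practice you rarely call these methods yourself: [`load_dataset`] instantiates the builder, runs the download and generation steps, and caches the result as an Arrow table on disk.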
If your dataset samples consume a lot of memory (images or videos), then make sure to specify a low value for the `DEFAULT_WRITER_BATCH_SIZE` attribute in [`DatasetBuilder`]. We recommend not exceeding a size of 200 MB.

## Maintaining integrity

To ensure a dataset is complete, [`load_dataset`] will perform a series of tests on the downloaded files to make sure everything is there. This way, you don't encounter any surprises when your requested dataset doesn't get generated as expected. [`load_dataset`] verifies:

- The number of splits in the generated `DatasetDict`.
- The number of samples in each split of the generated `DatasetDict`.
- The list of downloaded files.
- The SHA256 checksums of the downloaded files (disabled by default).

If the dataset doesn't pass the verifications, it is likely that the original host of the dataset made some changes in the data files.

<Tip>

If it is your own dataset, you'll need to recompute the information above and update the `README.md` file in your dataset repository. Take a look at this [section](dataset_script#optional-generate-dataset-metadata) to learn how to generate and update this metadata.

</Tip>

In this case, an error is raised to alert that the dataset has changed. To ignore the error, one needs to specify `verification_mode="no_checks"` in [`load_dataset`]. Anytime you see a verification error, feel free to open a discussion or pull request in the corresponding dataset "Community" tab, so that the integrity checks for that dataset are updated.

## Security

The dataset repositories on the Hub are scanned for malware; see more information [here](https://huggingface.co/docs/hub/security#malware-scanning).

Moreover, the datasets without a namespace (originally contributed on our GitHub repository) have all been reviewed by our maintainers. The code of these datasets is considered **safe**. It concerns datasets that are not under a namespace, e.g. "squad" or "glue", unlike the other datasets that are named "username/dataset_name" or "org/dataset_name".
datasets/docs/source/about_dataset_load.mdx/0
{ "file_path": "datasets/docs/source/about_dataset_load.mdx", "repo_id": "datasets", "token_count": 2537 }
57
# Overview

The how-to guides offer a more comprehensive overview of all the tools 🤗 Datasets offers and how to use them. This will help you tackle messier real-world datasets where you may need to manipulate the dataset structure or content to get it ready for training.

The guides assume you are familiar and comfortable with the 🤗 Datasets basics. We recommend newer users check out our [tutorials](tutorial) first.

<Tip>

Interested in learning more? Take a look at [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course!

</Tip>

The guides are organized into six sections:

- <span class="underline decoration-sky-400 decoration-2 font-semibold">General usage</span>: Functions for general dataset loading and processing. The functions shown in this section are applicable across all dataset modalities.
- <span class="underline decoration-pink-400 decoration-2 font-semibold">Audio</span>: How to load, process, and share audio datasets.
- <span class="underline decoration-yellow-400 decoration-2 font-semibold">Vision</span>: How to load, process, and share image datasets.
- <span class="underline decoration-green-400 decoration-2 font-semibold">Text</span>: How to load, process, and share text datasets.
- <span class="underline decoration-orange-400 decoration-2 font-semibold">Tabular</span>: How to load, process, and share tabular datasets.
- <span class="underline decoration-indigo-400 decoration-2 font-semibold">Dataset repository</span>: How to share and upload a dataset to the <a href="https://huggingface.co/datasets">Hub</a>.

If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
datasets/docs/source/how_to.md/0
{ "file_path": "datasets/docs/source/how_to.md", "repo_id": "datasets", "token_count": 469 }
58
# Builder classes

## Builders

🤗 Datasets relies on two main classes during the dataset building process: [`DatasetBuilder`] and [`BuilderConfig`].

[[autodoc]] datasets.DatasetBuilder

[[autodoc]] datasets.GeneratorBasedBuilder

[[autodoc]] datasets.BeamBasedBuilder

[[autodoc]] datasets.ArrowBasedBuilder

[[autodoc]] datasets.BuilderConfig

## Download

[[autodoc]] datasets.DownloadManager

[[autodoc]] datasets.StreamingDownloadManager

[[autodoc]] datasets.DownloadConfig

[[autodoc]] datasets.DownloadMode

## Verification

[[autodoc]] datasets.VerificationMode

## Splits

[[autodoc]] datasets.SplitGenerator

[[autodoc]] datasets.Split

[[autodoc]] datasets.NamedSplit

[[autodoc]] datasets.NamedSplitAll

[[autodoc]] datasets.ReadInstruction

## Version

[[autodoc]] datasets.utils.Version
datasets/docs/source/package_reference/builder_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/builder_classes.mdx", "repo_id": "datasets", "token_count": 253 }
59
# Preprocess In addition to loading datasets, πŸ€— Datasets other main goal is to offer a diverse set of preprocessing functions to get a dataset into an appropriate format for training with your machine learning framework. There are many possible ways to preprocess a dataset, and it all depends on your specific dataset. Sometimes you may need to rename a column, and other times you might need to unflatten nested fields. πŸ€— Datasets provides a way to do most of these things. But in nearly all preprocessing cases, depending on your dataset modality, you'll need to: - Tokenize a text dataset. - Resample an audio dataset. - Apply transforms to an image dataset. The last preprocessing step is usually setting your dataset format to be compatible with your machine learning framework's expected input format. In this tutorial, you'll also need to install the πŸ€— Transformers library: ```bash pip install transformers ``` Grab a dataset of your choice and follow along! ## Tokenize text Models cannot process raw text, so you'll need to convert the text into numbers. Tokenization provides a way to do this by dividing text into individual words called *tokens*. Tokens are finally converted to numbers. <Tip> Check out the [Tokenizers](https://huggingface.co/course/chapter2/4?fw=pt) section in Chapter 2 of the Hugging Face course to learn more about tokenization and different tokenization algorithms. </Tip> **1**. Start by loading the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset and the tokenizer corresponding to a pretrained [BERT](https://huggingface.co/bert-base-uncased) model. Using the same tokenizer as the pretrained model is important because you want to make sure the text is split in the same way. ```py >>> from transformers import AutoTokenizer >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") >>> dataset = load_dataset("rotten_tomatoes", split="train") ``` **2**. Call your tokenizer on the first row of `text` in the dataset: ```py >>> tokenizer(dataset[0]["text"]) {'input_ids': [101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432, 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119, 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190, 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117, 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137, 188, 1566, 7912, 14516, 6997, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The tokenizer returns a dictionary with three items: - `input_ids`: the numbers representing the tokens in the text. - `token_type_ids`: indicates which sequence a token belongs to if there is more than one sequence. - `attention_mask`: indicates whether a token should be masked or not. These values are actually the model inputs. **3**. The fastest way to tokenize your entire dataset is to use the [`~Dataset.map`] function. This function speeds up tokenization by applying the tokenizer to batches of examples instead of individual examples. Set the `batched` parameter to `True`: ```py >>> def tokenization(example): ... return tokenizer(example["text"]) >>> dataset = dataset.map(tokenization, batched=True) ``` **4**. 
Set the format of your dataset to be compatible with your machine learning framework: <frameworkcontent> <pt> Use the [`~Dataset.set_format`] function to set the dataset format to be compatible with PyTorch: ```py >>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"]) >>> dataset.format['type'] 'torch' ``` </pt> <tf> Use the [`~Dataset.to_tf_dataset`] function to set the dataset format to be compatible with TensorFlow. You'll also need to import a [data collator](https://huggingface.co/docs/transformers/main_classes/data_collator#transformers.DataCollatorWithPadding) from πŸ€— Transformers to combine the varying sequence lengths into a single batch of equal lengths: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf") >>> tf_dataset = dataset.to_tf_dataset( ... columns=["input_ids", "token_type_ids", "attention_mask"], ... label_cols=["label"], ... batch_size=2, ... collate_fn=data_collator, ... shuffle=True ... ) ``` </tf> </frameworkcontent> **5**. The dataset is now ready for training with your machine learning framework! ## Resample audio signals Audio inputs like text datasets need to be divided into discrete data points. This is known as *sampling*; the sampling rate tells you how much of the speech signal is captured per second. It is important to make sure the sampling rate of your dataset matches the sampling rate of the data used to pretrain the model you're using. If the sampling rates are different, the pretrained model may perform poorly on your dataset because it doesn't recognize the differences in the sampling rate. **1**. Start by loading the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset, the [`Audio`] feature, and the feature extractor corresponding to a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h) model: ```py >>> from transformers import AutoFeatureExtractor >>> from datasets import load_dataset, Audio >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") ``` **2**. Index into the first row of the dataset. When you call the `audio` column of the dataset, it is automatically decoded and resampled: ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` **3**. Reading a dataset card is incredibly useful and can give you a lot of information about the dataset. A quick look at the MInDS-14 dataset card tells you the sampling rate is 8kHz. Likewise, you can get many details about a model from its model card. The Wav2Vec2 model card says it was sampled on 16kHz speech audio. This means you'll need to upsample the MInDS-14 dataset to match the sampling rate of the model. Use the [`~Dataset.cast_column`] function and set the `sampling_rate` parameter in the [`Audio`] feature to upsample the audio signal. 
When you call the `audio` column now, it is decoded and resampled to 16kHz: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` **4**. Use the [`~Dataset.map`] function to resample the entire dataset to 16kHz. This function speeds up resampling by applying the feature extractor to batches of examples instead of individual examples. Set the `batched` parameter to `True`: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True ... ) ... return inputs >>> dataset = dataset.map(preprocess_function, batched=True) ``` **5**. The dataset is now ready for training with your machine learning framework! ## Apply data augmentations The most common preprocessing you'll do with image datasets is *data augmentation*, a process that introduces random variations to an image without changing the meaning of the data. This can mean changing the color properties of an image or randomly cropping an image. You are free to use any data augmentation library you like, and πŸ€— Datasets will help you apply your data augmentations to your dataset. **1**. Start by loading the [Beans](https://huggingface.co/datasets/beans) dataset, the `Image` feature, and the feature extractor corresponding to a pretrained [ViT](https://huggingface.co/google/vit-base-patch16-224-in21k) model: ```py >>> from transformers import AutoFeatureExtractor >>> from datasets import load_dataset, Image >>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") >>> dataset = load_dataset("beans", split="train") ``` **2**. Index into the first row of the dataset. When you call the `image` column of the dataset, the underlying PIL object is automatically decoded into an image. ```py >>> dataset[0]["image"] ``` **3**. Now, you can apply some transforms to the image. Feel free to take a look at the [various transforms available](https://pytorch.org/vision/stable/auto_examples/plot_transforms.html#sphx-glr-auto-examples-plot-transforms-py) in torchvision and choose one you'd like to experiment with. This example applies a transform that randomly rotates the image: ```py >>> from torchvision.transforms import RandomRotation >>> rotate = RandomRotation(degrees=(0, 90)) >>> def transforms(examples): ... examples["pixel_values"] = [rotate(image.convert("RGB")) for image in examples["image"]] ... return examples ``` **4**. Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly. When you index into the image `pixel_values`, the transform is applied, and your image gets rotated. ```py >>> dataset.set_transform(transforms) >>> dataset[0]["pixel_values"] ``` **5**. The dataset is now ready for training with your machine learning framework!
datasets/docs/source/use_dataset.mdx/0
{ "file_path": "datasets/docs/source/use_dataset.mdx", "repo_id": "datasets", "token_count": 3252 }
60
# Metric Card for chrF(++) ## Metric Description ChrF and ChrF++ are two MT evaluation metrics that use the F-score statistic for character n-gram matches. ChrF++ additionally includes word n-grams, which correlate more strongly with direct assessment. We use the implementation that is already present in sacrebleu. While this metric is included in sacreBLEU, the implementation here is slightly different from sacreBLEU in terms of the required input format. Here, the length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the [sacreBLEU README.md](https://github.com/mjpost/sacreBLEU#chrf--chrf) for more information. ## How to Use At minimum, this metric requires a `list` of predictions and a `list` of `list`s of references: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} ``` ### Inputs - **`predictions`** (`list` of `str`): The predicted sentences. - **`references`** (`list` of `list` of `str`): The references. There should be one reference sub-list for each prediction sentence. - **`char_order`** (`int`): Character n-gram order. Defaults to `6`. - **`word_order`** (`int`): Word n-gram order. If equals to 2, the metric is referred to as chrF++. Defaults to `0`. - **`beta`** (`int`): Determine the importance of recall w.r.t precision. Defaults to `2`. - **`lowercase`** (`bool`): If `True`, enables case-insensitivity. Defaults to `False`. - **`whitespace`** (`bool`): If `True`, include whitespaces when extracting character n-grams. Defaults to `False`. - **`eps_smoothing`** (`bool`): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK, and Moses implementations. If `False`, takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. ### Output Values The output is a dictionary containing the following fields: - **`'score'`** (`float`): The chrF (chrF++) score. - **`'char_order'`** (`int`): The character n-gram order. - **`'word_order'`** (`int`): The word n-gram order. If equals to `2`, the metric is referred to as chrF++. - **`'beta'`** (`int`): Determine the importance of recall w.r.t precision. The output is formatted as below: ```python {'score': 61.576379378113785, 'char_order': 6, 'word_order': 0, 'beta': 2} ``` The chrF(++) score can be any value between `0.0` and `100.0`, inclusive. 
#### Values from Popular Papers ### Examples A simple example of calculating chrF: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} ``` The same example, but with the argument `word_order=2`, to calculate chrF++ instead of chrF: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} ``` The same chrF++ example as above, but with `lowercase=True` to normalize all case: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} ``` ## Limitations and Bias - According to [Popović 2017](https://www.statmt.org/wmt17/pdf/WMT70.pdf), chrF+ (where `word_order=1`) and chrF++ (where `word_order=2`) produce scores that correlate better with human judgements than chrF (where `word_order=0`) does. 
## Citation ```bibtex @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ``` ## Further References - See the [sacreBLEU README.md](https://github.com/mjpost/sacreBLEU#chrf--chrf) for more information on this implementation.
datasets/metrics/chrf/README.md/0
{ "file_path": "datasets/metrics/chrf/README.md", "repo_id": "datasets", "token_count": 2254 }
61
# Metric Card for F1 ## Metric Description The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ## How to Use At minimum, this metric requires predictions and references as input ```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(predictions=[0, 1], references=[0, 1]) >>> print(results) ["{'f1': 1.0}"] ``` ### Inputs - **predictions** (`list` of `int`): Predicted labels. - **references** (`list` of `int`): Ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. - **pos_label** (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to None. ### Output Values - **f1**(`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Output Example(s): ```python {'f1': 0.26666666666666666} ``` ```python {'f1': array([0.8, 0.0, 0.0])} ``` This metric outputs a dictionary, with either a single f1 score, of type `float`, or an array of f1 scores, with entries of type `float`. #### Values from Popular Papers ### Examples Example 1-A simple binary example ```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} ``` Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. ```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 ``` Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. 
```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 ``` Example 4-A multiclass example, with different values for the `average` input. ```python >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} ``` ## Limitations and Bias ## Citation(s) ```bibtex @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ``` ## Further References
datasets/metrics/f1/README.md/0
{ "file_path": "datasets/metrics/f1/README.md", "repo_id": "datasets", "token_count": 1624 }
62
# Metric Card for MAUVE ## Metric description MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. It summarizes both Type I and Type II errors measured softly using [Kullback–Leibler (KL) divergences](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence). This metric is a wrapper around the [official implementation](https://github.com/krishnap25/mauve) of MAUVE. For more details, consult the [MAUVE paper](https://arxiv.org/abs/2102.01454). ## How to use The metric takes two lists of strings of tokens separated by spaces: one representing `predictions` (i.e. the text generated by the model) and the second representing `references` (a reference text for each prediction): ```python from datasets import load_metric mauve = load_metric('mauve') predictions = ["hello world", "goodnight moon"] references = ["hello world", "goodnight moon"] mauve_results = mauve.compute(predictions=predictions, references=references) ``` It also has several optional arguments: `num_buckets`: the size of the histogram to quantize P and Q. Options: `auto` (default) or an integer. `pca_max_data`: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. The default is `-1`. `kmeans_explained_var`: amount of variance of the data to keep in dimensionality reduction by PCA. The default is `0.9`. `kmeans_num_redo`: number of times to redo k-means clustering (the best objective is kept). The default is `5`. `kmeans_max_iter`: maximum number of k-means iterations. The default is `500`. `featurize_model_name`: name of the model from which features are obtained, from one of the following: `gpt2`, `gpt2-medium`, `gpt2-large`, `gpt2-xl`. The default is `gpt2-large`. `device_id`: Device for featurization. Supply a GPU id (e.g. `0` or `3`) to use GPU. If no GPU with this id is found, the metric will use CPU. `max_text_length`: maximum number of tokens to consider. The default is `1024`. `divergence_curve_discretization_size` Number of points to consider on the divergence curve. The default is `25`. `mauve_scaling_factor`: Hyperparameter for scaling. The default is `5`. `verbose`: If `True` (default), running the metric will print running time updates. `seed`: random seed to initialize k-means cluster assignments, randomly assigned by default. ## Output values This metric outputs a dictionary with 5 key-value pairs: `mauve`: MAUVE score, which ranges between 0 and 1. **Larger** values indicate that P and Q are closer. `frontier_integral`: Frontier Integral, which ranges between 0 and 1. **Smaller** values indicate that P and Q are closer. `divergence_curve`: a numpy.ndarray of shape (m, 2); plot it with `matplotlib` to view the divergence curve. `p_hist`: a discrete distribution, which is a quantized version of the text distribution `p_text`. `q_hist`: same as above, but with `q_text`. ### Values from popular papers The [original MAUVE paper](https://arxiv.org/abs/2102.01454) reported values ranging from 0.88 to 0.94 for open-ended text generation using a text completion task in the web text domain. The authors found that bigger models resulted in higher MAUVE scores, and that MAUVE is correlated with human judgments. 
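As a bridge to the fuller examples below, here is a small sketch showing how a few of the optional arguments described above can be passed to `compute`. The specific values chosen here are arbitrary illustrations of the call signature, not recommended settings:

```python
from datasets import load_metric

mauve = load_metric('mauve')
predictions = ["hello world", "goodnight moon"]
references = ["hello there", "general kenobi"]

# Use a smaller featurization model, a shorter maximum length, and quiet output
mauve_results = mauve.compute(
    predictions=predictions,
    references=references,
    featurize_model_name="gpt2",
    max_text_length=256,
    verbose=False,
)
print(mauve_results.mauve)
```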
## Examples Perfect match between prediction and reference: ```python from datasets import load_metric mauve = load_metric('mauve') predictions = ["hello world", "goodnight moon"] references = ["hello world", "goodnight moon"] mauve_results = mauve.compute(predictions=predictions, references=references) print(mauve_results.mauve) 1.0 ``` Partial match between prediction and reference: ```python from datasets import load_metric mauve = load_metric('mauve') predictions = ["hello world", "goodnight moon"] references = ["hello there", "general kenobi"] mauve_results = mauve.compute(predictions=predictions, references=references) print(mauve_results.mauve) 0.27811372536724027 ``` ## Limitations and bias The [original MAUVE paper](https://arxiv.org/abs/2102.01454) did not analyze the inductive biases present in different embedding models, but related work has shown different kinds of biases exist in many popular generative language models including GPT-2 (see [Kirk et al., 2021](https://arxiv.org/pdf/2102.04130.pdf), [Abid et al., 2021](https://arxiv.org/abs/2101.05783)). The extent to which these biases can impact the MAUVE score has not been quantified. Also, calculating the MAUVE metric involves downloading the model from which features are obtained -- the default model, `gpt2-large`, takes over 3GB of storage space and downloading it can take a significant amount of time depending on the speed of your internet connection. If this is an issue, choose a smaller model; for instance `gpt` is 523MB. ## Citation ```bibtex @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } ``` ## Further References - [Official MAUVE implementation](https://github.com/krishnap25/mauve) - [Hugging Face Tasks - Text Generation](https://huggingface.co/tasks/text-generation)
datasets/metrics/mauve/README.md/0
{ "file_path": "datasets/metrics/mauve/README.md", "repo_id": "datasets", "token_count": 1650 }
63
# Metric Card for ROC AUC ## Metric Description This metric computes the area under the curve (AUC) for the Receiver Operating Characteristic Curve (ROC). The return values represent how well the model used is predicting the correct classes, based on the input data. A score of `0.5` means that the model is predicting exactly at chance, i.e. the model's predictions are correct at the same rate as if the predictions were being decided by the flip of a fair coin or the roll of a fair die. A score above `0.5` indicates that the model is doing better than chance, while a score below `0.5` indicates that the model is doing worse than chance. This metric has three separate use cases: - **binary**: The case in which there are only two different label classes, and each example gets only one label. This is the default implementation. - **multiclass**: The case in which there can be more than two different label classes, but each example still gets only one label. - **multilabel**: The case in which there can be more than two different label classes, and each example can have more than one label. ## How to Use At minimum, this metric requires references and prediction scores: ```python >>> roc_auc_score = datasets.load_metric("roc_auc") >>> refs = [1, 0, 1, 1, 0, 0] >>> pred_scores = [0.5, 0.2, 0.99, 0.3, 0.1, 0.7] >>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores) >>> print(round(results['roc_auc'], 2)) 0.78 ``` The default implementation of this metric is the **binary** implementation. If employing the **multiclass** or **multilabel** use cases, the keyword `"multiclass"` or `"multilabel"` must be specified when loading the metric: - In the **multiclass** case, the metric is loaded with: ```python >>> roc_auc_score = datasets.load_metric("roc_auc", "multiclass") ``` - In the **multilabel** case, the metric is loaded with: ```python >>> roc_auc_score = datasets.load_metric("roc_auc", "multilabel") ``` See the [Examples Section Below](#examples_section) for more extensive examples. ### Inputs - **`references`** (array-like of shape (n_samples,) or (n_samples, n_classes)): Ground truth labels. Expects different inputs based on use case: - binary: expects an array-like of shape (n_samples,) - multiclass: expects an array-like of shape (n_samples,) - multilabel: expects an array-like of shape (n_samples, n_classes) - **`prediction_scores`** (array-like of shape (n_samples,) or (n_samples, n_classes)): Model predictions. Expects different inputs based on use case: - binary: expects an array-like of shape (n_samples,) - multiclass: expects an array-like of shape (n_samples, n_classes). The probability estimates must sum to 1 across the possible classes. - multilabel: expects an array-like of shape (n_samples, n_classes) - **`average`** (`str`): Type of average, and is ignored in the binary use case. Defaults to `'macro'`. Options are: - `'micro'`: Calculates metrics globally by considering each element of the label indicator matrix as a label. Only works with the multilabel use case. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average, weighted by support (i.e. the number of true instances for each label). - `'samples'`: Calculate metrics for each instance, and find their average. Only works with the multilabel use case. - `None`: No average is calculated, and scores for each class are returned. 
Only works with the multilabel use case.
- **`sample_weight`** (array-like of shape (n_samples,)): Sample weights. Defaults to None.
- **`max_fpr`** (`float`): If not None, the standardized partial AUC over the range [0, `max_fpr`] is returned. Must be greater than `0` and less than or equal to `1`. Defaults to `None`. Note: For the multiclass use case, `max_fpr` should be either `None` or `1.0`, as partial ROC AUC computation is not currently supported for `multiclass`.
- **`multi_class`** (`str`): Only used for multiclass targets, in which case it is required. Determines the type of configuration to use. Options are:
    - `'ovr'`: Stands for One-vs-rest. Computes the AUC of each class against the rest. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when `average == 'macro'`, because class imbalance affects the composition of each of the 'rest' groupings.
    - `'ovo'`: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes. Insensitive to class imbalance when `average == 'macro'`.
- **`labels`** (array-like of shape (n_classes,)): Only used for multiclass targets. List of labels that index the classes in `prediction_scores`. If `None`, the numerical or lexicographical order of the labels in `prediction_scores` is used. Defaults to `None`.

### Output Values
This metric returns a dict containing the `roc_auc` score. The score is a `float`, unless it is the multilabel case with `average=None`, in which case the score is a numpy `array` with entries of type `float`.

The output therefore generally takes the following format:
```python
{'roc_auc': 0.778}
```

In the multilabel case with `average=None`, however, the output takes the following format:
```python
{'roc_auc': array([0.83333333, 0.375, 0.94444444])}
```

ROC AUC scores can take on any value between `0` and `1`, inclusive.

#### Values from Popular Papers

### <a name="examples_section"></a>Examples
Example 1, the **binary** use case:
```python
>>> roc_auc_score = datasets.load_metric("roc_auc")
>>> refs = [1, 0, 1, 1, 0, 0]
>>> pred_scores = [0.5, 0.2, 0.99, 0.3, 0.1, 0.7]
>>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores)
>>> print(round(results['roc_auc'], 2))
0.78
```

Example 2, the **multiclass** use case:
```python
>>> roc_auc_score = datasets.load_metric("roc_auc", "multiclass")
>>> refs = [1, 0, 1, 2, 2, 0]
>>> pred_scores = [[0.3, 0.5, 0.2],
...                [0.7, 0.2, 0.1],
...                [0.005, 0.99, 0.005],
...                [0.2, 0.3, 0.5],
...                [0.1, 0.1, 0.8],
...                [0.1, 0.7, 0.2]]
>>> results = roc_auc_score.compute(references=refs,
...                                 prediction_scores=pred_scores,
...                                 multi_class='ovr')
>>> print(round(results['roc_auc'], 2))
0.85
```

Example 3, the **multilabel** use case:
```python
>>> roc_auc_score = datasets.load_metric("roc_auc", "multilabel")
>>> refs = [[1, 1, 0],
...         [1, 1, 0],
...         [0, 1, 0],
...         [0, 0, 1],
...         [0, 1, 1],
...         [1, 0, 1]]
>>> pred_scores = [[0.3, 0.5, 0.2],
...                [0.7, 0.2, 0.1],
...                [0.005, 0.99, 0.005],
...                [0.2, 0.3, 0.5],
...                [0.1, 0.1, 0.8],
...                [0.1, 0.7, 0.2]]
>>> results = roc_auc_score.compute(references=refs,
...                                 prediction_scores=pred_scores,
...                                 average=None)
>>> print([round(res, 2) for res in results['roc_auc']])
[0.83, 0.38, 0.94]
```

## Limitations and Bias

## Citation
```bibtex
@article{doi:10.1177/0272989X8900900307,
  author = {Donna Katzman McClish},
  title = {Analyzing a Portion of the ROC Curve},
  journal = {Medical Decision Making},
  volume = {9},
  number = {3},
  pages = {190-195},
  year = {1989},
  doi = {10.1177/0272989X8900900307},
  note = {PMID: 2668680},
  url = {https://doi.org/10.1177/0272989X8900900307},
  eprint = {https://doi.org/10.1177/0272989X8900900307}
}
```

```bibtex
@article{10.1023/A:1010920819831,
  author = {Hand, David J. and Till, Robert J.},
  title = {A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems},
  year = {2001},
  issue_date = {November 2001},
  publisher = {Kluwer Academic Publishers},
  address = {USA},
  volume = {45},
  number = {2},
  issn = {0885-6125},
  url = {https://doi.org/10.1023/A:1010920819831},
  doi = {10.1023/A:1010920819831},
  journal = {Mach. Learn.},
  month = {oct},
  pages = {171--186},
  numpages = {16},
  keywords = {Gini index, AUC, error rate, ROC curve, receiver operating characteristic}
}
```

```bibtex
@article{scikit-learn,
  title = {Scikit-learn: Machine Learning in {P}ython},
  author = {Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal = {Journal of Machine Learning Research},
  volume = {12},
  pages = {2825--2830},
  year = {2011}
}
```

## Further References
This implementation is a wrapper around the [scikit-learn implementation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html), and much of the documentation here was adapted from that documentation.

The [Guide to ROC and AUC](https://youtu.be/iCZJfO-7C5Q) video from the channel Data Science Bits is also very informative.
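As a further illustration, the `max_fpr` input described above restricts the score to a standardized partial AUC over a low false-positive-rate range. A minimal sketch, reusing the binary inputs from Example 1 (the exact value depends on the chosen `max_fpr`):
```python
>>> roc_auc_score = datasets.load_metric("roc_auc")
>>> refs = [1, 0, 1, 1, 0, 0]
>>> pred_scores = [0.5, 0.2, 0.99, 0.3, 0.1, 0.7]
>>> results = roc_auc_score.compute(
...     references=refs, prediction_scores=pred_scores, max_fpr=0.5
... )
>>> print(results['roc_auc'])  # a single float, as in the binary case above
```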
datasets/metrics/roc_auc/README.md/0
{ "file_path": "datasets/metrics/roc_auc/README.md", "repo_id": "datasets", "token_count": 3273 }
64
"""Official evaluation script for SQuAD version 2.0. In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted probability that a question is unanswerable. """ import argparse import collections import json import os import re import string import sys import numpy as np ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE) OPTS = None def parse_args(): parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.") parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.") parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.") parser.add_argument( "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', ) parser.add_argument( "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory." ) parser.add_argument("--verbose", "-v", action="store_true") if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def make_qid_to_has_ans(dataset): qid_to_has_ans = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"]) return qid_to_has_ans def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return ARTICLES_REGEX.sub(" ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(dataset, preds): exact_scores = {} f1_scores = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: qid = qa["id"] gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [""] if qid not in preds: print(f"Missing prediction for {qid}") continue a_pred = preds[qid] # Take max over all gold answers exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = 
na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values()) / total), ("f1", 100.0 * sum(f1_scores.values()) / total), ("total", total), ] ) else: total = len(qid_list) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), ("total", total), ] ) def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval[f"{prefix}_{k}"] = new_eval[k] def plot_pr_curve(precisions, recalls, out_image, title): plt.step(recalls, precisions, color="b", alpha=0.2, where="post") plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b") plt.xlabel("Recall") plt.ylabel("Precision") plt.xlim([0.0, 1.05]) plt.ylim([0.0, 1.05]) plt.title(title) plt.savefig(out_image) plt.clf() def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None): qid_list = sorted(na_probs, key=lambda k: na_probs[k]) true_pos = 0.0 cur_p = 1.0 cur_r = 0.0 precisions = [1.0] recalls = [0.0] avg_prec = 0.0 for i, qid in enumerate(qid_list): if qid_to_has_ans[qid]: true_pos += scores[qid] cur_p = true_pos / float(i + 1) cur_r = true_pos / float(num_true_pos) if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(cur_p) recalls.append(cur_r) if out_image: plot_pr_curve(precisions, recalls, out_image, title) return {"ap": 100.0 * avg_prec} def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir): if out_image_dir and not os.path.exists(out_image_dir): os.makedirs(out_image_dir) num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return pr_exact = make_precision_recall_eval( exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", ) pr_f1 = make_precision_recall_eval( f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", ) oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} pr_oracle = make_precision_recall_eval( oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)", ) merge_eval(main_eval, pr_exact, "pr_exact") merge_eval(main_eval, pr_f1, "pr_f1") merge_eval(main_eval, pr_oracle, "pr_oracle") def histogram_na_prob(na_probs, qid_list, image_dir, name): if not qid_list: return x = [na_probs[k] for k in qid_list] weights = np.ones_like(x) / float(len(x)) plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0)) plt.xlabel("Model probability of no-answer") plt.ylabel("Proportion of dataset") plt.title(f"Histogram of no-answer probability: {name}") plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png")) plt.clf() def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] return 100.0 * best_score / len(scores), best_thresh def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh def main(): with open(OPTS.data_file) as f: dataset_json = json.load(f) dataset = dataset_json["data"] with open(OPTS.pred_file) as f: preds = json.load(f) if OPTS.na_prob_file: with open(OPTS.na_prob_file) as f: na_probs = json.load(f) else: na_probs = {k: 0.0 for k in preds} qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(dataset, preds) exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) out_eval = make_eval_dict(exact_thresh, f1_thresh) if has_ans_qids: has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids) merge_eval(out_eval, has_ans_eval, "HasAns") if no_ans_qids: no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) merge_eval(out_eval, no_ans_eval, "NoAns") if OPTS.na_prob_file: find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir) histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns") histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns") if OPTS.out_file: with open(OPTS.out_file, "w") as f: json.dump(out_eval, f) else: print(json.dumps(out_eval, indent=2)) if __name__ == "__main__": OPTS = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
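# Hedged usage sketch (added for illustration, not part of the original script): besides
# the CLI defined in parse_args(), the scoring helpers above can be called directly on
# answer strings. Both apply normalize_answer() first (lowercasing and stripping
# punctuation, articles and extra whitespace).
def _example_usage():
    exact = compute_exact("The Eiffel Tower", "the eiffel tower!")  # 1 after normalization
    f1 = compute_f1("the Eiffel Tower in Paris", "Eiffel Tower")  # token-level F1 in (0, 1]
    return exact, f1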
datasets/metrics/squad_v2/evaluate.py/0
{ "file_path": "datasets/metrics/squad_v2/evaluate.py", "repo_id": "datasets", "token_count": 5443 }
65
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # πŸ€— Datasets Notebooks You can find here a list of the official notebooks provided by Hugging Face. Also, we would like to list here interesting content created by the community. If you wrote some notebook(s) leveraging πŸ€— Datasets and would like it to be listed here, please open a Pull Request so it can be included under the Community notebooks. ## Hugging Face's notebooks πŸ€— ### Documentation notebooks You can open any page of the documentation as a notebook in Colab (there is a button directly on said pages) but they are also listed here if you need them: | Notebook | Description | | | |:----------|:-------------|:-------------|------:| | [Quickstart](https://github.com/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb) | A quick presentation on integrating Datasets into a model training workflow |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)|
datasets/notebooks/README.md/0
{ "file_path": "datasets/notebooks/README.md", "repo_id": "datasets", "token_count": 534 }
66
import importlib import importlib.metadata import logging import os import platform from pathlib import Path from typing import Optional from packaging import version logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging # Datasets S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets" CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets" REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}" # Metrics S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics" CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric" REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}" # Hub HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}" HUB_DEFAULT_VERSION = "main" PY_VERSION = version.parse(platform.python_version()) # General environment variables accepted values for booleans ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"}) # Imports DILL_VERSION = version.parse(importlib.metadata.version("dill")) FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec")) PANDAS_VERSION = version.parse(importlib.metadata.version("pandas")) PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow")) HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub")) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_JAX", "AUTO").upper() TORCH_VERSION = "N/A" TORCH_AVAILABLE = False if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None if TORCH_AVAILABLE: try: TORCH_VERSION = version.parse(importlib.metadata.version("torch")) logger.info(f"PyTorch version {TORCH_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling PyTorch because USE_TF is set") TF_VERSION = "N/A" TF_AVAILABLE = False if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None if TF_AVAILABLE: # For the metadata, we have to look for both tensorflow and tensorflow-cpu for package in [ "tensorflow", "tensorflow-cpu", "tensorflow-gpu", "tf-nightly", "tf-nightly-cpu", "tf-nightly-gpu", "intel-tensorflow", "tensorflow-rocm", "tensorflow-macos", ]: try: TF_VERSION = version.parse(importlib.metadata.version(package)) except importlib.metadata.PackageNotFoundError: continue else: break else: TF_AVAILABLE = False if TF_AVAILABLE: if TF_VERSION.major < 2: logger.info(f"TensorFlow found but with version {TF_VERSION}. 
`datasets` requires version 2 minimum.") TF_AVAILABLE = False else: logger.info(f"TensorFlow version {TF_VERSION} available.") else: logger.info("Disabling Tensorflow because USE_TORCH is set") JAX_VERSION = "N/A" JAX_AVAILABLE = False if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None if JAX_AVAILABLE: try: JAX_VERSION = version.parse(importlib.metadata.version("jax")) logger.info(f"JAX version {JAX_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling JAX because USE_JAX is set to False") USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper() BEAM_VERSION = "N/A" BEAM_AVAILABLE = False if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES: try: BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam")) BEAM_AVAILABLE = True logger.info(f"Apache Beam version {BEAM_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling Apache Beam because USE_BEAM is set to False") # Optional tools for data loading SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None # Optional tools for feature decoding PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( importlib.import_module("soundfile").__libsndfile_version__ ) >= version.parse("1.0.31") IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( importlib.import_module("soundfile").__libsndfile_version__ ) >= version.parse("1.1.0") # Optional compression tools RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None # Cache location DEFAULT_XDG_CACHE_HOME = "~/.cache" XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME) DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface") HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME)) DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets") HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE)) DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics") HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE)) DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules") HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE)) DOWNLOADED_DATASETS_DIR = "downloads" DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR) DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH)) EXTRACTED_DATASETS_DIR = "extracted" DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR) EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH)) # Download count for the website HF_UPDATE_DOWNLOAD_COUNTS = ( os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES ) # Remote dataset scripts support __HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1") HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = ( True if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in 
ENV_VARS_TRUE_VALUES else False if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES else None ) TIME_OUT_REMOTE_CODE = 15 # Datasets-server USE_PARQUET_EXPORT = True # Batch size constants. For more info, see: # https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations) DEFAULT_MAX_BATCH_SIZE = 1000 # Size of the preloaded record batch in `Dataset.__iter__` ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10 # Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare) MAX_SHARD_SIZE = "500MB" # Parquet configuration PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 # Offline mode HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES # Here, `True` will disable progress bars globally without possibility of enabling it # programmatically. `False` will enable them without possibility of disabling them. # If environment variable is not set (None), then the user is free to enable/disable # them programmatically. # TL;DR: env variable has priority over code __HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS") HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = ( __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None else None ) # In-memory DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE)) # File names DATASET_ARROW_FILENAME = "dataset.arrow" DATASET_INDICES_FILENAME = "indices.arrow" DATASET_STATE_JSON_FILENAME = "state.json" DATASET_INFO_FILENAME = "dataset_info.json" DATASETDICT_INFOS_FILENAME = "dataset_infos.json" LICENSE_FILENAME = "LICENSE" METRIC_INFO_FILENAME = "metric_info.json" DATASETDICT_JSON_FILENAME = "dataset_dict.json" METADATA_CONFIGS_FIELD = "configs" REPOCARD_FILENAME = "README.md" REPOYAML_FILENAME = ".huggingface.yaml" MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules" MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255 # Temporary cache directory prefix TEMP_CACHE_DIR_PREFIX = "hf_datasets-" # Streaming STREAMING_READ_MAX_RETRIES = 20 STREAMING_READ_RETRY_INTERVAL = 5 # Datasets without script DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10 ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 # Progress bars PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec # Maximum number of uploaded files per commit UPLOADS_MAX_NUMBER_PER_COMMIT = 50 # Backward compatibiliy MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
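# Hedged illustration (comment only, not part of the original module): most values above
# are read from environment variables at import time, so overrides must be set before the
# first `import datasets`. For example, to force offline mode and a custom cache location:
#
#     import os
#     os.environ["HF_DATASETS_OFFLINE"] = "1"      # any of ENV_VARS_TRUE_VALUES works
#     os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_datasets_cache"  # hypothetical path
#     import datasets  # datasets.config.HF_DATASETS_OFFLINE is now True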
datasets/src/datasets/config.py/0
{ "file_path": "datasets/src/datasets/config.py", "repo_id": "datasets", "token_count": 4310 }
67
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        The compressed file system can be instantiated from any compressed file.
        It reads the contents of the compressed file as a filesystem with one file inside, as if it was an archive.

        The single file inside the filesystem is named after the compressed file,
        without the compression extension at the end of the filename.

        Args:
            fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
            mode (:obj:``str``): Currently, only 'rb' accepted
            target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
            target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "."
in self.compressed_name else self.compressed_name ) self.dir_cache = None @classmethod def _strip_protocol(cls, path): # compressed file paths are always relative to the archive root return super()._strip_protocol(path).lstrip("/") def _get_dirs(self): if self.dir_cache is None: f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name} self.dir_cache = {f["name"]: f} def cat(self, path: str): return self.file.open().read() def _open( self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ): path = self._strip_protocol(path) if mode != "rb": raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'") return self.file.open() class Bz2FileSystem(BaseCompressedFileFileSystem): """Read contents of BZ2 file as a filesystem with one file inside.""" protocol = "bz2" compression = "bz2" extension = ".bz2" class GzipFileSystem(BaseCompressedFileFileSystem): """Read contents of GZIP file as a filesystem with one file inside.""" protocol = "gzip" compression = "gzip" extension = ".gz" class Lz4FileSystem(BaseCompressedFileFileSystem): """Read contents of LZ4 file as a filesystem with one file inside.""" protocol = "lz4" compression = "lz4" extension = ".lz4" class XzFileSystem(BaseCompressedFileFileSystem): """Read contents of .xz (LZMA) file as a filesystem with one file inside.""" protocol = "xz" compression = "xz" extension = ".xz" class ZstdFileSystem(BaseCompressedFileFileSystem): """ Read contents of zstd file as a filesystem with one file inside. Note that reading in binary mode with fsspec isn't supported yet: https://github.com/indygreg/python-zstandard/issues/136 """ protocol = "zstd" compression = "zstd" extension = ".zst" def __init__( self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs, ): super().__init__( fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 _enter = self.file.__enter__ class WrappedFile: def __init__(self, file_): self._file = file_ def __enter__(self): self._file.__enter__() return self def __exit__(self, *args, **kwargs): self._file.__exit__(*args, **kwargs) def __iter__(self): return iter(self._file) def __next__(self): return next(self._file) def __getattr__(self, attr): return getattr(self._file, attr) def fixed_enter(*args, **kwargs): return WrappedFile(_enter(*args, **kwargs)) self.file.__enter__ = fixed_enter
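# Hedged usage sketch (assumes these filesystems are registered with fsspec under the
# protocols declared above, as the `datasets` package does on import). A compressed file
# is then exposed as a one-file filesystem through fsspec's URL chaining, e.g. for gzip:
def _example_read_gzip(path_to_gz: str) -> bytes:
    # `path_to_gz` is a hypothetical local path such as "/path/to/file.txt.gz"
    with fsspec.open(f"gzip://file.txt::{path_to_gz}", "rb") as f:
        return f.read()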
datasets/src/datasets/filesystems/compression.py/0
{ "file_path": "datasets/src/datasets/filesystems/compression.py", "repo_id": "datasets", "token_count": 2608 }
68
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import tqdm as hf_tqdm
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
    This allows optimizing random access to a parquet file, since accessing one row requires
    reading its entire row group.
    This could be tuned further for querying/iterating, but it at least matches the dataset
    viewer expectations on HF.

    Args:
        features (`datasets.Features`):
            Features of the dataset to write.
    Returns:
        writer_batch_size (`Optional[int]`):
            Writer batch size to pass to a dataset builder.
            If `None`, then it will use the `datasets` default.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                # try_from_hf_gcs=try_from_hf_gcs,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else
config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with open(self.path_or_buf, "wb+") as buffer: written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs) else: written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs) return written def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int: """Writes the pyarrow table as Parquet to a binary file handle. Caller is responsible for opening and closing the handle. """ written = 0 _ = parquet_writer_kwargs.pop("path_or_buf", None) schema = self.dataset.features.arrow_schema writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) for offset in hf_tqdm( range(0, len(self.dataset), batch_size), unit="ba", desc="Creating parquet from Arrow format", ): batch = query_table( table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices, ) writer.write_table(batch) written += batch.nbytes writer.close() return written
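# Hedged usage sketch (illustration only): these reader/writer classes are typically reached
# through `Dataset.to_parquet` / `Dataset.from_parquet`, but they can also be used directly.
# The file name below is an assumption for the example; any writable path works.
def _example_parquet_roundtrip(path: str = "example.parquet") -> Dataset:
    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ParquetDatasetWriter(ds, path).write()  # returns the number of bytes written
    return ParquetDatasetReader(path, split="train").read()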
datasets/src/datasets/io/parquet.py/0
{ "file_path": "datasets/src/datasets/io/parquet.py", "repo_id": "datasets", "token_count": 2566 }
69
import abc import copy import dataclasses from dataclasses import dataclass from typing import ClassVar, Dict, Type, TypeVar from ..features import Features T = TypeVar("T", bound="TaskTemplate") @dataclass(frozen=True) class TaskTemplate(abc.ABC): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization task: str input_schema: ClassVar[Features] label_schema: ClassVar[Features] def align_with_features(self: T, features: Features) -> T: """ Align features with the task template. """ # No-op return copy.deepcopy(self) @property def features(self) -> Features: return Features(**self.input_schema, **self.label_schema) @property @abc.abstractmethod def column_mapping(self) -> Dict[str, str]: raise NotImplementedError @classmethod def from_dict(cls: Type[T], template_dict: dict) -> T: field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in template_dict.items() if k in field_names})
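# Hedged illustration (hypothetical subclass, comment only): a concrete template pins the
# task name and schemas as class-level defaults and exposes the column mapping, e.g.:
#
#     from ..features import Features, Value
#
#     @dataclass(frozen=True)
#     class TextClassificationLikeTemplate(TaskTemplate):
#         task: str = "my-text-classification"  # hypothetical task name
#         input_schema: ClassVar[Features] = Features({"text": Value("string")})
#         label_schema: ClassVar[Features] = Features({"labels": Value("string")})
#         text_column: str = "text"
#         label_column: str = "labels"
#
#         @property
#         def column_mapping(self) -> Dict[str, str]:
#             return {self.text_column: "text", self.label_column: "labels"}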
datasets/src/datasets/tasks/base.py/0
{ "file_path": "datasets/src/datasets/tasks/base.py", "repo_id": "datasets", "token_count": 417 }
70
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import copy import io import json import multiprocessing import os import posixpath import re import shutil import sys import time import urllib import warnings from contextlib import closing, contextmanager from functools import partial from pathlib import Path from typing import Optional, TypeVar, Union from unittest.mock import patch from urllib.parse import urljoin, urlparse import fsspec import huggingface_hub import requests from fsspec.core import strip_protocol from fsspec.utils import can_be_local from huggingface_hub.utils import insecure_hashlib from packaging import version from .. import __version__, config from ..download.download_config import DownloadConfig from . import _tqdm, logging from . import tqdm as hf_tqdm from ._filelock import FileLock from .extract import ExtractManager logger = logging.get_logger(__name__) # pylint: disable=invalid-name INCOMPLETE_SUFFIX = ".incomplete" T = TypeVar("T", str, Path) def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str: """ Add hf_modules_cache to the python path. By default hf_modules_cache='~/.cache/huggingface/modules'. It can also be set with the environment variable HF_MODULES_CACHE. This is used to add modules such as `datasets_modules` """ hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE hf_modules_cache = str(hf_modules_cache) if hf_modules_cache not in sys.path: sys.path.append(hf_modules_cache) os.makedirs(hf_modules_cache, exist_ok=True) if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")): with open(os.path.join(hf_modules_cache, "__init__.py"), "w"): pass return hf_modules_cache def is_remote_url(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_local_path(url_or_filename: str) -> bool: # On unix the scheme of a local path is empty (for both absolute and relative), # while on windows the scheme is the drive name (ex: "c") for absolute paths. 
# for details on the windows behavior, see https://bugs.python.org/issue42215 return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_relative_path(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename) def relative_to_absolute_path(path: T) -> T: """Convert relative path to absolute path.""" abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path)))) return Path(abs_path_str) if isinstance(path, Path) else abs_path_str def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str: if dataset: endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX else: endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX return "/".join((endpoint, identifier, filename)) def head_hf_s3( identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0 ) -> Union[requests.Response, Exception]: return http_head( hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset), max_retries=max_retries, ) def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str: default_revision = "main" if version.parse(__version__).is_devrelease else __version__ revision = revision or default_revision if dataset: return config.REPO_DATASETS_URL.format(revision=revision, path=path, name=name) else: return config.REPO_METRICS_URL.format(revision=revision, path=path, name=name) def url_or_path_join(base_name: str, *pathnames: str) -> str: if is_remote_url(base_name): return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)) else: return Path(base_name, *pathnames).as_posix() def url_or_path_parent(url_or_path: str) -> str: if is_remote_url(url_or_path): return url_or_path[: url_or_path.rindex("/")] else: return os.path.dirname(url_or_path) def hash_url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can identify it as a HDF5 file (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) """ url_bytes = url.encode("utf-8") url_hash = insecure_hashlib.sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = insecure_hashlib.sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".py"): filename += ".py" return filename def cached_path( url_or_filename, download_config=None, **download_kwargs, ) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. 
Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk ValueError: if it couldn't parse the url or filename correctly requests.exceptions.ConnectionError: in case of internet connection issue """ if download_config is None: download_config = DownloadConfig(**download_kwargs) cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) # Convert fsspec URL in the format "file://local/path" to "local/path" if can_be_local(url_or_filename): url_or_filename = strip_protocol(url_or_filename) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=download_config.force_download, proxies=download_config.proxies, resume_download=download_config.resume_download, user_agent=download_config.user_agent, local_files_only=download_config.local_files_only, use_etag=download_config.use_etag, max_retries=download_config.max_retries, token=download_config.token, ignore_url_params=download_config.ignore_url_params, storage_options=download_config.storage_options, download_desc=download_config.download_desc, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif is_local_path(url_or_filename): # File, but it doesn't exist. raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if output_path is None: return output_path if download_config.extract_compressed_file: output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( output_path, force_extract=download_config.force_extract ) return relative_to_absolute_path(output_path) def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: ua = f"datasets/{__version__}" ua += f"; python/{config.PY_VERSION}" ua += f"; huggingface_hub/{huggingface_hub.__version__}" ua += f"; pyarrow/{config.PYARROW_VERSION}" if config.TORCH_AVAILABLE: ua += f"; torch/{config.TORCH_VERSION}" if config.TF_AVAILABLE: ua += f"; tensorflow/{config.TF_VERSION}" if config.JAX_AVAILABLE: ua += f"; jax/{config.JAX_VERSION}" if config.BEAM_AVAILABLE: ua += f"; apache_beam/{config.BEAM_VERSION}" if isinstance(user_agent, dict): ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" elif isinstance(user_agent, str): ua += "; " + user_agent return ua def get_authentication_headers_for_url( url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated" ) -> dict: """Handle the HF authentication""" if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token if url.startswith(config.HF_ENDPOINT): return huggingface_hub.utils.build_hf_headers( token=token, library_name="datasets", library_version=__version__ ) else: return {} class OfflineModeIsEnabled(ConnectionError): pass def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None): """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True.""" 
if config.HF_DATASETS_OFFLINE: raise OfflineModeIsEnabled( "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg) ) def _request_with_retry( method: str, url: str, max_retries: int = 0, base_wait_time: float = 0.5, max_wait_time: float = 2, timeout: float = 10.0, **params, ) -> requests.Response: """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff. Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised. Args: method (str): HTTP method, such as 'GET' or 'HEAD'. url (str): The URL of the resource to fetch. max_retries (int): Maximum number of retries, defaults to 0 (no retries). base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between retries then grows exponentially, capped by max_wait_time. max_wait_time (float): Maximum amount of time between two retries, in seconds. **params (additional keyword arguments): Params to pass to :obj:`requests.request`. """ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") tries, success = 0, False while not success: tries += 1 try: response = requests.request(method=method.upper(), url=url, timeout=timeout, **params) success = True except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err: if tries > max_retries: raise err else: logger.info(f"{method} request to {url} timed out, retrying... [{tries/max_retries}]") sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff time.sleep(sleep_time) return response def fsspec_head(url, storage_options=None): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options) if len(paths) > 1: raise ValueError(f"HEAD can be called with at most one path but was called with {paths}") return fs.info(paths[0]) def stack_multiprocessing_download_progress_bars(): # Stack downloads progress bars automatically using HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS=1 # We use environment variables since the download may happen in a subprocess return patch.dict(os.environ, {"HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS": "1"}) class TqdmCallback(fsspec.callbacks.TqdmCallback): def __init__(self, tqdm_kwargs=None, *args, **kwargs): super().__init__(tqdm_kwargs, *args, **kwargs) self._tqdm = _tqdm # replace tqdm.tqdm by datasets.tqdm.tqdm def fsspec_get(url, temp_file, storage_options=None, desc=None): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options) if len(paths) > 1: raise ValueError(f"GET can be called with at most one path but was called with {paths}") callback = TqdmCallback( tqdm_kwargs={ "desc": desc or "Downloading", "unit": "B", "unit_scale": True, "position": multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1" and multiprocessing.current_process()._identity else None, } ) fs.get_file(paths[0], temp_file.name, callback=callback) def ftp_head(url, timeout=10.0): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") try: with closing(urllib.request.urlopen(url, timeout=timeout)) as r: r.read(1) except Exception: return False return True def ftp_get(url, temp_file, timeout=10.0): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") try: logger.info(f"Getting through 
FTP {url} into {temp_file.name}") with closing(urllib.request.urlopen(url, timeout=timeout)) as r: shutil.copyfileobj(r, temp_file) except urllib.error.URLError as e: raise ConnectionError(e) from None def http_get( url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None ) -> Optional[requests.Response]: headers = dict(headers) if headers is not None else {} headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) if resume_size > 0: headers["Range"] = f"bytes={resume_size:d}-" response = _request_with_retry( method="GET", url=url, stream=True, proxies=proxies, headers=headers, cookies=cookies, max_retries=max_retries, timeout=timeout, ) if temp_file is None: return response if response.status_code == 416: # Range not satisfiable return content_length = response.headers.get("Content-Length") total = resume_size + int(content_length) if content_length is not None else None with hf_tqdm( unit="B", unit_scale=True, total=total, initial=resume_size, desc=desc or "Downloading", position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1" and multiprocessing.current_process()._identity else None, ) as progress: for chunk in response.iter_content(chunk_size=1024): progress.update(len(chunk)) temp_file.write(chunk) def http_head( url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0 ) -> requests.Response: headers = copy.deepcopy(headers) or {} headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) response = _request_with_retry( method="HEAD", url=url, proxies=proxies, headers=headers, cookies=cookies, allow_redirects=allow_redirects, timeout=timeout, max_retries=max_retries, ) return response def request_etag( url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated" ) -> Optional[str]: if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token if urlparse(url).scheme not in ("http", "https"): return None headers = get_authentication_headers_for_url(url, token=token) response = http_head(url, headers=headers, max_retries=3) response.raise_for_status() etag = response.headers.get("ETag") if response.ok else None return etag def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=100, resume_download=False, user_agent=None, local_files_only=False, use_etag=True, max_retries=0, token=None, use_auth_token="deprecated", ignore_url_params=False, storage_options=None, download_desc=None, ) -> str: """ Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the path to the cached file. 
Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk """ if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token if cache_dir is None: cache_dir = config.HF_DATASETS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) if ignore_url_params: # strip all query parameters and #fragments from the URL cached_url = urljoin(url, urlparse(url).path) else: cached_url = url # additional parameters may be added to the given URL connected = False response = None cookies = None etag = None head_error = None scheme = None # Try a first time to file the file on the local file system without eTag (None) # if we don't ask for 'force_download' then we spare a request filename = hash_url_to_filename(cached_url, etag=None) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download and not use_etag: return cache_path # Prepare headers for authentication headers = get_authentication_headers_for_url(url, token=token) if user_agent is not None: headers["user-agent"] = user_agent # We don't have the file locally or we need an eTag if not local_files_only: scheme = urlparse(url).scheme if scheme == "ftp": connected = ftp_head(url) elif scheme not in ("http", "https"): response = fsspec_head(url, storage_options=storage_options) # s3fs uses "ETag", gcsfs uses "etag" etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None connected = True try: response = http_head( url, allow_redirects=True, proxies=proxies, timeout=etag_timeout, max_retries=max_retries, headers=headers, ) if response.status_code == 200: # ok etag = response.headers.get("ETag") if use_etag else None for k, v in response.cookies.items(): # In some edge cases, we need to get a confirmation token if k.startswith("download_warning") and "drive.google.com" in url: url += "&confirm=" + v cookies = response.cookies connected = True # Fix Google Drive URL to avoid Virus scan warning if "drive.google.com" in url and "confirm=" not in url: url += "&confirm=t" # In some edge cases, head request returns 400 but the connection is actually ok elif ( (response.status_code == 400 and "firebasestorage.googleapis.com" in url) or (response.status_code == 405 and "drive.google.com" in url) or ( response.status_code == 403 and ( re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url) or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url) ) ) or (response.status_code == 403 and "ndownloader.figstatic.com" in url) ): connected = True logger.info(f"Couldn't get ETag version for url {url}") elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None: raise ConnectionError( f"Unauthorized for URL {url}. Please use the parameter `token=True` after logging in with `huggingface-cli login`" ) except (OSError, requests.exceptions.Timeout) as e: # not connected head_error = e pass # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. 
# try to get the last downloaded one if not connected: if os.path.exists(cache_path) and not force_download: return cache_path if local_files_only: raise FileNotFoundError( f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been" " disabled. To enable file online look-ups, set 'local_files_only' to False." ) elif response is not None and response.status_code == 404: raise FileNotFoundError(f"Couldn't find file at {url}") _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") if head_error is not None: raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") elif response is not None: raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})") else: raise ConnectionError(f"Couldn't reach {url}") # Try a second time filename = hash_url_to_filename(cached_url, etag) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download: return cache_path # From now on, connected is True. # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # Retry in case previously locked processes just enter after the precedent process releases the lock if os.path.exists(cache_path) and not force_download: return cache_path incomplete_path = cache_path + ".incomplete" @contextmanager def temp_file_manager(mode="w+b"): with open(incomplete_path, mode) as f: yield f resume_size = 0 if resume_download: temp_file_manager = partial(temp_file_manager, mode="a+b") if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size # Download to temporary file, then copy to cache path once finished. # Otherwise, you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") # GET file object if scheme == "ftp": ftp_get(url, temp_file) elif scheme not in ("http", "https"): fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc) else: http_get( url, temp_file=temp_file, proxies=proxies, resume_size=resume_size, headers=headers, cookies=cookies, max_retries=max_retries, desc=download_desc, ) logger.info(f"storing {url} in cache at {cache_path}") shutil.move(temp_file.name, cache_path) umask = os.umask(0o666) os.umask(umask) os.chmod(cache_path, 0o666 & ~umask) logger.info(f"creating metadata file for {cache_path}") meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w", encoding="utf-8") as meta_file: json.dump(meta, meta_file) return cache_path def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) return fn return docstring_decorator def estimate_dataset_size(paths): return sum(path.stat().st_size for path in paths) def readline(f: io.RawIOBase): # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525 res = bytearray() while True: b = f.read(1) if not b: break res += b if res.endswith(b"\n"): break return bytes(res)
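# Hedged usage sketch (illustration only; the URL below is a placeholder): `cached_path` is
# the usual entry point around `get_from_cache`, returning an absolute local path and
# reusing the cache on subsequent calls.
def _example_cached_path(url: str = "https://example.com/data.json.gz") -> str:
    download_config = DownloadConfig(extract_compressed_file=True)  # also decompress after download
    return cached_path(url, download_config=download_config)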
datasets/src/datasets/utils/file_utils.py/0
{ "file_path": "datasets/src/datasets/utils/file_utils.py", "repo_id": "datasets", "token_count": 11169 }
71
import numpy as np


def approximate_mode(class_counts, n_draws, rng):
    """Computes the approximate mode of a multivariate hypergeometric distribution.

    This is an approximation to the mode of the multivariate
    hypergeometric given by class_counts and n_draws.
    It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
    samples from the population given by class_counts.

    Args
    ----------
    class_counts : ndarray of int
        Population per class.
    n_draws : int
        Number of draws (samples to draw) from the overall population.
    rng : random state
        Used to break ties.

    Returns
    -------
    sampled_classes : ndarray of int
        Number of samples drawn from each class.
        np.sum(sampled_classes) == n_draws
    """
    # this computes a bad approximation to the mode of the
    # multivariate hypergeometric given by class_counts and n_draws
    continuous = n_draws * class_counts / class_counts.sum()
    # floored means we don't overshoot n_samples, but probably undershoot
    floored = np.floor(continuous)
    # we add samples according to how much "left over" probability
    # they had, until we arrive at n_samples
    need_to_add = int(n_draws - floored.sum())
    if need_to_add > 0:
        remainder = continuous - floored
        values = np.sort(np.unique(remainder))[::-1]
        # add according to remainder, but break ties
        # randomly to avoid biases
        for value in values:
            (inds,) = np.where(remainder == value)
            # if we need_to_add less than what's in inds
            # we draw randomly from them.
            # if we need to add more, we add them all and
            # go to the next value
            add_now = min(len(inds), need_to_add)
            inds = rng.choice(inds, size=add_now, replace=False)
            floored[inds] += 1
            need_to_add -= add_now
            if need_to_add == 0:
                break
    return floored.astype(np.int64)


def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
    """
    Provides train/test indices to split data into train/test sets.
    It is adapted from the StratifiedShuffleSplit implementation
    in the scikit-learn library.

    Args
    ----------
    y : array-like of shape (n_samples,)
        Class labels used for stratification.
    n_train : int,
        represents the absolute number of train samples.
    n_test : int,
        represents the absolute number of test samples.
    rng : random state
        Controls the randomness of the training and testing indices produced.
    n_splits : int, default=10
        Number of re-shuffling & splitting iterations.
    """
    classes, y_indices = np.unique(y, return_inverse=True)
    n_classes = classes.shape[0]

    class_counts = np.bincount(y_indices)
    if np.min(class_counts) < 2:
        raise ValueError("Minimum class count error")

    if n_train < n_classes:
        raise ValueError(
            "The train_size = %d should be greater or " "equal to the number of classes = %d" % (n_train, n_classes)
        )
    if n_test < n_classes:
        raise ValueError(
            "The test_size = %d should be greater or " "equal to the number of classes = %d" % (n_test, n_classes)
        )

    class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])

    for _ in range(n_splits):
        n_i = approximate_mode(class_counts, n_train, rng)
        class_counts_remaining = class_counts - n_i
        t_i = approximate_mode(class_counts_remaining, n_test, rng)

        train = []
        test = []

        for i in range(n_classes):
            permutation = rng.permutation(class_counts[i])
            perm_indices_class_i = class_indices[i].take(permutation, mode="clip")

            train.extend(perm_indices_class_i[: n_i[i]])
            test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])

        train = rng.permutation(train)
        test = rng.permutation(test)

        yield train, test
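# Hedged usage sketch (illustration only): `y` is a 1-D array of class labels in which every
# class appears at least twice, and `rng` is any NumPy random state exposing `permutation`
# and `choice`, as used above.
def _example_split():
    y = np.array([0, 0, 1, 1, 1, 1])
    rng = np.random.RandomState(0)
    train_indices, test_indices = next(stratified_shuffle_split_generate_indices(y, n_train=4, n_test=2, rng=rng))
    return train_indices, test_indices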
datasets/src/datasets/utils/stratify.py/0
{ "file_path": "datasets/src/datasets/utils/stratify.py", "repo_id": "datasets", "token_count": 1674 }
72
import pytest import datasets import datasets.config # Import fixture modules as plugins pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def pytest_collection_modifyitems(config, items): # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ["integration", "unit"]): continue item.add_marker(pytest.mark.unit) def pytest_configure(config): config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12") @pytest.fixture(autouse=True) def set_test_cache_config(tmp_path_factory, monkeypatch): # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache" test_hf_datasets_cache = test_hf_cache_home / "datasets" test_hf_metrics_cache = test_hf_cache_home / "metrics" test_hf_modules_cache = test_hf_cache_home / "modules" monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache)) monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache)) monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache)) test_downloaded_datasets_path = test_hf_datasets_cache / "downloads" monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path)) test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted" monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path)) @pytest.fixture(autouse=True, scope="session") def disable_tqdm_output(): datasets.disable_progress_bar() @pytest.fixture(autouse=True) def set_update_download_counts_to_false(monkeypatch): # don't take tests into account when counting downloads monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False) @pytest.fixture def set_sqlalchemy_silence_uber_warning(monkeypatch): # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported try: monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True) except AttributeError: pass @pytest.fixture(autouse=True, scope="session") def zero_time_out_for_remote_code(): datasets.config.TIME_OUT_REMOTE_CODE = 0
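As a quick illustration of how the collection hook above behaves, here is a hypothetical test module (not part of the source): the unmarked test is auto-tagged `unit` by `pytest_collection_modifyitems`, while the explicitly marked one keeps its `integration` marker.

```python
# Hypothetical test module (illustrative only)
import pytest


def test_pure_python_logic():
    # no marker -> auto-marked "unit" by the conftest hook above
    assert sorted([3, 1, 2]) == [1, 2, 3]


@pytest.mark.integration
def test_requires_network_access():
    # keeps its marker; selected with: pytest -m integration
    assert True
```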
datasets/tests/conftest.py/0
{ "file_path": "datasets/tests/conftest.py", "repo_id": "datasets", "token_count": 957 }
73
import posixpath from pathlib import Path from unittest.mock import patch import pytest from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path from fsspec.registry import _registry as _fsspec_registry class MockFileSystem(AbstractFileSystem): protocol = "mock" def __init__(self, *args, local_root_dir, **kwargs): super().__init__() self._fs = LocalFileSystem(*args, **kwargs) self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/" def mkdir(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.mkdir(path, *args, **kwargs) def makedirs(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.makedirs(path, *args, **kwargs) def rmdir(self, path): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.rmdir(path) def ls(self, path, detail=True, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) out = self._fs.ls(path, detail=detail, *args, **kwargs) if detail: return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out] else: return [name[len(self.local_root_dir) :] for name in out] def info(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) out = dict(self._fs.info(path, *args, **kwargs)) out["name"] = out["name"][len(self.local_root_dir) :] return out def cp_file(self, path1, path2, *args, **kwargs): path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1)) path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2)) return self._fs.cp_file(path1, path2, *args, **kwargs) def rm_file(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.rm_file(path, *args, **kwargs) def rm(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.rm(path, *args, **kwargs) def _open(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs._open(path, *args, **kwargs) def created(self, path): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.created(path) def modified(self, path): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.modified(path) @classmethod def _strip_protocol(cls, path): path = stringify_path(path) if path.startswith("mock://"): path = path[7:] return path class TmpDirFileSystem(MockFileSystem): protocol = "tmp" tmp_dir = None def __init__(self, *args, **kwargs): assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set" super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True) @classmethod def _strip_protocol(cls, path): path = stringify_path(path) if path.startswith("tmp://"): path = path[6:] return path @pytest.fixture def mock_fsspec(): _fsspec_registry["mock"] = MockFileSystem _fsspec_registry["tmp"] = TmpDirFileSystem yield del _fsspec_registry["mock"] del _fsspec_registry["tmp"] @pytest.fixture def mockfs(tmp_path_factory, mock_fsspec): local_fs_dir = tmp_path_factory.mktemp("mockfs") return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True) @pytest.fixture def tmpfs(tmp_path_factory, mock_fsspec): tmp_fs_dir = tmp_path_factory.mktemp("tmpfs") with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir): yield TmpDirFileSystem() 
TmpDirFileSystem.clear_instance_cache()
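A hedged sketch (not from the source) of how the `mockfs` fixture above might be used in a test; the file names are made up. Paths under the `mock://` protocol are transparently mapped into a local temporary directory, and the mock strips that prefix back off in `info`/`ls` results.

```python
# Hypothetical test using the `mockfs` fixture defined above
def test_mockfs_roundtrip(mockfs):
    with mockfs.open("mock://data/hello.txt", "w") as f:
        f.write("hello")
    # the local root dir is stripped from returned names, so only the relative path remains
    assert mockfs.info("mock://data/hello.txt")["name"] == "data/hello.txt"
    with mockfs.open("mock://data/hello.txt", "r") as f:
        assert f.read() == "hello"
```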
datasets/tests/fixtures/fsspec.py/0
{ "file_path": "datasets/tests/fixtures/fsspec.py", "repo_id": "datasets", "token_count": 1757 }
74
import shutil import textwrap import numpy as np import pytest from datasets import ClassLabel, Features, Image, Value from datasets.data_files import DataFilesDict, get_data_patterns from datasets.download.streaming_download_manager import StreamingDownloadManager from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder from ..utils import require_pil @pytest.fixture def cache_dir(tmp_path): return str(tmp_path / "imagefolder_cache_dir") @pytest.fixture def data_files_with_labels_no_metadata(tmp_path, image_file): data_dir = tmp_path / "data_files_with_labels_no_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "cat" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "dog" subdir_class_1.mkdir(parents=True, exist_ok=True) image_filename = subdir_class_0 / "image_cat.jpg" shutil.copyfile(image_file, image_filename) image_filename2 = subdir_class_1 / "image_dog.jpg" shutil.copyfile(image_file, image_filename2) data_files_with_labels_no_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_labels_no_metadata @pytest.fixture def image_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, image_file): data_dir = tmp_path / "image_files_with_labels_and_label_key_in_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "cat" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "dog" subdir_class_1.mkdir(parents=True, exist_ok=True) image_filename = subdir_class_0 / "image_cat.jpg" shutil.copyfile(image_file, image_filename) image_filename2 = subdir_class_1 / "image_dog.jpg" shutil.copyfile(image_file, image_filename2) image_metadata_filename = tmp_path / data_dir / "metadata.jsonl" image_metadata = textwrap.dedent( """\ {"file_name": "cat/image_cat.jpg", "caption": "Nice image of a cat", "label": "Cat"} {"file_name": "dog/image_dog.jpg", "caption": "Nice image of a dog", "label": "Dog"} """ ) with open(image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) return str(image_filename), str(image_filename2), str(image_metadata_filename) @pytest.fixture def image_file_with_metadata(tmp_path, image_file): image_filename = tmp_path / "image_rgb.jpg" shutil.copyfile(image_file, image_filename) image_metadata_filename = tmp_path / "metadata.jsonl" image_metadata = textwrap.dedent( """\ {"file_name": "image_rgb.jpg", "caption": "Nice image"} """ ) with open(image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) return str(image_filename), str(image_metadata_filename) @pytest.fixture def image_files_with_metadata_that_misses_one_image(tmp_path, image_file): image_filename = tmp_path / "image_rgb.jpg" shutil.copyfile(image_file, image_filename) image_filename2 = tmp_path / "image_rgb2.jpg" shutil.copyfile(image_file, image_filename2) image_metadata_filename = tmp_path / "metadata.jsonl" image_metadata = textwrap.dedent( """\ {"file_name": "image_rgb.jpg", "caption": "Nice image"} """ ) with open(image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) return str(image_filename), str(image_filename2), str(image_metadata_filename) @pytest.fixture(params=["jsonl", "csv"]) def data_files_with_one_split_and_metadata(request, tmp_path, image_file): data_dir = tmp_path / "imagefolder_data_dir_with_metadata_one_split" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) image_filename = 
data_dir / "image_rgb.jpg" shutil.copyfile(image_file, image_filename) image_filename2 = data_dir / "image_rgb2.jpg" shutil.copyfile(image_file, image_filename2) image_filename3 = subdir / "image_rgb3.jpg" # in subdir shutil.copyfile(image_file, image_filename3) image_metadata_filename = data_dir / f"metadata.{request.param}" image_metadata = ( textwrap.dedent( """\ {"file_name": "image_rgb.jpg", "caption": "Nice image"} {"file_name": "image_rgb2.jpg", "caption": "Nice second image"} {"file_name": "subdir/image_rgb3.jpg", "caption": "Nice third image"} """ ) if request.param == "jsonl" else textwrap.dedent( """\ file_name,caption image_rgb.jpg,Nice image image_rgb2.jpg,Nice second image subdir/image_rgb3.jpg,Nice third image """ ) ) with open(image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata["train"]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture(params=["jsonl", "csv"]) def data_files_with_two_splits_and_metadata(request, tmp_path, image_file): data_dir = tmp_path / "imagefolder_data_dir_with_metadata_two_splits" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) image_filename = train_dir / "image_rgb.jpg" # train image shutil.copyfile(image_file, image_filename) image_filename2 = train_dir / "image_rgb2.jpg" # train image shutil.copyfile(image_file, image_filename2) image_filename3 = test_dir / "image_rgb3.jpg" # test image shutil.copyfile(image_file, image_filename3) train_image_metadata_filename = train_dir / f"metadata.{request.param}" image_metadata = ( textwrap.dedent( """\ {"file_name": "image_rgb.jpg", "caption": "Nice train image"} {"file_name": "image_rgb2.jpg", "caption": "Nice second train image"} """ ) if request.param == "jsonl" else textwrap.dedent( """\ file_name,caption image_rgb.jpg,Nice train image image_rgb2.jpg,Nice second train image """ ) ) with open(train_image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) test_image_metadata_filename = test_dir / f"metadata.{request.param}" image_metadata = ( textwrap.dedent( """\ {"file_name": "image_rgb3.jpg", "caption": "Nice test image"} """ ) if request.param == "jsonl" else textwrap.dedent( """\ file_name,caption image_rgb3.jpg,Nice test image """ ) ) with open(test_image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture def data_files_with_zip_archives(tmp_path, image_file): from PIL import Image, ImageOps data_dir = tmp_path / "imagefolder_data_dir_with_zip_archives" data_dir.mkdir(parents=True, exist_ok=True) archive_dir = data_dir / "archive" archive_dir.mkdir(parents=True, exist_ok=True) subdir = archive_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) image_filename = archive_dir / "image_rgb.jpg" shutil.copyfile(image_file, image_filename) image_filename2 = subdir / 
"image_rgb2.jpg" # in subdir # make sure they're two different images # Indeed we won't be able to compare the image.filename, since the archive is not extracted in streaming mode ImageOps.flip(Image.open(image_file)).save(image_filename2) image_metadata_filename = archive_dir / "metadata.jsonl" image_metadata = textwrap.dedent( """\ {"file_name": "image_rgb.jpg", "caption": "Nice image"} {"file_name": "subdir/image_rgb2.jpg", "caption": "Nice second image"} """ ) with open(image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) shutil.make_archive(archive_dir, "zip", archive_dir) shutil.rmtree(str(archive_dir)) data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) assert len(data_files_with_zip_archives) == 1 assert len(data_files_with_zip_archives["train"]) == 1 return data_files_with_zip_archives @require_pil # check that labels are inferred correctly from dir names def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir): # there are no metadata.jsonl files in this test case imagefolder = ImageFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False) imagefolder.download_and_prepare() assert imagefolder.info.features == Features({"image": Image(), "label": ClassLabel(names=["cat", "dog"])}) dataset = list(imagefolder.as_dataset()["train"]) label_feature = imagefolder.info.features["label"] assert dataset[0]["label"] == label_feature._str2int["cat"] assert dataset[1]["label"] == label_feature._str2int["dog"] @require_pil @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_duplicated_label_key( image_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog ): cat_image_file, dog_image_file, image_metadata_file = image_files_with_labels_and_duplicated_label_key_in_metadata imagefolder = ImageFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=[cat_image_file, dog_image_file, image_metadata_file], cache_dir=cache_dir, ) if drop_labels is False: # infer labels from directories even if metadata files are found imagefolder.download_and_prepare() warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) assert warning_in_logs if drop_metadata is not True else not warning_in_logs dataset = imagefolder.as_dataset()["train"] assert imagefolder.info.features["label"] == ClassLabel(names=["cat", "dog"]) assert all(example["label"] in imagefolder.info.features["label"]._str2int.values() for example in dataset) else: imagefolder.download_and_prepare() dataset = imagefolder.as_dataset()["train"] if drop_metadata is not True: # labels are from metadata assert imagefolder.info.features["label"] == Value("string") assert all(example["label"] in ["Cat", "Dog"] for example in dataset) else: # drop both labels and metadata assert imagefolder.info.features == Features({"image": Image()}) assert all(example.keys() == {"image"} for example in dataset) @require_pil @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels): imagefolder = ImageFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata ) gen_kwargs = 
imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # removing the labels explicitly requires drop_labels=True assert gen_kwargs["add_labels"] is not bool(drop_labels) assert gen_kwargs["add_metadata"] is False generator = imagefolder._generate_examples(**gen_kwargs) if not drop_labels: assert all( example.keys() == {"image", "label"} and all(val is not None for val in example.values()) for _, example in generator ) else: assert all( example.keys() == {"image"} and all(val is not None for val in example.values()) for _, example in generator ) @require_pil @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_metadata(image_file_with_metadata, drop_metadata, drop_labels): image_file, image_metadata_file = image_file_with_metadata imagefolder = ImageFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [image_file, image_metadata_file]} ) gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True assert gen_kwargs["add_metadata"] is not bool(drop_metadata) # since the dataset has metadata, adding the labels explicitly requires drop_labels=False assert gen_kwargs["add_labels"] is (drop_labels is False) generator = imagefolder._generate_examples(**gen_kwargs) expected_columns = {"image"} if gen_kwargs["add_metadata"]: expected_columns.add("caption") if gen_kwargs["add_labels"]: expected_columns.add("label") result = [example for _, example in generator] assert len(result) == 1 example = result[0] assert example.keys() == expected_columns for column in expected_columns: assert example[column] is not None @require_pil @pytest.mark.parametrize("drop_metadata", [None, True, False]) def test_generate_examples_with_metadata_in_wrong_location(image_file, image_file_with_metadata, drop_metadata): _, image_metadata_file = image_file_with_metadata imagefolder = ImageFolder(drop_metadata=drop_metadata, data_files={"train": [image_file, image_metadata_file]}) gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = imagefolder._generate_examples(**gen_kwargs) if not drop_metadata: with pytest.raises(ValueError): list(generator) else: assert all( example.keys() == {"image"} and all(val is not None for val in example.values()) for _, example in generator ) @require_pil @pytest.mark.parametrize("drop_metadata", [None, True, False]) def test_generate_examples_with_metadata_that_misses_one_image( image_files_with_metadata_that_misses_one_image, drop_metadata ): image_file, image_file2, image_metadata_file = image_files_with_metadata_that_misses_one_image if not drop_metadata: features = Features({"image": Image(), "caption": Value("string")}) else: features = Features({"image": Image()}) imagefolder = ImageFolder( drop_metadata=drop_metadata, features=features, data_files={"train": [image_file, image_file2, image_metadata_file]}, ) gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = imagefolder._generate_examples(**gen_kwargs) if not drop_metadata: with pytest.raises(ValueError): list(generator) else: assert all( example.keys() == {"image"} and all(val is not None for val in example.values()) for _, example in generator ) @require_pil @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_single_split(streaming, cache_dir, 
data_files_with_one_split_and_metadata): data_files = data_files_with_one_split_and_metadata imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir) imagefolder.download_and_prepare() datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset() for split, data_files in data_files.items(): expected_num_of_images = len(data_files) - 1 # don't count the metadata file assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_images # make sure each sample has its own image and metadata assert len({example["image"].filename for example in dataset}) == expected_num_of_images assert len({example["caption"] for example in dataset}) == expected_num_of_images assert all(example["caption"] is not None for example in dataset) @require_pil @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata): data_files = data_files_with_two_splits_and_metadata imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir) imagefolder.download_and_prepare() datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset() for split, data_files in data_files.items(): expected_num_of_images = len(data_files) - 1 # don't count the metadata file assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_images # make sure each sample has its own image and metadata assert len({example["image"].filename for example in dataset}) == expected_num_of_images assert len({example["caption"] for example in dataset}) == expected_num_of_images assert all(example["caption"] is not None for example in dataset) @require_pil @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): imagefolder = ImageFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) imagefolder.download_and_prepare() datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset() for split, data_files in data_files_with_zip_archives.items(): num_of_archives = len(data_files) # the metadata file is inside the archive expected_num_of_images = 2 * num_of_archives assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_images # make sure each sample has its own image and metadata assert len({np.array(example["image"])[0, 0, 0] for example in dataset}) == expected_num_of_images assert len({example["caption"] for example in dataset}) == expected_num_of_images assert all(example["caption"] is not None for example in dataset) @require_pil def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, image_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(image_file, data_dir / "image_rgb.jpg") image_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file image_metadata = textwrap.dedent( """\ {"file_name": "image_rgb.jpg", "caption": "Nice image"} """ ) with open(image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) imagefolder.download_and_prepare() dataset = imagefolder.as_dataset(split="train") # check that there are no metadata, since 
the metadata file name doesn't have the right name assert "caption" not in dataset.column_names @require_pil def test_data_files_with_wrong_image_file_name_column_in_metadata_file(cache_dir, tmp_path, image_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(image_file, data_dir / "image_rgb.jpg") image_metadata_filename = data_dir / "metadata.jsonl" image_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" """\ {"bad_file_name": "image_rgb.jpg", "caption": "Nice image"} """ ) with open(image_metadata_filename, "w", encoding="utf-8") as f: f.write(image_metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: imagefolder.download_and_prepare() assert "`file_name` must be present" in str(exc_info.value) @require_pil def test_data_files_with_with_metadata_in_different_formats(cache_dir, tmp_path, image_file): data_dir = tmp_path / "data_dir_with_metadata_in_different_format" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(image_file, data_dir / "image_rgb.jpg") image_metadata_filename_jsonl = data_dir / "metadata.jsonl" image_metadata_jsonl = textwrap.dedent( """\ {"file_name": "image_rgb.jpg", "caption": "Nice image"} """ ) with open(image_metadata_filename_jsonl, "w", encoding="utf-8") as f: f.write(image_metadata_jsonl) image_metadata_filename_csv = data_dir / "metadata.csv" image_metadata_csv = textwrap.dedent( """\ file_name,caption image_rgb.jpg,Nice image """ ) with open(image_metadata_filename_csv, "w", encoding="utf-8") as f: f.write(image_metadata_csv) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: imagefolder.download_and_prepare() assert "metadata files with different extensions" in str(exc_info.value)
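For context, a hedged sketch of the end-user path these tests exercise: loading an image folder with a `metadata.csv` through `load_dataset`. The directory layout below is made up, and the printed features are what would typically be inferred when a metadata file is present.

```python
from datasets import load_dataset

# Hypothetical layout (illustrative only):
# my_images/
#   metadata.csv      # header: file_name,caption
#   image_1.jpg
#   image_2.jpg
ds = load_dataset("imagefolder", data_dir="my_images", split="train")
print(ds.features)        # expected: {'image': Image(...), 'caption': Value(dtype='string')}
print(ds[0]["caption"])
```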
datasets/tests/packaged_modules/test_imagefolder.py/0
{ "file_path": "datasets/tests/packaged_modules/test_imagefolder.py", "repo_id": "datasets", "token_count": 8692 }
75
import os import zipfile import pytest from datasets.utils.extract import ( Bzip2Extractor, Extractor, GzipExtractor, Lz4Extractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lz4, require_py7zr, require_zstandard @pytest.mark.parametrize( "compression_format, is_archive", [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ], ) def test_base_extractors( compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ): input_paths_and_base_extractors = { "7z": (seven_zip_file, SevenZipExtractor), "bz2": (bz2_file, Bzip2Extractor), "gzip": (gz_file, GzipExtractor), "lz4": (lz4_file, Lz4Extractor), "tar": (tar_file, TarExtractor), "xz": (xz_file, XzExtractor), "zip": (zip_file, ZipExtractor), "zstd": (zstd_file, ZstdExtractor), } input_path, base_extractor = input_paths_and_base_extractors[compression_format] if input_path is None: reason = f"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_py7zr.kwargs["reason"] elif compression_format == "lz4": reason += require_lz4.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) assert base_extractor.is_extractable(input_path) output_path = tmp_path / ("extracted" if is_archive else "extracted.txt") base_extractor.extract(input_path, output_path) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name extracted_file_content = file_path.read_text(encoding="utf-8") else: extracted_file_content = output_path.read_text(encoding="utf-8") expected_file_content = text_file.read_text(encoding="utf-8") assert extracted_file_content == expected_file_content @pytest.mark.parametrize( "compression_format, is_archive", [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ], ) def test_extractor( compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ): input_paths = { "7z": seven_zip_file, "bz2": bz2_file, "gzip": gz_file, "lz4": lz4_file, "tar": tar_file, "xz": xz_file, "zip": zip_file, "zstd": zstd_file, } input_path = input_paths[compression_format] if input_path is None: reason = f"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_py7zr.kwargs["reason"] elif compression_format == "lz4": reason += require_lz4.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) extractor_format = Extractor.infer_extractor_format(input_path) assert extractor_format is not None output_path = tmp_path / ("extracted" if is_archive else "extracted.txt") Extractor.extract(input_path, output_path, extractor_format) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name extracted_file_content = file_path.read_text(encoding="utf-8") else: extracted_file_content = output_path.read_text(encoding="utf-8") expected_file_content = text_file.read_text(encoding="utf-8") assert extracted_file_content == expected_file_content @pytest.fixture def tar_file_with_dot_dot(tmp_path, text_file): import tarfile directory = tmp_path / "data_dot_dot" directory.mkdir() path = directory / 
"tar_file_with_dot_dot.tar" with tarfile.TarFile(path, "w") as f: f.add(text_file, arcname=os.path.join("..", text_file.name)) return path @pytest.fixture def tar_file_with_sym_link(tmp_path): import tarfile directory = tmp_path / "data_sym_link" directory.mkdir() path = directory / "tar_file_with_sym_link.tar" os.symlink("..", directory / "subdir", target_is_directory=True) with tarfile.TarFile(path, "w") as f: f.add(str(directory / "subdir"), arcname="subdir") # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( "insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], ) def test_tar_extract_insecure_files( insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog ): insecure_tar_files = { "tar_file_with_dot_dot": tar_file_with_dot_dot, "tar_file_with_sym_link": tar_file_with_sym_link, } input_path = insecure_tar_files[insecure_tar_file] output_path = tmp_path / "extracted" TarExtractor.extract(input_path, output_path) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def test_is_zipfile_false_positive(tmpdir): # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number not_a_zip_file = tmpdir / "not_a_zip_file" # From: https://github.com/python/cpython/pull/5053 data = ( b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" ) with not_a_zip_file.open("wb") as f: f.write(data) assert zipfile.is_zipfile(str(not_a_zip_file)) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(not_a_zip_file) # but we're right
datasets/tests/test_extract.py/0
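A minimal sketch (not from the source) of the extraction helpers exercised in the tests above; the archive path is hypothetical and would have to point to an existing file, since the format is inferred from the file's magic number.

```python
from datasets.utils.extract import Extractor

archive_path = "downloads/example_archive.zip"  # hypothetical path, assumed to exist
extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "zip", "gzip", ...
if extractor_format is not None:
    Extractor.extract(archive_path, "extracted/example_archive", extractor_format)
```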
{ "file_path": "datasets/tests/test_extract.py", "repo_id": "datasets", "token_count": 2984 }
76
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def test_offline_with_timeout(): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT): with pytest.raises(RequestWouldHangIndefinitelyError): requests.request("GET", "https://huggingface.co") with pytest.raises(requests.exceptions.ConnectTimeout): requests.request("GET", "https://huggingface.co", timeout=1.0) @pytest.mark.integration def test_offline_with_connection_error(): with offline(OfflineSimulationMode.CONNECTION_FAILS): with pytest.raises(requests.exceptions.ConnectionError): requests.request("GET", "https://huggingface.co") def test_offline_with_datasets_offline_mode_enabled(): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1): with pytest.raises(ConnectionError): http_head("https://huggingface.co")
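Outside the test-only `offline(...)` helper, the same behaviour can be triggered through the `HF_DATASETS_OFFLINE` environment variable; a hedged sketch mirroring the last test above (the env var must be set before `datasets` is imported, since it is read at import time):

```python
import os

os.environ["HF_DATASETS_OFFLINE"] = "1"  # set before importing datasets

from datasets.utils.file_utils import http_head

try:
    http_head("https://huggingface.co")
except ConnectionError as err:
    print("Blocked by offline mode:", err)
```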
datasets/tests/test_offline_util.py/0
{ "file_path": "datasets/tests/test_offline_util.py", "repo_id": "datasets", "token_count": 382 }
77
<jupyter_start><jupyter_text>Unit 8: Proximal Policy Gradient (PPO) with PyTorch πŸ€–In this notebook, you'll learn to **code your PPO agent from scratch with PyTorch using CleanRL implementation as model**.To test its robustness, we're going to train it in:- [LunarLander-v2 πŸš€](https://www.gymlibrary.dev/environments/box2d/lunar_lander/) ⬇️ Here is an example of what you will achieve. ⬇️<jupyter_code>%%html <video controls autoplay><source src="https://huggingface.co/sb3/ppo-LunarLander-v2/resolve/main/replay.mp4" type="video/mp4"></video><jupyter_output><empty_output><jupyter_text>We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). Objectives of this notebook πŸ†At the end of the notebook, you will:- Be able to **code your PPO agent from scratch using PyTorch**.- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score πŸ”₯. This notebook is from the Deep Reinforcement Learning CourseIn this free course, you will:- πŸ“– Study Deep Reinforcement Learning in **theory and practice**.- πŸ§‘β€πŸ’» Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- πŸ€– Train **agents in unique environments** Don’t forget to **sign up to the course** (we are collecting your email to be able toΒ **send you the links when each Unit is published and give you information about the challenges and updates).**The best way to keep in touch is to join our discord server to exchange with the community and with us πŸ‘‰πŸ» https://discord.gg/ydHrjt3WP5 Prerequisites πŸ—οΈBefore diving into the notebook, you need to:πŸ”² πŸ“š Study [PPO by reading Unit 8](https://huggingface.co/deep-rl-course/unit8/introduction) πŸ€— To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process), you need to push one model, we don't ask for a minimal result but we **advise you to try different hyperparameters settings to get better results**.If you don't find your model, **go to the bottom of the page and click on the refresh button**For more information about the certification process, check this section πŸ‘‰ https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process Set the GPU πŸ’ͺ- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Create a virtual display πŸ”½During the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). 
Hence the following cell will install the librairies and create and run a virtual screen πŸ–₯<jupyter_code>!pip install setuptools==65.5.0 %%capture !apt install python-opengl !apt install ffmpeg !apt install xvfb !apt install swig cmake !pip install pyglet==1.5 !pip3 install pyvirtualdisplay # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start()<jupyter_output><empty_output><jupyter_text>Install dependencies πŸ”½For this exercise, we use `gym==0.22`.<jupyter_code>!pip install gym==0.22 !pip install imageio-ffmpeg !pip install huggingface_hub !pip install gym[box2d]==0.22<jupyter_output><empty_output><jupyter_text>Let's code PPO from scratch with Costa Huang tutorial- For the core implementation of PPO we're going to use the excellent [Costa Huang](https://costa.sh/) tutorial.- In addition to the tutorial, to go deeper you can read the 37 core implementation details: https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/πŸ‘‰ The video tutorial: https://youtu.be/MEt6rrxH8W4<jupyter_code>from IPython.display import HTML HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/MEt6rrxH8W4" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')<jupyter_output><empty_output><jupyter_text>- The best is to code first on the cell below, this way, if you kill the machine **you don't loose the implementation**.<jupyter_code>### Your code here:<jupyter_output><empty_output><jupyter_text>Add the Hugging Face Integration πŸ€—- In order to push our model to the Hub, we need to define a function `package_to_hub` - Add dependencies we need to push our model to the Hub<jupyter_code>from huggingface_hub import HfApi, upload_folder from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import tempfile import json import shutil import imageio from wasabi import Printer msg = Printer()<jupyter_output><empty_output><jupyter_text>- Add new argument in `parse_args()` function to define the repo-id where we want to push the model.<jupyter_code># Adding HuggingFace argument parser.add_argument("--repo-id", type=str, default="ThomasSimonini/ppo-CartPole-v1", help="id of the model repository from the Hugging Face Hub {username/repo_name}")<jupyter_output><empty_output><jupyter_text>- Next, we add the methods needed to push the model to the Hub- These methods will: - `_evalutate_agent()`: evaluate the agent. - `_generate_model_card()`: generate the model card of your agent. - `_record_video()`: record a video of your agent.<jupyter_code>def package_to_hub(repo_id, model, hyperparameters, eval_env, video_fps=30, commit_message="Push agent to the Hub", token= None, logs=None ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. 
This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the hub :param repo_id: id of the model repository from the Hugging Face Hub :param model: trained model :param eval_env: environment used to evaluate the agent :param fps: number of fps for rendering the video :param commit_message: commit message :param logs: directory on local machine of tensorboard logs you'd like to upload """ msg.info( "This function will save, evaluate, generate a video of your agent, " "create a model card and push everything to the hub. " "It might take up to 1min. \n " "This is a work in progress: if you encounter a bug, please open an issue." ) # Step 1: Clone or create the repo repo_url = HfApi().create_repo( repo_id=repo_id, token=token, private=False, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = Path(tmpdirname) # Step 2: Save the model torch.save(model.state_dict(), tmpdirname / "model.pt") # Step 3: Evaluate the model and build JSON mean_reward, std_reward = _evaluate_agent(eval_env, 10, model) # First get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters.env_id, "mean_reward": mean_reward, "std_reward": std_reward, "n_evaluation_episodes": 10, "eval_datetime": eval_form_datetime, } # Write a JSON file with open(tmpdirname / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 4: Generate a video video_path = tmpdirname / "replay.mp4" record_video(eval_env, model, video_path, video_fps) # Step 5: Generate the model card generated_model_card, metadata = _generate_model_card("PPO", hyperparameters.env_id, mean_reward, std_reward, hyperparameters) _save_model_card(tmpdirname, generated_model_card, metadata) # Step 6: Add logs if needed if logs: _add_logdir(tmpdirname, Path(logs)) msg.info(f"Pushing repo {repo_id} to the Hugging Face Hub") repo_url = upload_folder( repo_id=repo_id, folder_path=tmpdirname, path_in_repo="", commit_message=commit_message, token=token, ) msg.info(f"Your model is pushed to the Hub. You can view your model here: {repo_url}") return repo_url def _evaluate_agent(env, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. 
:param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 while done is False: state = torch.Tensor(state).to(device) action, _, _, _ = policy.get_action_and_value(state) new_state, reward, done, info = env.step(action.cpu().numpy()) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward def record_video(env, policy, out_directory, fps=30): images = [] done = False state = env.reset() img = env.render(mode='rgb_array') images.append(img) while not done: state = torch.Tensor(state).to(device) # Take the action (index) that have the maximum expected future reward given that state action, _, _, _ = policy.get_action_and_value(state) state, reward, done, info = env.step(action.cpu().numpy()) # We directly put next_state = state for recording logic img = env.render(mode='rgb_array') images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def _generate_model_card(model_name, env_id, mean_reward, std_reward, hyperparameters): """ Generate the model card for the Hub :param model_name: name of the model :env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent :hyperparameters: training arguments """ # Step 1: Select the tags metadata = generate_metadata(model_name, env_id, mean_reward, std_reward) # Transform the hyperparams namespace to string converted_dict = vars(hyperparameters) converted_str = str(converted_dict) converted_str = converted_str.split(", ") converted_str = '\n'.join(converted_str) # Step 2: Generate the model card model_card = f""" # PPO Agent Playing {env_id} This is a trained model of a PPO agent playing {env_id}. # Hyperparameters ```python {converted_str} ``` """ return model_card, metadata def generate_metadata(model_name, env_id, mean_reward, std_reward): """ Define the tags for the model card :param model_name: name of the model :param env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent """ metadata = {} metadata["tags"] = [ env_id, "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course" ] # Add metrics eval = metadata_eval_result( model_pretty_name=model_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_id, dataset_id=env_id, ) # Merges both dictionaries metadata = {**metadata, **eval} return metadata def _save_model_card(local_path, generated_model_card, metadata): """Saves a model card for the repository. 
:param local_path: repository directory :param generated_model_card: model card generated by _generate_model_card() :param metadata: metadata """ readme_path = local_path / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = generated_model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) def _add_logdir(local_path: Path, logdir: Path): """Adds a logdir to the repository. :param local_path: repository directory :param logdir: logdir directory """ if logdir.exists() and logdir.is_dir(): # Add the logdir to the repository under new dir called logs repo_logdir = local_path / "logs" # Delete current logs if they exist if repo_logdir.exists(): shutil.rmtree(repo_logdir) # Copy logdir into repo logdir shutil.copytree(logdir, repo_logdir)<jupyter_output><empty_output><jupyter_text>- Finally, we call this function at the end of the PPO training<jupyter_code># Create the evaluation environment eval_env = gym.make(args.env_id) package_to_hub(repo_id = args.repo_id, model = agent, # The model we want to save hyperparameters = args, eval_env = gym.make(args.env_id), logs= f"runs/{run_name}", )<jupyter_output><empty_output><jupyter_text>- Here's what look the ppo.py final file<jupyter_code># docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/ppo/#ppopy import argparse import os import random import time from distutils.util import strtobool import gym import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.distributions.categorical import Categorical from torch.utils.tensorboard import SummaryWriter from huggingface_hub import HfApi, upload_folder from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import tempfile import json import shutil import imageio from wasabi import Printer msg = Printer() def parse_args(): # fmt: off parser = argparse.ArgumentParser() parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"), help="the name of this experiment") parser.add_argument("--seed", type=int, default=1, help="seed of the experiment") parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, `torch.backends.cudnn.deterministic=False`") parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, cuda will be enabled by default") parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="if toggled, this experiment will be tracked with Weights and Biases") parser.add_argument("--wandb-project-name", type=str, default="cleanRL", help="the wandb's project name") parser.add_argument("--wandb-entity", type=str, default=None, help="the entity (team) of wandb's project") parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="weather to capture videos of the agent performances (check out `videos` folder)") # Algorithm specific arguments parser.add_argument("--env-id", type=str, default="CartPole-v1", help="the id of the environment") parser.add_argument("--total-timesteps", type=int, default=50000, help="total timesteps of the experiments") parser.add_argument("--learning-rate", type=float, default=2.5e-4, help="the learning rate 
of the optimizer") parser.add_argument("--num-envs", type=int, default=4, help="the number of parallel game environments") parser.add_argument("--num-steps", type=int, default=128, help="the number of steps to run in each environment per policy rollout") parser.add_argument("--anneal-lr", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggle learning rate annealing for policy and value networks") parser.add_argument("--gae", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Use GAE for advantage computation") parser.add_argument("--gamma", type=float, default=0.99, help="the discount factor gamma") parser.add_argument("--gae-lambda", type=float, default=0.95, help="the lambda for the general advantage estimation") parser.add_argument("--num-minibatches", type=int, default=4, help="the number of mini-batches") parser.add_argument("--update-epochs", type=int, default=4, help="the K epochs to update the policy") parser.add_argument("--norm-adv", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggles advantages normalization") parser.add_argument("--clip-coef", type=float, default=0.2, help="the surrogate clipping coefficient") parser.add_argument("--clip-vloss", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggles whether or not to use a clipped loss for the value function, as per the paper.") parser.add_argument("--ent-coef", type=float, default=0.01, help="coefficient of the entropy") parser.add_argument("--vf-coef", type=float, default=0.5, help="coefficient of the value function") parser.add_argument("--max-grad-norm", type=float, default=0.5, help="the maximum norm for the gradient clipping") parser.add_argument("--target-kl", type=float, default=None, help="the target KL divergence threshold") # Adding HuggingFace argument parser.add_argument("--repo-id", type=str, default="ThomasSimonini/ppo-CartPole-v1", help="id of the model repository from the Hugging Face Hub {username/repo_name}") args = parser.parse_args() args.batch_size = int(args.num_envs * args.num_steps) args.minibatch_size = int(args.batch_size // args.num_minibatches) # fmt: on return args def package_to_hub(repo_id, model, hyperparameters, eval_env, video_fps=30, commit_message="Push agent to the Hub", token= None, logs=None ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the hub :param repo_id: id of the model repository from the Hugging Face Hub :param model: trained model :param eval_env: environment used to evaluate the agent :param fps: number of fps for rendering the video :param commit_message: commit message :param logs: directory on local machine of tensorboard logs you'd like to upload """ msg.info( "This function will save, evaluate, generate a video of your agent, " "create a model card and push everything to the hub. " "It might take up to 1min. \n " "This is a work in progress: if you encounter a bug, please open an issue." 
) # Step 1: Clone or create the repo repo_url = HfApi().create_repo( repo_id=repo_id, token=token, private=False, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = Path(tmpdirname) # Step 2: Save the model torch.save(model.state_dict(), tmpdirname / "model.pt") # Step 3: Evaluate the model and build JSON mean_reward, std_reward = _evaluate_agent(eval_env, 10, model) # First get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters.env_id, "mean_reward": mean_reward, "std_reward": std_reward, "n_evaluation_episodes": 10, "eval_datetime": eval_form_datetime, } # Write a JSON file with open(tmpdirname / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 4: Generate a video video_path = tmpdirname / "replay.mp4" record_video(eval_env, model, video_path, video_fps) # Step 5: Generate the model card generated_model_card, metadata = _generate_model_card("PPO", hyperparameters.env_id, mean_reward, std_reward, hyperparameters) _save_model_card(tmpdirname, generated_model_card, metadata) # Step 6: Add logs if needed if logs: _add_logdir(tmpdirname, Path(logs)) msg.info(f"Pushing repo {repo_id} to the Hugging Face Hub") repo_url = upload_folder( repo_id=repo_id, folder_path=tmpdirname, path_in_repo="", commit_message=commit_message, token=token, ) msg.info(f"Your model is pushed to the Hub. You can view your model here: {repo_url}") return repo_url def _evaluate_agent(env, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. :param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 while done is False: state = torch.Tensor(state).to(device) action, _, _, _ = policy.get_action_and_value(state) new_state, reward, done, info = env.step(action.cpu().numpy()) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward def record_video(env, policy, out_directory, fps=30): images = [] done = False state = env.reset() img = env.render(mode='rgb_array') images.append(img) while not done: state = torch.Tensor(state).to(device) # Take the action (index) that have the maximum expected future reward given that state action, _, _, _ = policy.get_action_and_value(state) state, reward, done, info = env.step(action.cpu().numpy()) # We directly put next_state = state for recording logic img = env.render(mode='rgb_array') images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def _generate_model_card(model_name, env_id, mean_reward, std_reward, hyperparameters): """ Generate the model card for the Hub :param model_name: name of the model :env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent :hyperparameters: training arguments """ # Step 1: Select the tags metadata = generate_metadata(model_name, env_id, mean_reward, std_reward) # Transform the hyperparams namespace to string converted_dict = vars(hyperparameters) converted_str = str(converted_dict) converted_str = converted_str.split(", ") converted_str = '\n'.join(converted_str) # Step 
2: Generate the model card model_card = f""" # PPO Agent Playing {env_id} This is a trained model of a PPO agent playing {env_id}. # Hyperparameters ```python {converted_str} ``` """ return model_card, metadata def generate_metadata(model_name, env_id, mean_reward, std_reward): """ Define the tags for the model card :param model_name: name of the model :param env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent """ metadata = {} metadata["tags"] = [ env_id, "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course" ] # Add metrics eval = metadata_eval_result( model_pretty_name=model_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_id, dataset_id=env_id, ) # Merges both dictionaries metadata = {**metadata, **eval} return metadata def _save_model_card(local_path, generated_model_card, metadata): """Saves a model card for the repository. :param local_path: repository directory :param generated_model_card: model card generated by _generate_model_card() :param metadata: metadata """ readme_path = local_path / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = generated_model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) def _add_logdir(local_path: Path, logdir: Path): """Adds a logdir to the repository. :param local_path: repository directory :param logdir: logdir directory """ if logdir.exists() and logdir.is_dir(): # Add the logdir to the repository under new dir called logs repo_logdir = local_path / "logs" # Delete current logs if they exist if repo_logdir.exists(): shutil.rmtree(repo_logdir) # Copy logdir into repo logdir shutil.copytree(logdir, repo_logdir) def make_env(env_id, seed, idx, capture_video, run_name): def thunk(): env = gym.make(env_id) env = gym.wrappers.RecordEpisodeStatistics(env) if capture_video: if idx == 0: env = gym.wrappers.RecordVideo(env, f"videos/{run_name}") env.seed(seed) env.action_space.seed(seed) env.observation_space.seed(seed) return env return thunk def layer_init(layer, std=np.sqrt(2), bias_const=0.0): torch.nn.init.orthogonal_(layer.weight, std) torch.nn.init.constant_(layer.bias, bias_const) return layer class Agent(nn.Module): def __init__(self, envs): super().__init__() self.critic = nn.Sequential( layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64)), nn.Tanh(), layer_init(nn.Linear(64, 64)), nn.Tanh(), layer_init(nn.Linear(64, 1), std=1.0), ) self.actor = nn.Sequential( layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64)), nn.Tanh(), layer_init(nn.Linear(64, 64)), nn.Tanh(), layer_init(nn.Linear(64, envs.single_action_space.n), std=0.01), ) def get_value(self, x): return self.critic(x) def get_action_and_value(self, x, action=None): logits = self.actor(x) probs = Categorical(logits=logits) if action is None: action = probs.sample() return action, probs.log_prob(action), probs.entropy(), self.critic(x) if __name__ == "__main__": args = parse_args() run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}" if args.track: import wandb wandb.init( project=args.wandb_project_name, 
entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=run_name, monitor_gym=True, save_code=True, ) writer = SummaryWriter(f"runs/{run_name}") writer.add_text( "hyperparameters", "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])), ) # TRY NOT TO MODIFY: seeding random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = args.torch_deterministic device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") # env setup envs = gym.vector.SyncVectorEnv( [make_env(args.env_id, args.seed + i, i, args.capture_video, run_name) for i in range(args.num_envs)] ) assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported" agent = Agent(envs).to(device) optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5) # ALGO Logic: Storage setup obs = torch.zeros((args.num_steps, args.num_envs) + envs.single_observation_space.shape).to(device) actions = torch.zeros((args.num_steps, args.num_envs) + envs.single_action_space.shape).to(device) logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device) rewards = torch.zeros((args.num_steps, args.num_envs)).to(device) dones = torch.zeros((args.num_steps, args.num_envs)).to(device) values = torch.zeros((args.num_steps, args.num_envs)).to(device) # TRY NOT TO MODIFY: start the game global_step = 0 start_time = time.time() next_obs = torch.Tensor(envs.reset()).to(device) next_done = torch.zeros(args.num_envs).to(device) num_updates = args.total_timesteps // args.batch_size for update in range(1, num_updates + 1): # Annealing the rate if instructed to do so. if args.anneal_lr: frac = 1.0 - (update - 1.0) / num_updates lrnow = frac * args.learning_rate optimizer.param_groups[0]["lr"] = lrnow for step in range(0, args.num_steps): global_step += 1 * args.num_envs obs[step] = next_obs dones[step] = next_done # ALGO LOGIC: action logic with torch.no_grad(): action, logprob, _, value = agent.get_action_and_value(next_obs) values[step] = value.flatten() actions[step] = action logprobs[step] = logprob # TRY NOT TO MODIFY: execute the game and log data. 
next_obs, reward, done, info = envs.step(action.cpu().numpy()) rewards[step] = torch.tensor(reward).to(device).view(-1) next_obs, next_done = torch.Tensor(next_obs).to(device), torch.Tensor(done).to(device) for item in info: if "episode" in item.keys(): print(f"global_step={global_step}, episodic_return={item['episode']['r']}") writer.add_scalar("charts/episodic_return", item["episode"]["r"], global_step) writer.add_scalar("charts/episodic_length", item["episode"]["l"], global_step) break # bootstrap value if not done with torch.no_grad(): next_value = agent.get_value(next_obs).reshape(1, -1) if args.gae: advantages = torch.zeros_like(rewards).to(device) lastgaelam = 0 for t in reversed(range(args.num_steps)): if t == args.num_steps - 1: nextnonterminal = 1.0 - next_done nextvalues = next_value else: nextnonterminal = 1.0 - dones[t + 1] nextvalues = values[t + 1] delta = rewards[t] + args.gamma * nextvalues * nextnonterminal - values[t] advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam returns = advantages + values else: returns = torch.zeros_like(rewards).to(device) for t in reversed(range(args.num_steps)): if t == args.num_steps - 1: nextnonterminal = 1.0 - next_done next_return = next_value else: nextnonterminal = 1.0 - dones[t + 1] next_return = returns[t + 1] returns[t] = rewards[t] + args.gamma * nextnonterminal * next_return advantages = returns - values # flatten the batch b_obs = obs.reshape((-1,) + envs.single_observation_space.shape) b_logprobs = logprobs.reshape(-1) b_actions = actions.reshape((-1,) + envs.single_action_space.shape) b_advantages = advantages.reshape(-1) b_returns = returns.reshape(-1) b_values = values.reshape(-1) # Optimizing the policy and value network b_inds = np.arange(args.batch_size) clipfracs = [] for epoch in range(args.update_epochs): np.random.shuffle(b_inds) for start in range(0, args.batch_size, args.minibatch_size): end = start + args.minibatch_size mb_inds = b_inds[start:end] _, newlogprob, entropy, newvalue = agent.get_action_and_value(b_obs[mb_inds], b_actions.long()[mb_inds]) logratio = newlogprob - b_logprobs[mb_inds] ratio = logratio.exp() with torch.no_grad(): # calculate approx_kl http://joschu.net/blog/kl-approx.html old_approx_kl = (-logratio).mean() approx_kl = ((ratio - 1) - logratio).mean() clipfracs += [((ratio - 1.0).abs() > args.clip_coef).float().mean().item()] mb_advantages = b_advantages[mb_inds] if args.norm_adv: mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8) # Policy loss pg_loss1 = -mb_advantages * ratio pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef) pg_loss = torch.max(pg_loss1, pg_loss2).mean() # Value loss newvalue = newvalue.view(-1) if args.clip_vloss: v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2 v_clipped = b_values[mb_inds] + torch.clamp( newvalue - b_values[mb_inds], -args.clip_coef, args.clip_coef, ) v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2 v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped) v_loss = 0.5 * v_loss_max.mean() else: v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean() entropy_loss = entropy.mean() loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm) optimizer.step() if args.target_kl is not None: if approx_kl > args.target_kl: break y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy() var_y = np.var(y_true) 
        explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y

        # TRY NOT TO MODIFY: record rewards for plotting purposes
        writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step)
        writer.add_scalar("losses/value_loss", v_loss.item(), global_step)
        writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
        writer.add_scalar("losses/entropy", entropy_loss.item(), global_step)
        writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), global_step)
        writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
        writer.add_scalar("losses/clipfrac", np.mean(clipfracs), global_step)
        writer.add_scalar("losses/explained_variance", explained_var, global_step)
        print("SPS:", int(global_step / (time.time() - start_time)))
        writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)

    envs.close()
    writer.close()

    # Create the evaluation environment
    eval_env = gym.make(args.env_id)

    package_to_hub(
        repo_id=args.repo_id,
        model=agent,  # The model we want to save
        hyperparameters=args,
        eval_env=eval_env,
        logs=f"runs/{run_name}",
    )<jupyter_output><empty_output><jupyter_text>To be able to share your model with the community, there are three more steps to follow:

1️⃣ (If it's not already done) create a Hugging Face account ➑ https://huggingface.co/join

2️⃣ Sign in, then store your authentication token from the Hugging Face website.
- Create a new token (https://huggingface.co/settings/tokens) **with write role**
- Copy the token
- Run the cell below and paste the token<jupyter_code>from huggingface_hub import notebook_login
notebook_login()
!git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`

Let's start the training πŸ”₯
- ⚠️ ⚠️ ⚠️ Don't use **the same repo id as the one you used for Unit 1**
- Now that you've coded PPO from scratch and added the Hugging Face integration, we're ready to start the training πŸ”₯
- First, copy all your code to a file you create called `ppo.py`
- Then run that script with `python ppo.py`, passing the additional parameters we defined with `argparse`
- You should also tune more of the hyperparameters, otherwise the training will not be very stable.<jupyter_code>!python ppo.py --env-id="LunarLander-v2" --repo-id="YOUR_REPO_ID" --total-timesteps=50000<jupyter_output><empty_output>
deep-rl-class/notebooks/unit8/unit8_part1.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit8/unit8_part1.ipynb", "repo_id": "deep-rl-class", "token_count": 15492 }
78
# Quiz [[quiz]] The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: What is Reinforcement Learning? <details> <summary>Solution</summary> Reinforcement learning is a **framework for solving control tasks (also called decision problems)** by building agents that learn from the environment by interacting with it through trial and error and **receiving rewards (positive or negative) as unique feedback**. </details> ### Q2: Define the RL Loop <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rl-loop-ex.jpg" alt="Exercise RL Loop"/> At every step: - Our Agent receives ______ from the environment - Based on that ______ the Agent takes an ______ - Our Agent will move to the right - The Environment goes to a ______ - The Environment gives a ______ to the Agent <Question choices={[ { text: "an action a0, action a0, state s0, state s1, reward r1", explain: "At every step: Our Agent receives **state s0** from the environment. Based on that **state s0** the Agent takes an **action a0**. Our Agent will move to the right. The Environment goes to a **new state s1**. The Environment gives **a reward r1** to the Agent." }, { text: "state s0, state s0, action a0, new state s1, reward r1", explain: "", correct: true }, { text: "a state s0, state s0, action a0, state s1, action a1", explain: "At every step: Our Agent receives **state s0** from the environment. Based on that **state s0** the Agent takes an **action a0**. Our Agent will move to the right. The Environment goes to a **new state s1**. The Environment gives **a reward r1** to the Agent." } ]} /> ### Q3: What's the difference between a state and an observation? <Question choices={[ { text: "The state is a complete description of the state of the world (there is no hidden information)", explain: "", correct: true }, { text: "The state is a partial description of the state", explain: "" }, { text: "The observation is a complete description of the state of the world (there is no hidden information)", explain: "" }, { text: "The observation is a partial description of the state", explain: "", correct: true }, { text: "We receive a state when we play with chess environment", explain: "Since we have access to the whole checkboard information.", correct: true }, { text: "We receive an observation when we play with chess environment", explain: "Since we have access to the whole checkboard information." }, { text: "We receive a state when we play with Super Mario Bros", explain: "We only see a part of the level close to the player, so we receive an observation." }, { text: "We receive an observation when we play with Super Mario Bros", explain: "We only see a part of the level close to the player.", correct: true } ]} /> ### Q4: A task is an instance of a Reinforcement Learning problem. What are the two types of tasks? <Question choices={[ { text: "Episodic", explain: "In Episodic task, we have a starting point and an ending point (a terminal state). This creates an episode: a list of States, Actions, Rewards, and new States. 
For instance, think about Super Mario Bros: an episode begins at the launch of a new Mario Level and ends when you're killed or you reach the end of the level.",
    correct: true
  },
  {
    text: "Recursive",
    explain: ""
  },
  {
    text: "Adversarial",
    explain: ""
  },
  {
    text: "Continuing",
    explain: "Continuing tasks are tasks that continue forever (no terminal state). In this case, the agent must learn how to choose the best actions and simultaneously interact with the environment.",
    correct: true
  }
]}
/>

### Q5: What is the exploration/exploitation tradeoff?

<details>
<summary>Solution</summary>

In Reinforcement Learning, we need to **balance how much we explore the environment and how much we exploit what we know about the environment**.

- *Exploration* is exploring the environment by **trying random actions in order to find more information about the environment**.
- *Exploitation* is **exploiting known information to maximize the reward**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%">

</details>

### Q6: What is a policy?

<details>
<summary>Solution</summary>

- The Policy Ο€ **is the brain of our Agent**. It’s the function that tells us what action to take given the state we are in. So it defines the agent’s behavior at a given time.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_1.jpg" alt="Policy">

</details>

### Q7: What are value-based methods?

<details>
<summary>Solution</summary>

- Value-based methods are one of the main approaches for solving RL problems.
- In value-based methods, instead of training a policy function, **we train a value function that maps a state to the expected value of being at that state**.

</details>

### Q8: What are policy-based methods?

<details>
<summary>Solution</summary>

- In *policy-based methods*, we learn a **policy function directly**.
- This policy function will **map from each state to the best corresponding action at that state**, or a **probability distribution over the set of possible actions at that state**.

</details>

Congrats on finishing this Quiz πŸ₯³! If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge, but **do not worry**: during the course we'll go over these concepts again, and you'll **reinforce your theoretical knowledge with hands-on practice**.
deep-rl-class/units/en/unit1/quiz.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/quiz.mdx", "repo_id": "deep-rl-class", "token_count": 1866 }
79
# Q-Learning Recap [[q-learning-recap]]

*Q-Learning* **is the RL algorithm that**:

- Trains a *Q-function*, an **action-value function** encoded, in internal memory, by a *Q-table* **containing all the state-action pair values.**

- Given a state and action, our Q-function **will search its Q-table for the corresponding value.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function-2.jpg" alt="Q function" width="100%"/>

- When the training is done, **we have an optimal Q-function, or, equivalently, an optimal Q-table.**

- And if we **have an optimal Q-function**, we have an optimal policy, since we **know, for each state, the best action to take.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy" width="100%"/>

In the beginning, our **Q-table is useless since it gives arbitrary values for each state-action pair (most of the time, we initialize the Q-table to 0 values)**. But as we explore the environment and update our Q-table, it will give us a better and better approximation.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/q-learning.jpeg" alt="q-learning.jpeg" width="100%"/>

This is the Q-Learning pseudocode:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-Learning" width="100%"/>
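If it helps to see that pseudocode as actual code, here is a minimal, illustrative Python implementation of tabular Q-Learning with epsilon-greedy exploration (the hyperparameter values are placeholders, and the environment is assumed to follow the classic Gym API with discrete states and actions):

```python
import numpy as np

def q_learning(env, n_episodes=10_000, alpha=0.1, gamma=0.99, epsilon=0.1):
    # Q-table initialized to 0 for every (state, action) pair
    Q = np.zeros((env.observation_space.n, env.action_space.n))

    for _ in range(n_episodes):
        state = env.reset()
        done = False
        while not done:
            # Epsilon-greedy action selection (explore vs. exploit)
            if np.random.rand() < epsilon:
                action = env.action_space.sample()
            else:
                action = int(np.argmax(Q[state]))

            new_state, reward, done, info = env.step(action)

            # TD update: Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
            td_target = reward + gamma * np.max(Q[new_state]) * (not done)
            Q[state, action] += alpha * (td_target - Q[state, action])

            state = new_state

    return Q
```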
deep-rl-class/units/en/unit2/q-learning-recap.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/q-learning-recap.mdx", "repo_id": "deep-rl-class", "token_count": 505 }
80
# Conclusion

**Congrats on finishing this unit**! There was a lot of information.
And congrats on finishing the tutorial. You've just coded your first Deep Reinforcement Learning agent from scratch using PyTorch and shared it on the Hub πŸ₯³.

Don't hesitate to iterate on this unit **by improving the implementation for more complex environments** (for instance, what about changing the network to a Convolutional Neural Network to handle frames as observations?).

In the next unit, **we're going to learn more about Unity MLAgents**, by training agents in Unity environments. This way, you will be ready to participate in the **AI vs AI challenges where you'll train your agents to compete against other agents in a snowball fight and a soccer game.**

Sound fun? See you next time!

Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please πŸ‘‰ [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9)

### Keep Learning, stay awesome πŸ€—
deep-rl-class/units/en/unit4/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 250 }
81
# The SnowballTarget Environment

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget.gif" alt="SnowballTarget"/>

SnowballTarget is an environment we created at Hugging Face using assets from [Kay Lousberg](https://kaylousberg.com/). We have an optional section at the end of this Unit **if you want to learn to use Unity and create your own environments**.

## The agent's Goal

The first agent you're going to train is called Julien the bear 🐻. Julien is trained **to hit targets with snowballs**.

The Goal in this environment is that Julien **hits as many targets as possible in the limited time** (1000 timesteps). To do that, it will need **to place itself correctly in relation to the target and shoot**.

In addition, to avoid "snowball spamming" (aka shooting a snowball every timestep), **Julien has a "cool off" system** (it needs to wait 0.5 seconds after a shot to be able to shoot again).

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/cooloffsystem.gif" alt="Cool Off System"/>
<figcaption>The agent needs to wait 0.5s before being able to shoot a snowball again</figcaption>
</figure>

## The reward function and the reward engineering problem

The reward function is simple. **The environment gives a +1 reward every time the agent's snowball hits a target**.

Because the agent's Goal is to maximize the expected cumulative reward, **it will try to hit as many targets as possible**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget_reward.png" alt="Reward system"/>

We could have a more complex reward function (with a penalty to push the agent to go faster, for example). But when you design an environment, you need to avoid the *reward engineering problem*, which is making the reward function so complex that it forces your agent to behave exactly as you want it to.

Why? Because by doing that, **you might miss interesting strategies that the agent will find with a simpler reward function**.

In terms of code, it looks like this (a rough Python sketch of the same logic is included at the end of this page):

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget-reward-code.png" alt="Reward"/>

## The observation space

Regarding observations, we don't use normal vision (frames), but **we use raycasts**.

Think of raycasts as lasers that will detect if they pass through an object.

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/raycasts.png" alt="Raycasts"/>
<figcaption>Source: <a href="https://github.com/Unity-Technologies/ml-agents">ML-Agents documentation</a></figcaption>
</figure>

In this environment, our agent has multiple sets of raycasts:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowball_target_raycasts.png" alt="Raycasts"/>

In addition to raycasts, the agent gets a "can I shoot" bool as an observation.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget-obs-code.png" alt="Obs"/>

## The action space

The action space is discrete:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget_action_space.png" alt="Action Space"/>
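To make the reward and cool-off logic described above concrete without reading the Unity C# source, here is a rough, illustrative Python sketch (the class and method names are invented for illustration; the real environment is implemented in Unity with ML-Agents):

```python
COOL_OFF_SECONDS = 0.5  # Julien must wait this long between two snowballs


class SnowballShooter:
    """Illustrative stand-in for the agent's shooting component."""

    def __init__(self):
        self.last_shot_time = -COOL_OFF_SECONDS

    def can_shoot(self, current_time: float) -> bool:
        # This boolean is also what the agent receives as its "can I shoot" observation
        return current_time - self.last_shot_time >= COOL_OFF_SECONDS

    def try_shoot(self, current_time: float) -> bool:
        if not self.can_shoot(current_time):
            return False
        self.last_shot_time = current_time
        # ...spawn a snowball projectile here...
        return True


def on_snowball_hit_target(agent) -> None:
    # The environment gives a +1 reward every time the agent's snowball hits a target
    agent.add_reward(1.0)
```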
deep-rl-class/units/en/unit5/snowball-target.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/snowball-target.mdx", "repo_id": "deep-rl-class", "token_count": 1019 }
82
# Additional Readings [[additional-readings]] These are **optional readings** if you want to go deeper. ## PPO Explained - [Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization by Daniel Bick](https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf) - [What is the way to understand Proximal Policy Optimization Algorithm in RL?](https://stackoverflow.com/questions/46422845/what-is-the-way-to-understand-proximal-policy-optimization-algorithm-in-rl) - [Foundations of Deep RL Series, L4 TRPO and PPO by Pieter Abbeel](https://youtu.be/KjWF8VIMGiY) - [OpenAI PPO Blogpost](https://openai.com/blog/openai-baselines-ppo/) - [Spinning Up RL PPO](https://spinningup.openai.com/en/latest/algorithms/ppo.html) - [Paper Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347) ## PPO Implementation details - [The 37 Implementation Details of Proximal Policy Optimization](https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/) - [Part 1 of 3 β€” Proximal Policy Optimization Implementation: 11 Core Implementation Details](https://www.youtube.com/watch?v=MEt6rrxH8W4) ## Importance Sampling - [Importance Sampling Explained](https://youtu.be/C3p2wI4RAi8)
deep-rl-class/units/en/unit8/additional-readings.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/additional-readings.mdx", "repo_id": "deep-rl-class", "token_count": 418 }
83
# Introduction [[introduction]] One of the most critical tasks in Deep Reinforcement Learning is to **find a good set of training hyperparameters**. <img src="https://raw.githubusercontent.com/optuna/optuna/master/docs/image/optuna-logo.png" alt="Optuna Logo"/> [Optuna](https://optuna.org/) is a library that helps you to automate the search. In this Unit, we'll study a **little bit of the theory behind automatic hyperparameter tuning**. We'll first try to optimize the parameters of the DQN studied in the last unit manually. We'll then **learn how to automate the search using Optuna**.
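As a quick preview, a hyperparameter search with Optuna boils down to defining an `objective` function and letting a *study* optimize it. Here is a minimal, self-contained sketch (the objective below scores a toy function; in this unit we'll plug in a DQN training run and return its mean evaluation reward instead):

```python
import optuna

def objective(trial):
    # Sample candidate hyperparameters for this trial
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True)
    gamma = trial.suggest_float("gamma", 0.90, 0.9999)

    # Placeholder score: in practice, train an agent with these values
    # and return its mean evaluation reward.
    score = -((learning_rate - 1e-3) ** 2) - (0.99 - gamma) ** 2
    return score

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50)

print("Best hyperparameters:", study.best_params)
```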
deep-rl-class/units/en/unitbonus2/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus2/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 156 }
84
import argparse import sys sys.path.append(".") from base_classes import T2IAdapterBenchmark, T2IAdapterSDXLBenchmark # noqa: E402 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="TencentARC/t2iadapter_canny_sd14v1", choices=["TencentARC/t2iadapter_canny_sd14v1", "TencentARC/t2i-adapter-canny-sdxl-1.0"], ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=50) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() benchmark_pipe = ( T2IAdapterBenchmark(args) if args.ckpt == "TencentARC/t2iadapter_canny_sd14v1" else T2IAdapterSDXLBenchmark(args) ) benchmark_pipe.benchmark(args)
diffusers/benchmarks/benchmark_t2i_adapter.py/0
{ "file_path": "diffusers/benchmarks/benchmark_t2i_adapter.py", "repo_id": "diffusers", "token_count": 393 }
85
# docstyle-ignore INSTALL_CONTENT = """ # Diffusers installation ! pip install diffusers transformers datasets accelerate # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/diffusers.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
diffusers/docs/source/_config.py/0
{ "file_path": "diffusers/docs/source/_config.py", "repo_id": "diffusers", "token_count": 102 }
86
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoencoderKL The variational autoencoder (VAE) model with KL loss was introduced in [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114v11) by Diederik P. Kingma and Max Welling. The model is used in πŸ€— Diffusers to encode images into latents and to decode latent representations into images. The abstract from the paper is: *How can we perform efficient inference and learning in directed probabilistic models, in the presence of continuous latent variables with intractable posterior distributions, and large datasets? We introduce a stochastic variational inference and learning algorithm that scales to large datasets and, under some mild differentiability conditions, even works in the intractable case. Our contributions are two-fold. First, we show that a reparameterization of the variational lower bound yields a lower bound estimator that can be straightforwardly optimized using standard stochastic gradient methods. Second, we show that for i.i.d. datasets with continuous latent variables per datapoint, posterior inference can be made especially efficient by fitting an approximate inference model (also called a recognition model) to the intractable posterior using the proposed lower bound estimator. Theoretical advantages are reflected in experimental results.* ## Loading from the original format By default the [`AutoencoderKL`] should be loaded with [`~ModelMixin.from_pretrained`], but it can also be loaded from the original format using [`FromOriginalVAEMixin.from_single_file`] as follows: ```py from diffusers import AutoencoderKL url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be a local file model = AutoencoderKL.from_single_file(url) ``` ## AutoencoderKL [[autodoc]] AutoencoderKL - decode - encode - all ## AutoencoderKLOutput [[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput ## DecoderOutput [[autodoc]] models.autoencoders.vae.DecoderOutput ## FlaxAutoencoderKL [[autodoc]] FlaxAutoencoderKL ## FlaxAutoencoderKLOutput [[autodoc]] models.vae_flax.FlaxAutoencoderKLOutput ## FlaxDecoderOutput [[autodoc]] models.vae_flax.FlaxDecoderOutput
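## Usage example

A short, hedged sketch of the encode/decode roundtrip mentioned above (the checkpoint is one of the public fine-tuned Stable Diffusion VAEs, and the input image path is a placeholder):

```py
import torch
from PIL import Image
from torchvision import transforms
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to("cuda").eval()

# Load any RGB image and scale it to the [-1, 1] range the VAE expects
image = Image.open("input.png").convert("RGB").resize((512, 512))  # placeholder path
x = transforms.ToTensor()(image).unsqueeze(0).to("cuda") * 2.0 - 1.0

with torch.no_grad():
    posterior = vae.encode(x).latent_dist        # image -> latent distribution
    latents = posterior.sample()                 # sample latents
    reconstruction = vae.decode(latents).sample  # latents -> image tensor in [-1, 1]
```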
diffusers/docs/source/en/api/models/autoencoderkl.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoderkl.md", "repo_id": "diffusers", "token_count": 785 }
87
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# aMUSEd

aMUSEd was introduced in [aMUSEd: An Open MUSE Reproduction](https://huggingface.co/papers/2401.01808) by Suraj Patil, William Berman, Robin Rombach, and Patrick von Platen.

aMUSEd is a lightweight text-to-image model based on the [MUSE](https://arxiv.org/abs/2301.00704) architecture. It is particularly useful in applications that require a lightweight and fast model, such as generating many images quickly at once.

aMUSEd is a VQ-VAE token-based transformer that can generate an image in fewer forward passes than many diffusion models. In contrast with MUSE, it uses the smaller text encoder CLIP-L/14 instead of T5-XXL. Thanks to its small parameter count and few-forward-pass generation process, aMUSEd can generate many images quickly. This benefit is seen particularly at larger batch sizes.

The abstract from the paper is:

*We present aMUSEd, an open-source, lightweight masked image model (MIM) for text-to-image generation based on MUSE. With 10 percent of MUSE's parameters, aMUSEd is focused on fast image generation. We believe MIM is under-explored compared to latent diffusion, the prevailing approach for text-to-image generation. Compared to latent diffusion, MIM requires fewer inference steps and is more interpretable. Additionally, MIM can be fine-tuned to learn additional styles with only a single image. We hope to encourage further exploration of MIM by demonstrating its effectiveness on large-scale text-to-image generation and releasing reproducible training code. We also release checkpoints for two models which directly produce images at 256x256 and 512x512 resolutions.*

| Model | Params |
|-------|--------|
| [amused-256](https://huggingface.co/amused/amused-256) | 603M |
| [amused-512](https://huggingface.co/amused/amused-512) | 608M |

## AmusedPipeline

[[autodoc]] AmusedPipeline
  - __call__
  - all
  - enable_xformers_memory_efficient_attention
  - disable_xformers_memory_efficient_attention

[[autodoc]] AmusedImg2ImgPipeline
  - __call__
  - all
  - enable_xformers_memory_efficient_attention
  - disable_xformers_memory_efficient_attention

[[autodoc]] AmusedInpaintPipeline
  - __call__
  - all
  - enable_xformers_memory_efficient_attention
  - disable_xformers_memory_efficient_attention
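## Usage example

A minimal, hedged text-to-image sketch with [`AmusedPipeline`] (the prompt, step count, and output filename are only illustrative, and a CUDA GPU is assumed):

```py
import torch
from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained("amused/amused-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
# aMUSEd is designed to need only a handful of denoising steps
image = pipe(prompt, num_inference_steps=12).images[0]
image.save("astronaut.png")
```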
diffusers/docs/source/en/api/pipelines/amused.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/amused.md", "repo_id": "diffusers", "token_count": 772 }
88
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ONNX Runtime πŸ€— [Optimum](https://github.com/huggingface/optimum) provides a Stable Diffusion pipeline compatible with ONNX Runtime. You'll need to install πŸ€— Optimum with the following command for ONNX Runtime support: ```bash pip install -q optimum["onnxruntime"] ``` This guide will show you how to use the Stable Diffusion and Stable Diffusion XL (SDXL) pipelines with ONNX Runtime. ## Stable Diffusion To load and run inference, use the [`~optimum.onnxruntime.ORTStableDiffusionPipeline`]. If you want to load a PyTorch model and convert it to the ONNX format on-the-fly, set `export=True`: ```python from optimum.onnxruntime import ORTStableDiffusionPipeline model_id = "runwayml/stable-diffusion-v1-5" pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True) prompt = "sailing ship in storm by Leonardo da Vinci" image = pipeline(prompt).images[0] pipeline.save_pretrained("./onnx-stable-diffusion-v1-5") ``` <Tip warning={true}> Generating multiple prompts in a batch seems to take too much memory. While we look into it, you may need to iterate instead of batching. </Tip> To export the pipeline in the ONNX format offline and use it later for inference, use the [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) command: ```bash optimum-cli export onnx --model runwayml/stable-diffusion-v1-5 sd_v15_onnx/ ``` Then to perform inference (you don't have to specify `export=True` again): ```python from optimum.onnxruntime import ORTStableDiffusionPipeline model_id = "sd_v15_onnx" pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id) prompt = "sailing ship in storm by Leonardo da Vinci" image = pipeline(prompt).images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/stable_diffusion_v1_5_ort_sail_boat.png"> </div> You can find more examples in πŸ€— Optimum [documentation](https://huggingface.co/docs/optimum/), and Stable Diffusion is supported for text-to-image, image-to-image, and inpainting. 
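For example, image-to-image follows the same pattern with [`~optimum.onnxruntime.ORTStableDiffusionImg2ImgPipeline`]; here is a hedged sketch (the initial image path and generation parameters are placeholders):

```python
from PIL import Image
from optimum.onnxruntime import ORTStableDiffusionImg2ImgPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = ORTStableDiffusionImg2ImgPipeline.from_pretrained(model_id, export=True)

init_image = Image.open("sketch.png").convert("RGB").resize((768, 512))  # placeholder input image
prompt = "A fantasy landscape, trending on artstation"
image = pipeline(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images[0]
image.save("fantasy_landscape.png")
```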
## Stable Diffusion XL To load and run inference with SDXL, use the [`~optimum.onnxruntime.ORTStableDiffusionXLPipeline`]: ```python from optimum.onnxruntime import ORTStableDiffusionXLPipeline model_id = "stabilityai/stable-diffusion-xl-base-1.0" pipeline = ORTStableDiffusionXLPipeline.from_pretrained(model_id) prompt = "sailing ship in storm by Leonardo da Vinci" image = pipeline(prompt).images[0] ``` To export the pipeline in the ONNX format and use it later for inference, use the [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) command: ```bash optimum-cli export onnx --model stabilityai/stable-diffusion-xl-base-1.0 --task stable-diffusion-xl sd_xl_onnx/ ``` SDXL in the ONNX format is supported for text-to-image and image-to-image.
diffusers/docs/source/en/optimization/onnx.md/0
{ "file_path": "diffusers/docs/source/en/optimization/onnx.md", "repo_id": "diffusers", "token_count": 1194 }
89
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Kandinsky 2.2 <Tip warning={true}> This script is experimental, and it's easy to overfit and run into issues like catastrophic forgetting. Try exploring different hyperparameters to get the best results on your dataset. </Tip> Kandinsky 2.2 is a multilingual text-to-image model capable of producing more photorealistic images. The model includes an image prior model for creating image embeddings from text prompts, and a decoder model that generates images based on the prior model's embeddings. That's why you'll find two separate scripts in Diffusers for Kandinsky 2.2, one for training the prior model and one for training the decoder model. You can train both models separately, but to get the best results, you should train both the prior and decoder models. Depending on your GPU, you may need to enable `gradient_checkpointing` (⚠️ not supported for the prior model!), `mixed_precision`, and `gradient_accumulation_steps` to help fit the model into memory and to speedup training. You can reduce your memory-usage even more by enabling memory-efficient attention with [xFormers](../optimization/xformers) (version [v0.0.16](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212) fails for training on some GPUs so you may need to install a development version instead). This guide explores the [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py) and the [train_text_to_image_decoder.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py) scripts to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the scripts, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: ```bash cd examples/kandinsky2_2/text_to_image pip install -r requirements.txt ``` <Tip> πŸ€— Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the πŸ€— Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. 
</Tip>

Initialize an πŸ€— Accelerate environment:

```bash
accelerate config
```

To set up a default πŸ€— Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

<Tip>

The following sections highlight parts of the training scripts that are important for understanding how to modify them, but they don't cover every aspect of the scripts in detail. If you're interested in learning more, feel free to read through the scripts and let us know if you have any questions or concerns.

</Tip>

## Script parameters

The training scripts provide many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L190) function. The training scripts provide default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.

For example, to speed up training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command:

```bash
accelerate launch train_text_to_image_prior.py \
  --mixed_precision="fp16"
```

Most of the parameters are identical to the parameters in the [Text-to-image](text2image#script-parameters) training guide, so let's get straight to a walkthrough of the Kandinsky training scripts!

### Min-SNR weighting

The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.

Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:

```bash
accelerate launch train_text_to_image_prior.py \
  --snr_gamma=5.0
```

## Training script

The training script is also similar to the [Text-to-image](text2image#training-script) training guide, but it's been modified to support training the prior and decoder models. This guide focuses on the code that is unique to the Kandinsky 2.2 training scripts.

<hfoptions id="script">
<hfoption id="prior model">

The [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L441) function contains the code for preparing the dataset and training the model.
One of the main differences you'll notice right away is that the training script also loads a [`~transformers.CLIPImageProcessor`] - in addition to a scheduler and tokenizer - for preprocessing images and a [`~transformers.CLIPVisionModelWithProjection`] model for encoding the images: ```py noise_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", prediction_type="sample") image_processor = CLIPImageProcessor.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_processor" ) tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="tokenizer") with ContextManagers(deepspeed_zero_init_disabled_context_manager()): image_encoder = CLIPVisionModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype ).eval() text_encoder = CLIPTextModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype ).eval() ``` Kandinsky uses a [`PriorTransformer`] to generate the image embeddings, so you'll want to setup the optimizer to learn the prior mode's parameters. ```py prior = PriorTransformer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior") prior.train() optimizer = optimizer_cls( prior.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Next, the input captions are tokenized, and images are [preprocessed](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L632) by the [`~transformers.CLIPImageProcessor`]: ```py def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples) return examples ``` Finally, the [training loop](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L718) converts the input images into latents, adds noise to the image embeddings, and makes a prediction: ```py model_pred = prior( noisy_latents, timestep=timesteps, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask, ).predicted_image_embedding ``` If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. </hfoption> <hfoption id="decoder model"> The [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L440) function contains the code for preparing the dataset and training the model. 
Unlike the prior model, the decoder initializes a [`VQModel`] to decode the latents into images and it uses a [`UNet2DConditionModel`]: ```py with ContextManagers(deepspeed_zero_init_disabled_context_manager()): vae = VQModel.from_pretrained( args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype ).eval() image_encoder = CLIPVisionModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype ).eval() unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet") ``` Next, the script includes several image transforms and a [preprocessing](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L622) function for applying the transforms to the images and returning the pixel values: ```py def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values return examples ``` Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L706) handles converting the images to latents, adding noise, and predicting the noise residual. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ```py model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4] ``` </hfoption> </hfoptions> ## Launch the script Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! πŸš€ You'll train on the [PokΓ©mon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate your own PokΓ©mon, but you can also create and train on your own dataset by following the [Create a dataset for training](create_dataset) guide. Set the environment variable `DATASET_NAME` to the name of the dataset on the Hub or if you're training on your own files, set the environment variable `TRAIN_DIR` to a path to your dataset. If you’re training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. <Tip> To monitor training progress with Weights & Biases, add the `--report_to=wandb` parameter to the training command. You’ll also need to add the `--validation_prompt` to the training command to keep track of results. This can be really useful for debugging the model and viewing intermediate results. 
</Tip>

<hfoptions id="training-inference">
<hfoption id="prior model">

```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --validation_prompts="A robot pokemon, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-prior-pokemon-model"
```

</hfoption>
<hfoption id="decoder model">

```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --validation_prompts="A robot pokemon, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-decoder-pokemon-model"
```

</hfoption>
</hfoptions>

Once training is finished, you can use your newly trained model for inference!

<hfoptions id="training-inference">
<hfoption id="prior model">

```py
from diffusers import AutoPipelineForText2Image, DiffusionPipeline
import torch

# output_dir is the --output_dir you trained the prior with
prior_pipeline = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16)
prior_components = {"prior_" + k: v for k, v in prior_pipeline.components.items()}
pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16)

pipeline.enable_model_cpu_offload()
prompt = "A robot pokemon, 4k photo"
image = pipeline(prompt=prompt).images[0]
```

<Tip>

Feel free to replace `kandinsky-community/kandinsky-2-2-decoder` with your own trained decoder checkpoint!

</Tip>

</hfoption>
<hfoption id="decoder model">

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()

prompt = "A robot pokemon, 4k photo"
image = pipeline(prompt=prompt).images[0]
```

For the decoder model, you can also perform inference from a saved checkpoint which can be useful for viewing intermediate results. In this case, load the checkpoint into the UNet:

```py
from diffusers import AutoPipelineForText2Image, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path/to/saved/model" + "/checkpoint-<N>/unet")

pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()

image = pipeline(prompt="A robot pokemon, 4k photo").images[0]
```

</hfoption>
</hfoptions>

## Next steps

Congratulations on training a Kandinsky 2.2 model! To learn more about how to use your new model, the following guides may be helpful:

- Read the [Kandinsky](../using-diffusers/kandinsky) guide to learn how to use it for a variety of different tasks (text-to-image, image-to-image, inpainting, interpolation), and how it can be combined with a ControlNet.
- Check out the [DreamBooth](dreambooth) and [LoRA](lora) training guides to learn how to train a personalized Kandinsky model with just a few example images.
These two training techniques can even be combined!
diffusers/docs/source/en/training/kandinsky.md/0
{ "file_path": "diffusers/docs/source/en/training/kandinsky.md", "repo_id": "diffusers", "token_count": 5046 }
90
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-to-image [[open-in-colab]] When you think of diffusion models, text-to-image is usually one of the first things that come to mind. Text-to-image generates an image from a text description (for example, "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k") which is also known as a *prompt*. From a very high level, a diffusion model takes a prompt and some random initial noise, and iteratively removes the noise to construct an image. The *denoising* process is guided by the prompt, and once the denoising process ends after a predetermined number of time steps, the image representation is decoded into an image. <Tip> Read the [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) blog post to learn more about how a latent diffusion model works. </Tip> You can generate images from a prompt in πŸ€— Diffusers in two steps: 1. Load a checkpoint into the [`AutoPipelineForText2Image`] class, which automatically detects the appropriate pipeline class to use based on the checkpoint: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ).to("cuda") ``` 2. Pass a prompt to the pipeline to generate an image: ```py image = pipeline( "stained glass of darth vader, backlight, centered composition, masterpiece, photorealistic, 8k" ).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-vader.png"/> </div> ## Popular models The most common text-to-image models are [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [Stable Diffusion XL (SDXL)](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and [Kandinsky 2.2](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder). There are also ControlNet models or adapters that can be used with text-to-image models for more direct control in generating images. The results from each model are slightly different because of their architecture and training process, but no matter which model you choose, their usage is more or less the same. Let's use the same prompt for each model and compare their results. ### Stable Diffusion v1.5 [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) is a latent diffusion model initialized from [Stable Diffusion v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4), and finetuned for 595K steps on 512x512 images from the LAION-Aesthetics V2 dataset. 
You can use this model like: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] image ``` ### Stable Diffusion XL SDXL is a much larger version of the previous Stable Diffusion models, and involves a two-stage model process that adds even more details to an image. It also includes some additional *micro-conditionings* to generate high-quality images centered subjects. Take a look at the more comprehensive [SDXL](sdxl) guide to learn more about how to use it. In general, you can use SDXL like: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16" ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] image ``` ### Kandinsky 2.2 The Kandinsky model is a bit different from the Stable Diffusion models because it also uses an image prior model to create embeddings that are used to better align text and images in the diffusion model. The easiest way to use Kandinsky 2.2 is: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] image ``` ### ControlNet ControlNet models are auxiliary models or adapters that are finetuned on top of text-to-image models, such as [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5). Using ControlNet models in combination with text-to-image models offers diverse options for more explicit control over how to generate an image. With ControlNet, you add an additional conditioning input image to the model. For example, if you provide an image of a human pose (usually represented as multiple keypoints that are connected into a skeleton) as a conditioning input, the model generates an image that follows the pose of the image. Check out the more in-depth [ControlNet](controlnet) guide to learn more about other conditioning inputs and how to use them. In this example, let's condition the ControlNet with a human pose estimation image. 
Load the ControlNet model pretrained on human pose estimations: ```py from diffusers import ControlNetModel, AutoPipelineForText2Image from diffusers.utils import load_image import torch controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pose_image = load_image("https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png") ``` Pass the `controlnet` to the [`AutoPipelineForText2Image`], and provide the prompt and pose estimation image: ```py pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16" ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=pose_image, generator=generator).images[0] image ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion v1.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion XL</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Kandinsky 2.2</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-3.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">ControlNet (pose conditioning)</figcaption> </div> </div> ## Configure pipeline parameters There are a number of parameters that can be configured in the pipeline that affect how an image is generated. You can change the image's output size, specify a negative prompt to improve image quality, and more. This section dives deeper into how to use these parameters. ### Height and width The `height` and `width` parameters control the height and width (in pixels) of the generated image. By default, the Stable Diffusion v1.5 model outputs 512x512 images, but you can change this to any size that is a multiple of 8. For example, to create a rectangular image: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ).to("cuda") image = pipeline( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", height=768, width=512 ).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-hw.png"/> </div> <Tip warning={true}> Other models may have different default image sizes depending on the image sizes in the training dataset. For example, SDXL's default image size is 1024x1024 and using lower `height` and `width` values may result in lower quality images. Make sure you check the model's API reference first! 
</Tip> ### Guidance scale The `guidance_scale` parameter affects how much the prompt influences image generation. A lower value gives the model "creativity" to generate images that are more loosely related to the prompt. Higher `guidance_scale` values push the model to follow the prompt more closely, and if this value is too high, you may observe some artifacts in the generated image. ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 ).to("cuda") image = pipeline( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", guidance_scale=3.5 ).images[0] image ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-guidance-scale-2.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 2.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-guidance-scale-7.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 7.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-guidance-scale-10.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 10.5</figcaption> </div> </div> ### Negative prompt Just like how a prompt guides generation, a *negative prompt* steers the model away from things you don't want the model to generate. This is commonly used to improve overall image quality by removing poor or bad image features such as "low resolution" or "bad details". You can also use a negative prompt to remove or modify the content and style of an image. ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 ).to("cuda") image = pipeline( prompt="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", negative_prompt="ugly, deformed, disfigured, poor details, bad anatomy", ).images[0] image ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-neg-prompt-1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-neg-prompt-2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "astronaut"</figcaption> </div> </div> ### Generator A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator) object enables reproducibility in a pipeline by setting a manual seed. You can use a `Generator` to generate batches of images and iteratively improve on an image generated from a seed as detailed in the [Improve image quality with deterministic generation](reusing_seeds) guide. You can set a seed and `Generator` as shown below. Creating an image with a `Generator` should return the same result each time instead of randomly generating a new image. 
```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(30)
image = pipeline(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    generator=generator,
).images[0]
image
```

## Control image generation

There are several ways to exert more control over how an image is generated outside of configuring a pipeline's parameters, such as prompt weighting and ControlNet models.

### Prompt weighting

Prompt weighting is a technique for increasing or decreasing the importance of concepts in a prompt to emphasize or minimize certain features in an image. We recommend using the [Compel](https://github.com/damian0815/compel) library to help you generate the weighted prompt embeddings.

<Tip>

Learn how to create the prompt embeddings in the [Prompt weighting](weighted_prompts) guide. This example focuses on how to use the prompt embeddings in the pipeline.

</Tip>

Once you've created the embeddings, you can pass them to the `prompt_embeds` (and `negative_prompt_embeds` if you're using a negative prompt) parameter in the pipeline.

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
image = pipeline(
    prompt_embeds=prompt_embeds, # generated from Compel
    negative_prompt_embeds=negative_prompt_embeds, # generated from Compel
).images[0]
```

### ControlNet

As you saw in the [ControlNet](#controlnet) section, these models offer a more flexible and accurate way to generate images by incorporating an additional conditioning image input. Each ControlNet model is pretrained on a particular type of conditioning image to generate new images that resemble it. For example, if you take a ControlNet model pretrained on depth maps, you can give the model a depth map as a conditioning input and it'll generate an image that preserves the spatial information in it. This is quicker and easier than specifying the depth information in a prompt. You can even combine multiple conditioning inputs with a [MultiControlNet](controlnet#multicontrolnet)!

There are many types of conditioning inputs you can use, and πŸ€— Diffusers supports ControlNet for Stable Diffusion and SDXL models. Take a look at the more comprehensive [ControlNet](controlnet) guide to learn how you can use these models.

## Optimize

Diffusion models are large, and the iterative nature of denoising an image is computationally expensive and memory-intensive. But this doesn't mean you need access to powerful - or even many - GPUs to use them. There are many optimization techniques for running diffusion models on consumer and free-tier resources. For example, you can load model weights in half-precision to save GPU memory and increase speed, or offload the model to the CPU and only move it to the GPU when it's needed to save even more memory.

PyTorch 2.0 also supports a more memory-efficient attention mechanism called [*scaled dot product attention*](../optimization/torch2.0#scaled-dot-product-attention) that is automatically enabled if you're using PyTorch 2.0.
You can combine this with [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) to speed your code up even more: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16").to("cuda") pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True) ``` For more tips on how to optimize your code to save memory and speed up inference, read the [Memory and speed](../optimization/fp16) and [Torch 2.0](../optimization/torch2.0) guides.
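If GPU memory is tight, you can also combine half-precision loading with model offloading. The snippet below is a minimal sketch of that combination using [`~DiffusionPipeline.enable_model_cpu_offload`]; it assumes you have πŸ€— Accelerate installed, and the pipeline is deliberately not moved to `"cuda"` because offloading handles device placement for you.

```py
from diffusers import AutoPipelineForText2Image
import torch

# load the weights in half-precision to roughly halve the memory needed for the model
pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
)

# keep the components on the CPU and move each one to the GPU only while it is needed
# (requires the accelerate library; don't call .to("cuda") when offloading is enabled)
pipeline.enable_model_cpu_offload()

image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k").images[0]
image
```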
diffusers/docs/source/en/using-diffusers/conditional_image_generation.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/conditional_image_generation.md", "repo_id": "diffusers", "token_count": 5137 }
91
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Load pipelines, models, and schedulers [[open-in-colab]] Having an easy way to use a diffusion system for inference is essential to 🧨 Diffusers. Diffusion systems often consist of multiple components like parameterized models, tokenizers, and schedulers that interact in complex ways. That is why we designed the [`DiffusionPipeline`] to wrap the complexity of the entire diffusion system into an easy-to-use API, while remaining flexible enough to be adapted for other use cases, such as loading each component individually as building blocks to assemble your own diffusion system. Everything you need for inference or training is accessible with the `from_pretrained()` method. This guide will show you how to load: - pipelines from the Hub and locally - different components into a pipeline - checkpoint variants such as different floating point types or non-exponential mean averaged (EMA) weights - models and schedulers ## Diffusion Pipeline <Tip> πŸ’‘ Skip to the [DiffusionPipeline explained](#diffusionpipeline-explained) section if you are interested in learning in more detail about how the [`DiffusionPipeline`] class works. </Tip> The [`DiffusionPipeline`] class is the simplest and most generic way to load the latest trending diffusion model from the [Hub](https://huggingface.co/models?library=diffusers&sort=trending). The [`DiffusionPipeline.from_pretrained`] method automatically detects the correct pipeline class from the checkpoint, downloads, and caches all the required configuration and weight files, and returns a pipeline instance ready for inference. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True) ``` You can also load a checkpoint with its specific pipeline class. The example above loaded a Stable Diffusion model; to get the same result, use the [`StableDiffusionPipeline`] class: ```python from diffusers import StableDiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(repo_id, use_safetensors=True) ``` A checkpoint (such as [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)) may also be used for more than one task, like text-to-image or image-to-image. 
To differentiate what task you want to use the checkpoint for, you have to load it directly with its corresponding task-specific pipeline class: ```python from diffusers import StableDiffusionImg2ImgPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id) ``` ### Local pipeline To load a diffusion pipeline locally, use [`git-lfs`](https://git-lfs.github.com/) to manually download the checkpoint (in this case, [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)) to your local disk. This creates a local folder, `./stable-diffusion-v1-5`, on your disk: ```bash git-lfs install git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` Then pass the local path to [`~DiffusionPipeline.from_pretrained`]: ```python from diffusers import DiffusionPipeline repo_id = "./stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True) ``` The [`~DiffusionPipeline.from_pretrained`] method won't download any files from the Hub when it detects a local path, but this also means it won't download and cache the latest changes to a checkpoint. ### Swap components in a pipeline You can customize the default components of any pipeline with another compatible component. Customization is important because: - Changing the scheduler is important for exploring the trade-off between generation speed and quality. - Different components of a model are typically trained independently and you can swap out a component with a better-performing one. - During finetuning, usually only some components - like the UNet or text encoder - are trained. To find out which schedulers are compatible for customization, you can use the `compatibles` method: ```py from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True) stable_diffusion.scheduler.compatibles ``` Let's use the [`SchedulerMixin.from_pretrained`] method to replace the default [`PNDMScheduler`] with a more performant scheduler, [`EulerDiscreteScheduler`]. The `subfolder="scheduler"` argument is required to load the scheduler configuration from the correct [subfolder](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/scheduler) of the pipeline repository. Then you can pass the new [`EulerDiscreteScheduler`] instance to the `scheduler` argument in [`DiffusionPipeline`]: ```python from diffusers import DiffusionPipeline, EulerDiscreteScheduler repo_id = "runwayml/stable-diffusion-v1-5" scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler, use_safetensors=True) ``` ### Safety checker Diffusion models like Stable Diffusion can generate harmful content, which is why 🧨 Diffusers has a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to check generated outputs against known hardcoded NSFW content. 
If you'd like to disable the safety checker for whatever reason, pass `None` to the `safety_checker` argument: ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None, use_safetensors=True) """ You have disabled the safety checker for <class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline'> by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling it only for use cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 . """ ``` ### Reuse components across pipelines You can also reuse the same components in multiple pipelines to avoid loading the weights into RAM twice. Use the [`~DiffusionPipeline.components`] method to save the components: ```python from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "runwayml/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id, use_safetensors=True) components = stable_diffusion_txt2img.components ``` Then you can pass the `components` to another pipeline without reloading the weights into RAM: ```py stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(**components) ``` You can also pass the components individually to the pipeline if you want more flexibility over which components to reuse or disable. For example, to reuse the same components in the text-to-image pipeline, except for the safety checker and feature extractor, in the image-to-image pipeline: ```py from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "runwayml/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id, use_safetensors=True) stable_diffusion_img2img = StableDiffusionImg2ImgPipeline( vae=stable_diffusion_txt2img.vae, text_encoder=stable_diffusion_txt2img.text_encoder, tokenizer=stable_diffusion_txt2img.tokenizer, unet=stable_diffusion_txt2img.unet, scheduler=stable_diffusion_txt2img.scheduler, safety_checker=None, feature_extractor=None, requires_safety_checker=False, ) ``` ## Checkpoint variants A checkpoint variant is usually a checkpoint whose weights are: - Stored in a different floating point type for lower precision and lower storage, such as [`torch.float16`](https://pytorch.org/docs/stable/tensors.html#data-types), because it only requires half the bandwidth and storage to download. You can't use this variant if you're continuing training or using a CPU. - Non-exponential mean averaged (EMA) weights, which shouldn't be used for inference. You should use these to continue fine-tuning a model. <Tip> πŸ’‘ When the checkpoints have identical model structures, but they were trained on different datasets and with a different training setup, they should be stored in separate repositories instead of variations (for example, [`stable-diffusion-v1-4`] and [`stable-diffusion-v1-5`]). </Tip> Otherwise, a variant is **identical** to the original checkpoint. 
They have exactly the same serialization format (like [Safetensors](./using_safetensors)), model structure, and weights that have identical tensor shapes. | **checkpoint type** | **weight name** | **argument for loading weights** | |---------------------|-------------------------------------|----------------------------------| | original | diffusion_pytorch_model.bin | | | floating point | diffusion_pytorch_model.fp16.bin | `variant`, `torch_dtype` | | non-EMA | diffusion_pytorch_model.non_ema.bin | `variant` | There are two important arguments to know for loading variants: - `torch_dtype` defines the floating point precision of the loaded checkpoints. For example, if you want to save bandwidth by loading a `fp16` variant, you should specify `torch_dtype=torch.float16` to *convert the weights* to `fp16`. Otherwise, the `fp16` weights are converted to the default `fp32` precision. You can also load the original checkpoint without defining the `variant` argument, and convert it to `fp16` with `torch_dtype=torch.float16`. In this case, the default `fp32` weights are downloaded first, and then they're converted to `fp16` after loading. - `variant` defines which files should be loaded from the repository. For example, if you want to load a `non_ema` variant from the [`diffusers/stable-diffusion-variants`](https://huggingface.co/diffusers/stable-diffusion-variants/tree/main/unet) repository, you should specify `variant="non_ema"` to download the `non_ema` files. ```python from diffusers import DiffusionPipeline import torch # load fp16 variant stable_diffusion = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True ) # load non_ema variant stable_diffusion = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", variant="non_ema", use_safetensors=True ) ``` To save a checkpoint stored in a different floating-point type or as a non-EMA variant, use the [`DiffusionPipeline.save_pretrained`] method and specify the `variant` argument. You should try and save a variant to the same folder as the original checkpoint, so you can load both from the same folder: ```python from diffusers import DiffusionPipeline # save as fp16 variant stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16") # save as non-ema variant stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema") ``` If you don't save the variant to an existing folder, you must specify the `variant` argument otherwise it'll throw an `Exception` because it can't find the original checkpoint: ```python # πŸ‘Ž this won't work stable_diffusion = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) # πŸ‘ this works stable_diffusion = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True ) ``` <!-- TODO(Patrick) - Make sure to uncomment this part as soon as things are deprecated. 
#### Using `revision` to load pipeline variants is deprecated Previously the `revision` argument of [`DiffusionPipeline.from_pretrained`] was heavily used to load model variants, e.g.: ```python from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", use_safetensors=True) ``` However, this behavior is now deprecated since the "revision" argument should (just as it's done in GitHub) better be used to load model checkpoints from a specific commit or branch in development. The above example is therefore deprecated and won't be supported anymore for `diffusers >= 1.0.0`. <Tip warning={true}> If you load diffusers pipelines or models with `revision="fp16"` or `revision="non_ema"`, please make sure to update the code and use `variant="fp16"` or `variation="non_ema"` respectively instead. </Tip> --> ## Models Models are loaded from the [`ModelMixin.from_pretrained`] method, which downloads and caches the latest version of the model weights and configurations. If the latest files are available in the local cache, [`~ModelMixin.from_pretrained`] reuses files in the cache instead of re-downloading them. Models can be loaded from a subfolder with the `subfolder` argument. For example, the model weights for `runwayml/stable-diffusion-v1-5` are stored in the [`unet`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/unet) subfolder: ```python from diffusers import UNet2DConditionModel repo_id = "runwayml/stable-diffusion-v1-5" model = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet", use_safetensors=True) ``` Or directly from a repository's [directory](https://huggingface.co/google/ddpm-cifar10-32/tree/main): ```python from diffusers import UNet2DModel repo_id = "google/ddpm-cifar10-32" model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` You can also load and save model variants by specifying the `variant` argument in [`ModelMixin.from_pretrained`] and [`ModelMixin.save_pretrained`]: ```python from diffusers import UNet2DConditionModel model = UNet2DConditionModel.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="unet", variant="non_ema", use_safetensors=True ) model.save_pretrained("./local-unet", variant="non_ema") ``` ## Schedulers Schedulers are loaded from the [`SchedulerMixin.from_pretrained`] method, and unlike models, schedulers are **not parameterized** or **trained**; they are defined by a configuration file. Loading schedulers does not consume any significant amount of memory and the same configuration file can be used for a variety of different schedulers. 
For example, the following schedulers are compatible with [`StableDiffusionPipeline`], which means you can load the same scheduler configuration file in any of these classes: ```python from diffusers import StableDiffusionPipeline from diffusers import ( DDPMScheduler, DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, ) repo_id = "runwayml/stable-diffusion-v1-5" ddpm = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler") ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler") pndm = PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler") lms = LMSDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") dpm = DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler") # replace `dpm` with any of `ddpm`, `ddim`, `pndm`, `lms`, `euler_anc`, `euler` pipeline = StableDiffusionPipeline.from_pretrained(repo_id, scheduler=dpm, use_safetensors=True) ``` ## DiffusionPipeline explained As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things: - Download the latest version of the folder structure required for inference and cache it. If the latest folder structure is available in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cache and won't redownload the files. - Load the cached weights into the correct pipeline [class](../api/pipelines/overview#diffusers-summary) - retrieved from the `model_index.json` file - and return an instance of it. The pipelines' underlying folder structure corresponds directly with their class instances. For example, the [`StableDiffusionPipeline`] corresponds to the folder structure in [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5). ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipeline = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True) print(pipeline) ``` You'll see pipeline is an instance of [`StableDiffusionPipeline`], which consists of seven components: - `"feature_extractor"`: a [`~transformers.CLIPImageProcessor`] from πŸ€— Transformers. - `"safety_checker"`: a [component](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) for screening against harmful content. - `"scheduler"`: an instance of [`PNDMScheduler`]. - `"text_encoder"`: a [`~transformers.CLIPTextModel`] from πŸ€— Transformers. - `"tokenizer"`: a [`~transformers.CLIPTokenizer`] from πŸ€— Transformers. - `"unet"`: an instance of [`UNet2DConditionModel`]. - `"vae"`: an instance of [`AutoencoderKL`]. 
```json
StableDiffusionPipeline {
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```

Compare the components of the pipeline instance to the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main) folder structure, and you'll see there is a separate folder for each of the components in the repository:

```
.
β”œβ”€β”€ feature_extractor
β”‚   └── preprocessor_config.json
β”œβ”€β”€ model_index.json
β”œβ”€β”€ safety_checker
β”‚   β”œβ”€β”€ config.json
β”‚   β”œβ”€β”€ model.fp16.safetensors
β”‚   β”œβ”€β”€ model.safetensors
β”‚   β”œβ”€β”€ pytorch_model.bin
β”‚   └── pytorch_model.fp16.bin
β”œβ”€β”€ scheduler
β”‚   └── scheduler_config.json
β”œβ”€β”€ text_encoder
β”‚   β”œβ”€β”€ config.json
β”‚   β”œβ”€β”€ model.fp16.safetensors
β”‚   β”œβ”€β”€ model.safetensors
β”‚   β”œβ”€β”€ pytorch_model.bin
β”‚   └── pytorch_model.fp16.bin
β”œβ”€β”€ tokenizer
β”‚   β”œβ”€β”€ merges.txt
β”‚   β”œβ”€β”€ special_tokens_map.json
β”‚   β”œβ”€β”€ tokenizer_config.json
β”‚   └── vocab.json
β”œβ”€β”€ unet
β”‚   β”œβ”€β”€ config.json
β”‚   β”œβ”€β”€ diffusion_pytorch_model.bin
β”‚   β”œβ”€β”€ diffusion_pytorch_model.fp16.bin
β”‚   β”œβ”€β”€ diffusion_pytorch_model.fp16.safetensors
β”‚   β”œβ”€β”€ diffusion_pytorch_model.non_ema.bin
β”‚   β”œβ”€β”€ diffusion_pytorch_model.non_ema.safetensors
β”‚   └── diffusion_pytorch_model.safetensors
└── vae
    β”œβ”€β”€ config.json
    β”œβ”€β”€ diffusion_pytorch_model.bin
    β”œβ”€β”€ diffusion_pytorch_model.fp16.bin
    β”œβ”€β”€ diffusion_pytorch_model.fp16.safetensors
    └── diffusion_pytorch_model.safetensors
```

You can access each of the components of the pipeline as an attribute to view its configuration:

```py
pipeline.tokenizer
CLIPTokenizer(
    name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer",
    vocab_size=49408,
    model_max_length=77,
    is_fast=False,
    padding_side="right",
    truncation_side="right",
    special_tokens={
        "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "pad_token": "<|endoftext|>",
    },
    clean_up_tokenization_spaces=True
)
```

Every pipeline expects a [`model_index.json`](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json) file that tells the [`DiffusionPipeline`]:

- which pipeline class to load from `_class_name`
- which version of 🧨 Diffusers was used to create the model in `_diffusers_version`
- what components from which library are stored in the subfolders (`name` corresponds to the component and subfolder name, `library` corresponds to the name of the library to load the class from, and `class` corresponds to the class name)

```json
{
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.6.0",
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
"transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ```
diffusers/docs/source/en/using-diffusers/loading.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/loading.md", "repo_id": "diffusers", "token_count": 7192 }
92