Dataset columns: text (string, lengths 7 to 328k), id (string, lengths 14 to 166), metadata (dict), __index_level_0__ (int64, values 0 to 459)
[metadata] license_file = LICENSE
trl/setup.cfg/0
{ "file_path": "trl/setup.cfg", "repo_id": "trl", "token_count": 10 }
423
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from functools import partial import torch from datasets import Dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments from trl import IterativeSFTTrainer class IterativeTrainerTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id) cls.tokenizer.pad_token = cls.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab-calibrated" cls.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) cls.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) def _init_tensor_dummy_dataset(self): dummy_dataset_dict = { "input_ids": [ torch.tensor([5303, 3621, 3666, 1438, 318]), torch.tensor([3666, 1438, 318, 3666, 1438, 318]), torch.tensor([5303, 3621, 3666, 1438, 318]), ], "attention_mask": [ torch.tensor([1, 1, 1, 1, 1]), torch.tensor([1, 1, 1, 1, 1, 1]), torch.tensor([1, 1, 1, 1, 1]), ], "labels": [ torch.tensor([5303, 3621, 3666, 1438, 318]), torch.tensor([3666, 1438, 318, 3666, 1438, 318]), torch.tensor([5303, 3621, 3666, 1438, 318]), ], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) dummy_dataset.set_format("torch") return dummy_dataset def _init_textual_dummy_dataset(self): dummy_dataset_dict = { "texts": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], "texts_labels": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) dummy_dataset.set_format("torch") return dummy_dataset def setUp(self): # initialize trainer self.model.train() return super().setUp() @parameterized.expand( [ ["gpt2", "tensor"], ["gpt2", "text"], ["t5", "tensor"], ["t5", "text"], ] ) def test_iterative_step_from_tensor(self, model_name, input_name): with tempfile.TemporaryDirectory() as tmp_dir: # initialize dataset if input_name == "tensor": dummy_dataset = self._init_tensor_dummy_dataset() inputs = { "input_ids": dummy_dataset["input_ids"], "attention_mask": dummy_dataset["attention_mask"], "labels": dummy_dataset["labels"], } else: dummy_dataset = self._init_textual_dummy_dataset() inputs = { "texts": dummy_dataset["texts"], "texts_labels": dummy_dataset["texts_labels"], } if model_name == "gpt2": model = self.model tokenizer = self.tokenizer else: model = self.t5_model tokenizer = self.t5_tokenizer args = TrainingArguments( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=2, learning_rate=1e-3 ) iterative_trainer = IterativeSFTTrainer(model=model, args=args, tokenizer=tokenizer) iterative_trainer.optimizer.zero_grad = partial(iterative_trainer.optimizer.zero_grad, set_to_none=False) 
iterative_trainer.step(**inputs) for param in iterative_trainer.model.parameters(): assert param.grad is not None
trl/tests/test_iterative_sft_trainer.py/0
{ "file_path": "trl/tests/test_iterative_sft_trainer.py", "repo_id": "trl", "token_count": 2176 }
424
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import warnings from typing import Optional import torch from accelerate.utils import extract_model_from_parallel from transformers import StoppingCriteria, StoppingCriteriaList from ..import_utils import is_rich_available if is_rich_available(): from rich import print from rich.text import Text class StringStoppingCriteria(StoppingCriteria): """Custom `StoppingCriteria` which checks if all generations in the batch are completed.""" def __init__(self, stop_strings, tokenizer): self.stop_strings = stop_strings self.tokenizer = tokenizer self.first_call = True def __call__(self, input_ids, scores, **kwargs): """Returns true if all generated sequences contain any of the stop strings.""" if self.first_call: self.generated_tokens = [1 for _ in range(input_ids.shape[0])] self.start_length = input_ids.shape[-1] - 1 self.first_call = False decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) done = [] for i, decoded_generation in enumerate(decoded_generations): sequence_complete = any(stop_string in decoded_generation for stop_string in self.stop_strings) done.append(sequence_complete) if not sequence_complete: self.generated_tokens[i] += 1 if all(done): self.first_call = True return all(done) class TextHistory: """The TextHistory class keeps track of the history of an interaction between the language model and the environment.""" def __init__(self, text, tokens, system=True): """ Initialize TextHistory. args: text (`str`): The text of the first segment. tokens (`torch.LongTensor`): The tokens of the first segment. system (`bool`, *optional*): Whether the first segment is a system or user segment. """ self.system_spans = [] self.text_spans = [] self.token_spans = [] self.token_masks = torch.tensor([], dtype=torch.long).to(tokens.device) self.text = "" self.tokens = torch.tensor([], dtype=torch.long).to(tokens.device) self.completed = False self.truncated = False self.reward = 0.0 self.prompt_color = "black on grey85" self.system_color = "black on cyan3" self.model_color = "black on deep_sky_blue1" self.reward_color = "black on plum1" self.append_segment(text, tokens, system=system) def append_segment(self, text, tokens, system=True): """ Append a new segment to the history. args: text (`str`): The text of the new segment. tokens (`torch.LongTensor`): The tokens of the new segment. system (`bool`, *optional*): Whether the new segment is a system or user segment. 
""" if len(text) == 0 or len(tokens) == 0: raise ValueError("Can't append empty text or token list to history.") original_text_length = len(self.text) self.text += text self.text_spans.append((original_text_length, len(self.text))) self.system_spans.append(system) original_token_length = len(self.tokens) self.tokens = torch.cat((self.tokens, tokens)) if system: self.token_masks = torch.cat((self.token_masks, torch.zeros_like(tokens))) else: self.token_masks = torch.cat((self.token_masks, torch.ones_like(tokens))) self.token_spans.append((original_token_length, len(self.tokens))) def complete(self, truncated=False): """ Mark the history as completed. """ self.completed = True self.truncated = truncated @property def last_text_segment(self): """ Get the last text segment. """ start, end = self.text_spans[-1] return self.text[start:end] def split_query_response_tokens(self): """ Split the tokens into query and response tokens. """ split_index = self.token_spans[0][1] query = self.tokens[:split_index] response = self.tokens[split_index:] mask = self.token_masks[split_index:] return query, response, mask def show_text(self, show_legend=False): """ Print the text history. """ if not is_rich_available(): warnings.warn("install rich to display text") return text = Text(self.text) text.stylize(self.prompt_color, self.text_spans[0][0], self.text_spans[1][0]) for i, (start, end) in enumerate(self.text_spans[1:]): if self.system_spans[i + 1]: text.stylize(self.system_color, start, end) else: text.stylize(self.model_color, start, end) text.append(f"\n\nReward: {self.reward}", style=self.reward_color) print(text) if show_legend: self.show_colour_legend() def show_tokens(self, tokenizer, show_legend=False): """ Print the history tokens. """ if not is_rich_available(): warnings.warn("install rich to display tokens") return text = Text() prompt_end = self.token_spans[0][1] for i, (token, mask) in enumerate(zip(self.tokens, self.token_masks)): if i < prompt_end: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.prompt_color) text.append(" ") elif mask == 0: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.system_color) text.append(" ") else: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.model_color) text.append(" ") text.append(f"\n\nReward: {self.reward}", style=self.reward_color) print(text) if show_legend: self.show_colour_legend() def show_colour_legend(self): """ Print the colour legend. """ if not is_rich_available(): warnings.warn("install rich to display colour legend") return text = Text("\n\n(Colour Legend: ") text.append("Prompt", style=self.prompt_color) text.append("|") text.append("System", style=self.system_color) text.append("|") text.append("Model", style=self.model_color) text.append("|") text.append("Reward", style=self.reward_color) text.append(")") print(text) class TextEnvironment: """ The TextEnvironment enables interaction of a LLM with an environment using tools. """ def __init__( self, model=None, tokenizer=None, tools=None, reward_fn=None, prompt=None, max_turns=4, max_tool_reponse=100, max_length=None, generation_kwargs=None, ): """ Initialize TextEnvironment. Args: model (`PreTrainedModelWrapper`): The model to use for generation. tokenizer (`transformers.PreTrainedTokenizer`): The tokenizer to use for generation. tools (list): A list of tools to use for interaction. reward_fn (function): A function that takes a string and returns a reward. prompt (str): The base prompt to use for generation. 
Is prepended to the tasks. max_turns (Optional[int]): The maximum number of turns to allow. max_tool_response (Optional[int]): The maximum number of characters to allow in a tool response. max_length (Optional[int]): The maximum number of tokens to allow in an episode. generation_kwargs (Optional[dict]): A dictionary of keyword arguments to pass to the model's generate method. """ self.model = model self.tokenizer = tokenizer self.prompt = prompt if isinstance(tools, dict): self.tools = tools else: self.tools = {tool.__class__.__name__: tool for tool in tools} self.reward_fn = reward_fn self.max_length = max_length self.request_token = "<request>" self.call_token = "<call>" self.response_token = "<response>" self.submit_token = "<submit>" self.max_turns = max_turns self.max_tool_response = max_tool_reponse if generation_kwargs is None: self.generation_kwargs = dict() else: self.generation_kwargs = generation_kwargs self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder") self.current_device = extract_model_from_parallel(self.model).pretrained_model.device def run(self, queries, **rewards_kwargs): """ Run the environment on a list of queries. Args: queries (list[str]): A list of queries to run the model in the environment on. """ turns = 0 queries = [self.prompt + task for task in queries] queries_tokens = [ self.tokenizer(query, return_tensors="pt").input_ids[0].to(self.model.pretrained_model.device) for query in queries ] histories = [TextHistory(q, qt, system=True) for q, qt in zip(queries, queries_tokens)] while any(not history.completed for history in histories) and turns < self.max_turns: histories = self.generate(histories) histories = self.tasks_end_check(histories) # TODO: make this parallel rather than for-loop for i in range(len(histories)): histories[i] = self.step(histories[i]) histories = self.tasks_end_check(histories, model_turn=False) turns += 1 self.compute_reward(histories, **rewards_kwargs) # convert a list of (q, r, m) tuples to lists of all qs, rs, and ms respectively queries, responses, masks = map(list, zip(*[history.split_query_response_tokens() for history in histories])) rewards = [history.reward for history in histories] return queries, responses, masks, rewards, histories def step(self, history): """ Step the environment forward one turn. Args: history (`TextHistory`): The history to step forward. """ truncated, ended = self.task_end_check(history) if ended: history.complete(truncated=truncated) if history.completed: return history tool, query = self.parse_tool_call(history.last_text_segment) if tool is None or query is None: response = f"Unknown tool call: {history.last_text_segment}" else: if tool not in self.tools: response = f"Unknown tool {tool}." try: response = self.tools[tool](query) except Exception as error: response = f"Tool error: {str(error)}" if len(response) > self.max_tool_response: response = response[: (self.max_tool_response - 3)] + "..." history.append_segment( response + self.response_token, self.tokenizer(response + self.response_token, return_tensors="pt") .input_ids[0] .to(self.model.pretrained_model.device), system=True, ) return history def parse_tool_call(self, text): """ Parse request string. 
Expected format: <request><tool_name>query<call> """ result = re.search(f"(?<={self.request_token}).*?(?={self.call_token})", text, re.DOTALL) # if we can't find a <request>/<call> span we return none if result is None: return None, None else: extracted_text = result.group() result = re.search(r"<(.*?)>", extracted_text) # if we can't find a tool name we return none if result is None: return None, None else: tool = result.group(1) # split off the tool name query = ">".join(extracted_text.split(">")[1:]) return tool, query def compute_reward(self, histories, **reward_kwargs): """ Compute the reward for a list of histories. """ rewards = self.reward_fn([history.last_text_segment for history in histories], **reward_kwargs) for history, reward in zip(histories, rewards): history.reward = reward return histories def generate(self, histories): """ Generate responses for a list of histories. """ active_histories = [i for i, history in enumerate(histories) if not history.completed] query_tensors = [histories[i].tokens for i in active_histories] response_tensors = self._generate_batched(query_tensors) response_texts = self.tokenizer.batch_decode(response_tensors) for i, response_text, response_tensor in zip(active_histories, response_texts, response_tensors): histories[i].append_segment(response_text, response_tensor, system=False) return histories def tasks_end_check(self, histories, model_turn=True): """ Check if the current generation sequences have finished. """ for history in histories: if not history.completed: truncated, ended = self.task_end_check(history, model_turn=model_turn) if ended: history.complete(truncated=truncated) return histories def task_end_check(self, history, model_turn=True): """ Check if the current generation sequence has finished. """ truncated = False ended = False if history.completed: return truncated, ended if self.max_length is not None and len(self.tokenizer(history.text).input_ids[0]) > self.max_length: truncated = True ended = True elif self.tokenizer.eos_token in history.text: ended = True elif model_turn and not ( (self.request_token in history.last_text_segment and self.call_token in history.last_text_segment) or self.submit_token in history.last_text_segment ): ended = True elif self.submit_token in history.last_text_segment: ended = True return truncated, ended def _generate_batched( self, query_tensors, batch_size: int = 16, pad_to_multiple_of: Optional[int] = None, ): """ Generate responses for a list of query tensors. args: query_tensors (list[torch.Tensor]): A list of query tensors to generate responses for. batch_size (int): The batch size to use for generation. pad_to_multiple_of (int): The padding length to use for generation. 
""" outputs = [] padding_side_default = self.tokenizer.padding_side if not self.is_encoder_decoder: self.tokenizer.padding_side = "left" # in case we have fewer examples than bs batch_size = min(len(query_tensors), batch_size) for i in range(0, len(query_tensors), batch_size): # prevent overflow if query tensors are not even multiple of bs end_index = min(len(query_tensors), i + batch_size) batch = query_tensors[i:end_index] batch_mask = [torch.ones_like(element) for element in batch] inputs = {"input_ids": batch, "attention_mask": batch_mask} padded_inputs = self.tokenizer.pad( inputs, padding=True, max_length=None, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ).to(self.current_device) stopping_criteria = StringStoppingCriteria([self.call_token, self.submit_token], self.tokenizer) self.generation_kwargs["stopping_criteria"] = StoppingCriteriaList([stopping_criteria]) generations = extract_model_from_parallel(self.model).generate(**padded_inputs, **self.generation_kwargs) for generation, mask, generated_tokens in zip( generations, padded_inputs["attention_mask"], stopping_criteria.generated_tokens ): if not self.is_encoder_decoder: output = generation[(1 - mask).sum() :] # remove padding else: output = generation if not self.is_encoder_decoder: output = output[(mask).sum() :] # remove prompt # remove chunk generated after stopping criteria in batch mode outputs.append(output[:generated_tokens]) self.tokenizer.padding_side = padding_side_default return outputs
trl/trl/environment/base_environment.py/0
{ "file_path": "trl/trl/environment/base_environment.py", "repo_id": "trl", "token_count": 7661 }
425
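The `TextEnvironment` / `TextHistory` module above is easiest to understand from the caller's side. Below is a hedged sketch of how an environment might be driven; the import paths, the toy calculator tool, the reward function, and the GPT-2 checkpoint are illustrative assumptions and not part of the file above (a real setup would also need a model that knows the `<request>`/`<call>`/`<submit>` tokens).

```python
# Illustrative driver for TextEnvironment; names, checkpoint and prompt are assumptions.
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead, TextEnvironment


class SimpleCalculatorTool:
    """Toy tool: evaluates a basic arithmetic expression and returns the result as text."""

    def __call__(self, expression: str) -> str:
        try:
            return f"Result = {eval(expression, {'__builtins__': {}}, {})}"
        except Exception as error:
            return f"Error: {error}"


def exact_match_reward(responses, answers=None):
    # reward 1.0 when the expected answer appears in the final model segment
    return [1.0 if answer in response else 0.0 for response, answer in zip(responses, answers)]


model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token

env = TextEnvironment(
    model=model,
    tokenizer=tokenizer,
    tools={"SimpleCalculatorTool": SimpleCalculatorTool()},
    reward_fn=exact_match_reward,
    prompt="Use the tool like this: <request><SimpleCalculatorTool>1 + 1<call>Result = 2<response>Result = 2<submit>\n\n",
    max_turns=2,
)

tasks = ["What is 4 + 3?", "What is 9 - 2?"]
queries, responses, masks, rewards, histories = env.run(tasks, answers=["7", "7"])
```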
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Callable, Dict, List, Optional, Tuple, Union import torch from datasets import Dataset from torch.utils.data import DataLoader from transformers import ( DataCollator, DataCollatorForLanguageModeling, DataCollatorForSeq2Seq, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments, ) from transformers.trainer_utils import EvalLoopOutput from ..core import PPODecorators from ..import_utils import is_peft_available if is_peft_available(): from peft import PeftModel class IterativeSFTTrainer(Trainer): """ The IterativeSFTTrainer can be used to finetune models with methods that requires some steps between optimization. Attributes: **model** (`PreTrainedModel`) -- Model to be optimized, either an 'AutoModelForCausalLM' or an 'AutoModelForSeq2SeqLM'. Check the documentation of `PreTrainedModel` for more details. **args** (`transformers.TrainingArguments`): -- The arguments to use for training. **tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the data. Check the documentation of `transformers.PreTrainedTokenizer` and `transformers.PreTrainedTokenizerFast` for more details. **optimizers** (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): -- The optimizer and scheduler to use for training. **data_collator** (Union[DataCollatorForLanguageModeling, DataCollatorForSeq2Seq], *optional*) -- Data collator to be used for training and passed along the dataloader. **eval_dataset** (`datasets.Dataset`): The dataset to use for evaluation. **max_length** (`int`, defaults to `None`): -- The maximum length of the input. **truncation_mode** (`str`, defaults to `keep_end`): -- The truncation mode to use, either `keep_end` or `keep_start`. **preprocess_logits_for_metrics** (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): -- The function to use to preprocess the logits before computing the metrics. **compute_metrics** (`Callable[[EvalPrediction], Dict]`, *optional*): -- The function to use to compute the metrics. Must take a `EvalPrediction` and return a dictionary string to metric values. **optimize_device_cache ** (`bool`, *optional*, defaults to `False`) -- Optimize CUDA cache for slightly more memory-efficient training. 
""" def __init__( self, model: Optional[PreTrainedModel] = None, args: Optional[TrainingArguments] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( None, None, ), data_collator: Optional[DataCollator] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, max_length: Optional[int] = None, truncation_mode: Optional[str] = "keep_end", preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None, optimize_device_cache: Optional[bool] = False, ): # Step 0: check positional arguments validity if not isinstance(tokenizer, (PreTrainedTokenizerBase)): raise ValueError( f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}" ) if not isinstance(model, PreTrainedModel): raise ValueError(f"model must be a PreTrainedModel, got {type(model)}") if not model.can_generate(): warnings.warn( f"The current model class {type(model)} is not compatible with `.generate()`" "Please make sure that this is intended." ) if optimizers[1] is None and args.max_steps == -1: raise ValueError( "When no scheduler is provided, you need to set the total number of training steps to perform `max_steps`" ) self.is_encoder_decoder = getattr(model.config, "is_encoder_decoder", False) self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) self.tokenizer = tokenizer if data_collator is None: if self.is_encoder_decoder: warnings.warn( "No data collator is provided. Using 'DataCollatorForSeq2Seq' with" "'labels_pad_token_id' set to '-100' and 'pad_to_multiple_of' set to 8." ) self.data_collator = DataCollatorForSeq2Seq(tokenizer, label_pad_token_id=-100, pad_to_multiple_of=8) else: warnings.warn("No data collator is provided. Using 'DataCollatorForLanguageModeling'") self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False) else: self.data_collator = data_collator self.max_length = max_length self.truncation_mode = truncation_mode self.optimize_device_cache = optimize_device_cache super().__init__( model=model, args=args, data_collator=self.data_collator, eval_dataset=eval_dataset, tokenizer=tokenizer, compute_metrics=compute_metrics, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) self.create_optimizer_and_scheduler(self.args.max_steps) # prepare model, optimizer and lr_scheduler self.model, self.optimizer, self.lr_scheduler = self.accelerator.prepare( self.model, self.optimizer, self.lr_scheduler ) self.tokenizer.truncation_side = "left" if self.truncation_mode == "keep_end" else "right" if not hasattr(self, "accelerator"): raise AttributeError( "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`." 
) PPODecorators.optimize_device_cache = self.optimize_device_cache def prepare_model_inputs(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, labels: torch.Tensor): if attention_mask is None: attention_mask = [torch.ones_like(ids) for ids in input_ids] if self.is_encoder_decoder: input_data = self.data_collator( [ {"input_ids": ids, "attention_mask": att, "labels": lab} for ids, att, lab in zip(input_ids, attention_mask, labels) ] ).to(self.model.device) input_data.pop("decoder_input_ids", None) # This is directly computed inside the model input_data["labels"][input_data["labels"] == self.tokenizer.pad_token_id] = -100 else: input_data = self.data_collator( [{"input_ids": ids, "attention_mask": att} for ids, att in zip(input_ids, attention_mask)] ).to(self.model.device) # truncate in case the user has provided input_ids, attention_mask and labels if self.max_length is not None: if self.truncation_mode == "keep_start": input_data = {k: v[: self.max_length] for k, v in input_data.items()} elif self.truncation_mode == "keep_end": input_data = {k: v[-self.max_length :] for k, v in input_data.items()} else: raise ValueError(f"Unknown truncation mode: {self.truncation_mode}") return input_data @staticmethod def _step_safety_checker( input_ids: List[torch.LongTensor], attention_mask: List[torch.LongTensor], labels: List[torch.LongTensor], texts: List[str], texts_labels: List[str], ): """ Check if the input data is valid for training. Args: input_ids (List[`torch.LongTensor`]): List of tensors containing the input_ids attention_mask (List[`torch.LongTensor`]): List of tensors containing the attention_mask labels (List[`torch.FloatTensor`]): List of tensors containing the labels texts (List[`str`]): List of string containing the text input. texts_labels (List[`str`]): List of string containing the text labels. Returns: `tuple`: The input data. 
""" if texts is None: if attention_mask is None: for name, tensor_list in zip(["input_ids", "labels"], [input_ids, labels]): if not isinstance(tensor_list, list): raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}") if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}") else: for name, tensor_list in zip( ["input_ids", "attention_mask", "labels"], [input_ids, attention_mask, labels] ): if not isinstance(tensor_list, list): raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}") if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}") else: if not isinstance(texts, list): raise ValueError(f"'text' must be a list of strings - got {type(texts)}") if not isinstance(texts[0], str): raise ValueError(f"Elements in 'text' must be strings - got {type(texts[0])}") if texts_labels is not None: if not isinstance(texts_labels, list): raise ValueError(f"'text_labels' must be a list of strings - got {type(texts_labels)}") if not isinstance(texts_labels[0], str): raise ValueError(f"Elements in 'text_labels' must be strings - got {type(texts_labels[0])}") return input_ids, attention_mask, labels, texts, texts_labels @PPODecorators.empty_device_cache() def step( self, input_ids: Optional[List[torch.LongTensor]] = None, attention_mask: Optional[List[torch.LongTensor]] = None, labels: Optional[List[torch.LongTensor]] = None, texts: Optional[List[str]] = None, texts_labels: Optional[List[str]] = None, ): """ Run an optimisation step given a list of input_ids, attention_mask, and labels or a list of text and text_labels. Args: input_ids (List[`torch.LongTensor`]): List of tensors containing the input_ids (if not provided, text will be used) attention_mask (List[`torch.LongTensor`], , *optional*): List of tensors containing the attention_mask labels (List[`torch.FloatTensor`], *optional*): List of tensors containing the labels (if set to None, will default to input_ids) texts (List[`str`], *optional*): List of strings containing the text input (if not provided, input_ids will directly be used) texts_labels (List[`str`], *optional*): List of strings containing the text labels (if set to None, will default to text) Returns: `dict[str, Any]`: A summary of the training statistics """ self.model.train() if self.state.global_step == 0: self.tr_loss = torch.tensor(0.0).to(self.args.device) self._globalstep_last_logged = self.state.global_step if input_ids is None and texts is None: raise ValueError("Step should include `input_ids` or `texts` as keyword arguments.") elif input_ids is not None and texts is not None: warnings.warn( "Both 'input_ids' and 'texts' are provided. 'input_ids' will be overwritten using inputs provided by the 'texts' keyword argument." ) if labels is None and texts_labels is None and self.is_encoder_decoder: raise ValueError( "No 'labels' or 'text_labels' are provided. When using an encoder-decoder architecture, 'labels' or 'text_labels' must be passed." 
) input_ids, attention_mask, labels, texts, texts_labels = self._step_safety_checker( input_ids, attention_mask, labels, texts, texts_labels ) if texts is not None: model_inputs = self.tokenizer( texts, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt" ) input_ids, attention_mask = model_inputs["input_ids"], model_inputs["attention_mask"] if texts_labels is not None: labels = self.tokenizer( texts, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt" )["input_ids"] if labels is None: warnings.warn("No labels are provided. Setting labels to input_ids") labels = input_ids model_inputs = self.prepare_model_inputs(input_ids, attention_mask, labels) model_inputs_names = list(model_inputs.keys()) batch_dict = {} batch_dict.update(model_inputs) def collator(data): return_dict = dict() for key in data[0]: if key in ["input_ids", "attention_mask", "labels"]: return_dict[key] = torch.stack([d[key] for d in data]).to(self.model.device) return return_dict batch_data = Dataset.from_dict(batch_dict) batch_data.set_format("torch") step_dataloader = DataLoader( batch_data, batch_size=self.args.per_device_train_batch_size, shuffle=True, collate_fn=collator, ) for _, batch in enumerate(step_dataloader): with self.accelerator.accumulate(self.model): model_inputs = {k: batch[k] for k in model_inputs_names} loss = self.compute_loss(self.model, model_inputs) if self.args.n_gpu > 1: loss = loss.mean() tr_loss_step = loss.detach() self.accelerator.backward(loss) if self.accelerator.sync_gradients and self.args.max_grad_norm is not None: self.accelerator.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm, ) self.optimizer.step() self.optimizer.zero_grad() if self.lr_scheduler is not None: self.lr_scheduler.step() self.state.global_step += 1 # update stats etc self.tr_loss += tr_loss_step self._maybe_log_save_evaluate() def _maybe_log_save_evaluate(self): # check if eval is required if self.args.eval_steps is not None: if self.state.global_step % self.args.eval_steps == 0 and self.state.global_step != 0: self.evaluate(self.eval_dataset) # check if logging is required if self.args.logging_steps is not None: if self.state.global_step % self.args.logging_steps == 0 and self.state.global_step != 0: logs: Dict[str, float] = {} tr_loss_scalar = self._nested_gather(self.tr_loss).mean().item() # reset tr_loss to zero self.tr_loss -= self.tr_loss logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) logs["learning_rate"] = self._get_learning_rate() self._globalstep_last_logged = self.state.global_step self.log(logs)
trl/trl/trainer/iterative_sft_trainer.py/0
{ "file_path": "trl/trl/trainer/iterative_sft_trainer.py", "repo_id": "trl", "token_count": 7430 }
426
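A minimal usage sketch of the trainer implemented above, loosely following the test file earlier in this dump; the output directory, `max_steps` value and example texts are placeholders, and the tiny test checkpoint is only used so the snippet stays cheap to run.

```python
# Rough usage sketch for IterativeSFTTrainer; output_dir, max_steps and texts are placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import IterativeSFTTrainer

model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token

# max_steps must be set because no scheduler is passed in
args = TrainingArguments(output_dir="iterative_sft_output", per_device_train_batch_size=2, max_steps=10)
trainer = IterativeSFTTrainer(model=model, args=args, tokenizer=tokenizer)

# In a real loop these texts would be regenerated or filtered between calls to `step`.
texts = ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"]
trainer.step(texts=texts)
```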
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
accelerate/benchmarks/measures_util.py/0
{ "file_path": "accelerate/benchmarks/measures_util.py", "repo_id": "accelerate", "token_count": 1146 }
0
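As a usage illustration only for the benchmark helpers above: the import path assumes the snippet runs from the `accelerate/benchmarks` directory, and a CUDA device is assumed to be available (the helpers call `torch.cuda` functions unconditionally). The workload itself is a placeholder.

```python
# Illustrative only: wrap a workload with start_measure / end_measure and print the deltas.
import torch

from measures_util import end_measure, log_measures, start_measure

start_measures = start_measure()

# the workload being measured (placeholder)
a = torch.randn(4096, 4096, device="cuda")
b = a @ a
torch.cuda.synchronize()

end_measures = end_measure(start_measures)
log_measures(end_measures, "4096x4096 matmul")
```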
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Deferring Executions

When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be faster than others.

You might need to wait for all processes to have reached a certain point before executing a given instruction. For instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to continue training before all the model weights have been loaded in. To do this, just write the following line in your code:

```
accelerator.wait_for_everyone()
```

This instruction will block all the processes that arrive first until all the other processes have reached that point (if you run your script on just one GPU or CPU, this won't do anything).

A few example cases of when to use this utility are listed below:

<Tip>

Some of these are utilized with the [`~Accelerator.main_process_first`] context manager, which utilizes [`~Accelerator.wait_for_everyone`] to run a particular set of code on the main process before triggering and launching the other processes.

</Tip>

## Downloading a Dataset

When downloading a dataset, you should download it first on the main process and then load the cached dataset afterward.

<Tip>

`load_dataset` will perform a lock under the hood to stop multiple downloads from happening at once, but if you are downloading something not using this library you should use this method.

</Tip>

```python
with accelerator.main_process_first():
    datasets = load_dataset("glue", "mrpc")
```

Under the hood this is the same as calling:

```python
# First do something on the main process
if accelerator.is_main_process:
    datasets = load_dataset("glue", "mrpc")
else:
    accelerator.wait_for_everyone()

# And then send it to the rest of them
if not accelerator.is_main_process:
    datasets = load_dataset("glue", "mrpc")
else:
    accelerator.wait_for_everyone()
```

## Saving the `state_dict`

When saving the `state_dict` of the model, since you would normally save one file on just the main process you should specify that:

```python
if accelerator.is_main_process:
    model = accelerator.unwrap_model(model)
    torch.save(model.state_dict(), "weights.pth")
```

## Loading in the `state_dict`

When loading in the `state_dict` to a model, optimizer, or scheduler, you should wait for all workers to have the weights loaded in before moving on to training.

```python
with accelerator.main_process_first():
    state = torch.load("weights.pth")
    model.load_state_dict(state)
```

## Applying a multi-worker CPU operation

Applying a `map()` operation on multiple workers, such as tokenizing, should be done on the main process first, and then propagated to each one.

```python
datasets = load_dataset("glue", "mrpc")

with accelerator.main_process_first():
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
    )
```

## Applying checks such as Early Stopping

To have a check that works with a flag set by a particular process, the `set_trigger` and `check_trigger` API should be used. Useful examples for doing so can include situations such as using early stopping and monitoring the loss (as each loss slightly differs on each process).

Call [`Accelerator.set_trigger`] when your condition has been met, and [`Accelerator.check_trigger`] when checking if that condition has been met in any process:

```python
for (x, y) in data_loader:
    logits = model(x)
    loss = loss_func(logits, y)
    # Assume `should_do_early_stopping` is a custom defined function that returns a conditional
    if should_do_early_stopping(loss):
        accelerator.set_trigger()

# Later in the training script when we need to check for the breakpoint
if accelerator.check_trigger():
    break
```
accelerate/docs/source/concept_guides/deferring_execution.md/0
{ "file_path": "accelerate/docs/source/concept_guides/deferring_execution.md", "repo_id": "accelerate", "token_count": 1350 }
1
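One pattern the guide above implies but does not spell out is combining its two saving-related points: first wait for every process to finish training, then write the weights from the main process only. A small sketch, assuming `accelerator` and `model` were already created and prepared as in the guide:

```python
import torch

# Block until every process has finished its training steps...
accelerator.wait_for_everyone()

# ...then unwrap the distributed wrapper and save on the main process only.
unwrapped_model = accelerator.unwrap_model(model)
if accelerator.is_main_process:
    torch.save(unwrapped_model.state_dict(), "weights.pth")
```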
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Using Local SGD with 🤗 Accelerate

Local SGD is a technique for distributed training where gradients are not synchronized every step. Thus, each process updates its own version of the model weights and, after a given number of steps, these weights are synchronized by averaging across all processes. This improves communication efficiency and can lead to a substantial training speed-up, especially when a computer lacks a fast interconnect such as NVLink. Unlike gradient accumulation (where improving communication efficiency requires increasing the effective batch size), Local SGD does not require changing a batch size or a learning rate / schedule. However, if necessary, Local SGD can be combined with gradient accumulation as well.

In this tutorial you will see how to quickly set up Local SGD with 🤗 Accelerate. Compared to a standard Accelerate setup, this requires only two extra lines of code.

This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:

```python
device = "cuda"
model.to(device)

gradient_accumulation_steps = 2

for index, batch in enumerate(training_dataloader):
    inputs, targets = batch
    inputs = inputs.to(device)
    targets = targets.to(device)
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    loss = loss / gradient_accumulation_steps
    loss.backward()
    if (index + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```

## Converting it to 🤗 Accelerate

First the code shown earlier will be converted to use 🤗 Accelerate with neither a LocalSGD nor a gradient accumulation helper:

```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()

+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+     model, optimizer, training_dataloader, scheduler
+ )

  for index, batch in enumerate(training_dataloader):
      inputs, targets = batch
-     inputs = inputs.to(device)
-     targets = targets.to(device)
      outputs = model(inputs)
      loss = loss_function(outputs, targets)
      loss = loss / gradient_accumulation_steps
+     accelerator.backward(loss)
      if (index+1) % gradient_accumulation_steps == 0:
          optimizer.step()
          scheduler.step()
```

## Letting 🤗 Accelerate handle model synchronization

All that is left now is to let 🤗 Accelerate handle model parameter synchronization **and** the gradient accumulation for us. For simplicity let us assume we need to synchronize every 8 steps. This is achieved by adding one `with LocalSGD` statement and one call to `local_sgd.step()` after every optimizer step:

```diff
+local_sgd_steps = 8

+with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for batch in training_dataloader:
        with accelerator.accumulate(model):
            inputs, targets = batch
            outputs = model(inputs)
            loss = loss_function(outputs, targets)
            accelerator.backward(loss)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
+           local_sgd.step()
```

Under the hood, the Local SGD code **disables** automatic gradient synchronization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as at the end of the training loop).

## Limitations

The current implementation works only with basic multi-GPU (or multi-CPU) training without, e.g., [DeepSpeed](https://github.com/microsoft/DeepSpeed).

## References

Although we are not aware of the true origins of this simple approach, the idea of Local SGD is quite old and goes back to at least:

Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?](https://arxiv.org/abs/1606.07365) arXiv preprint arXiv:1606.07365.

We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of):

Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little."](https://arxiv.org/abs/1805.09767) ICLR 2019, International Conference on Learning Representations. 2019.
accelerate/docs/source/usage_guides/local_sgd.md/0
{ "file_path": "accelerate/docs/source/usage_guides/local_sgd.md", "repo_id": "accelerate", "token_count": 1491 }
2
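Putting the two diffs from the guide above together, a consolidated loop might look like the sketch below. The `LocalSGD` import path is my assumption, and `model`, `optimizer`, `training_dataloader`, `scheduler` and `loss_function` are assumed to exist already.

```python
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD  # import path is an assumption

accelerator = Accelerator()
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)

local_sgd_steps = 8
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=True) as local_sgd:
    for batch in training_dataloader:
        with accelerator.accumulate(model):
            inputs, targets = batch
            outputs = model(inputs)
            loss = loss_function(outputs, targets)
            accelerator.backward(loss)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            # average parameters across processes every `local_sgd_steps` optimizer steps
            local_sgd.step()
```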
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import os import threading import evaluate import psutil import torch from datasets import load_dataset from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig from torch.utils.data import DataLoader from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, ) from accelerate import Accelerator, DistributedType, FullyShardedDataParallelPlugin from accelerate.utils import is_npu_available, is_xpu_available ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # - FSDP # # This example also demonstrates the checkpointing and sharding capabilities # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 # New Code # # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # New Code # # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.xpu.memory_allocated() elif is_npu_available(): torch.npu.empty_cache() torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.npu.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() self.end = 
torch.xpu.memory_allocated() self.peak = torch.xpu.max_memory_allocated() elif is_npu_available(): torch.npu.empty_cache() self.end = torch.npu.memory_allocated() self.peak = torch.npu.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # # Pass the advanced FSDP settings not part of the accelerate config by creating fsdp_plugin fsdp_plugin = FullyShardedDataParallelPlugin( state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False), optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False), ) # Initialize accelerator if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="wandb", project_dir=args.logging_dir, fsdp_plugin=fsdp_plugin, ) else: accelerator = Accelerator(fsdp_plugin=fsdp_plugin) accelerator.print(accelerator.distributed_type) if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." ) else: checkpointing_steps = None # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: experiment_config = vars(args) accelerator.init_trackers("fsdp_glue_no_trainer", experiment_config) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) datasets = load_dataset("glue", "mrpc") metric = evaluate.load("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. 
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) set_seed(seed) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained( args.model_name_or_path, return_dict=True, low_cpu_mem_usage=True ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 0.003, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=lr, weight_decay=2e-4) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=10, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) overall_step = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: num_epochs -= int(training_difference.replace("epoch_", "")) resume_step = None else: resume_step = int(training_difference.replace("step_", "")) num_epochs -= resume_step // len(train_dataloader) # If resuming by step, we also need to know exactly how far into the DataLoader we went resume_step = (num_epochs * len(train_dataloader)) - resume_step # Now we train the model for epoch in range(num_epochs): # New Code # # context manager to track the peak memory usage during the training epoch with TorchTracemalloc() as tracemalloc: model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == 0: if resume_step is not None and step < resume_step: pass # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() # accelerator.print(lr_scheduler.get_lr()) overall_step += 1 if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # New Code # # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}") accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}") accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") accelerator.print( f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) # Logging the peak memory usage of the GPU to the tracker if args.with_tracking: accelerator.log( { "train_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), }, step=epoch, ) # New Code # # context manager to track the peak memory usage during the evaluation with TorchTracemalloc() as tracemalloc: model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(train_dataloader), }, step=epoch, ) if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # New Code # # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"Memory before entering the eval : {b2mb(tracemalloc.begin)}") accelerator.print(f"Memory consumed at the end of the eval (end-begin): {tracemalloc.used}") accelerator.print(f"Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}") accelerator.print( f"Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) # Logging the peak memory usage of the GPU to the tracker if args.with_tracking: accelerator.log( { "eval_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), }, step=epoch, ) if args.with_tracking: accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help="Location on where to store experiment tracking logs`", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
accelerate/examples/by_feature/fsdp_with_peak_mem_tracking.py/0
{ "file_path": "accelerate/examples/by_feature/fsdp_with_peak_mem_tracking.py", "repo_id": "accelerate", "token_count": 7850 }
3
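The training and evaluation loops above wrap their work in a `TorchTracemalloc` context manager, defined earlier in the same script together with the `b2mb` helper. As a rough illustration of the underlying idea only (this is not the exact helper from the example, and it assumes a single CUDA device), a minimal peak-memory tracker can be built from `torch.cuda` statistics:

```python
import gc

import torch


def b2mb(x):
    # Convert bytes to megabytes
    return int(x / 2**20)


class PeakMemoryTracker:
    """Minimal sketch of a peak-GPU-memory context manager (assumes a single CUDA device)."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()  # reset the peak gauge to the current usage
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)      # net memory consumed inside the block (MB)
        self.peaked = b2mb(self.peak - self.begin)   # peak memory above the starting point (MB)


# Usage sketch:
# with PeakMemoryTracker() as tracker:
#     ...  # training or evaluation loop
# print(f"Peak memory consumed (MB): {tracker.peaked}")
```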
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time

import torch
from transformers import AutoModelForMaskedLM

from accelerate import PartialState, prepare_pippy
from accelerate.utils import set_seed


# Set the random seed to have reproducible outputs
set_seed(42)

# Create an example model
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()

# Input configs
# Create example inputs for the model
input = torch.randint(
    low=0,
    high=model.config.vocab_size,
    size=(2, 512),  # bs x seq_len
    device="cpu",
    dtype=torch.int64,
    requires_grad=False,
)

# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(model, split_points="auto", example_args=(input,))

# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)

# Move the inputs to the first device
input = input.to("cuda:0")

# Take an average of 5 times
# Measure first batch
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
    output = model(input)
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time

# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
    with torch.no_grad():
        output = model(input)
torch.cuda.synchronize()
end_time = time.time()

# The outputs are only on the final process by default
if PartialState().is_last_process:
    output = torch.stack(tuple(output[0]))
    print(f"Time of first pass: {first_batch}")
    print(f"Average time per batch: {(end_time - start_time) / 5}")
accelerate/examples/inference/bert.py/0
{ "file_path": "accelerate/examples/inference/bert.py", "repo_id": "accelerate", "token_count": 762 }
4
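The pipelined script above only times forward passes over random token ids. The logits it produces are ordinary masked-language-model outputs; the following single-process sketch (independent of `prepare_pippy`, with a made-up input sentence used purely for illustration) shows how such outputs are typically decoded:

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

# Single-process sanity check: run the same model without pipeline parallelism
# and decode the masked-LM prediction at the [MASK] position.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()

text = "The capital of France is [MASK]."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch, seq_len, vocab_size)

# Find the sequence position of the [MASK] token and take the argmax over the vocabulary.
mask_index = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))  # likely prints something like "paris"
```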
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import find_packages, setup extras = {} extras["quality"] = [ "black ~= 23.1", # hf-doc-builder has a hidden dependency on `black` "hf-doc-builder >= 0.3.0", "ruff ~= 0.2.1", ] extras["docs"] = [] extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"] extras["test_dev"] = [ "datasets", "evaluate", "torchpippy>=0.2.0", "transformers", "scipy", "scikit-learn", "deepspeed<0.13.0", "tqdm", "bitsandbytes", "timm", ] extras["testing"] = extras["test_prod"] + extras["test_dev"] extras["rich"] = ["rich"] extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"] extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"] extras["sagemaker"] = [ "sagemaker", # boto3 is a required package in sagemaker ] setup( name="accelerate", version="0.29.0.dev", description="Accelerate", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning", license="Apache", author="The HuggingFace team", author_email="[email protected]", url="https://github.com/huggingface/accelerate", package_dir={"": "src"}, packages=find_packages("src"), entry_points={ "console_scripts": [ "accelerate=accelerate.commands.accelerate_cli:main", "accelerate-config=accelerate.commands.config:main", "accelerate-estimate-memory=accelerate.commands.estimate:main", "accelerate-launch=accelerate.commands.launch:main", ] }, python_requires=">=3.8.0", install_requires=[ "numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub", "safetensors>=0.3.1", ], extras_require=extras, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], ) # Release checklist # 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one): # git checkout -b vXX.xx-release # The -b is only necessary for creation (so remove it when doing a patch) # 2. Change the version in __init__.py and setup.py to the proper value. # 3. Commit these changes with the message: "Release: v<VERSION>" # 4. Add a tag in git to mark the release: # git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' # Push the tag and release commit to git: git push --tags origin vXX.xx-release # 5. Run the following commands in the top-level directory: # rm -rf dist # rm -rf build # python setup.py bdist_wheel # python setup.py sdist # 6. Upload the package to the pypi test server first: # twine upload dist/* -r testpypi # 7. 
Check that you can install it in a virtualenv by running: # pip install accelerate # pip uninstall accelerate # pip install -i https://testpypi.python.org/pypi accelerate # accelerate env # accelerate test # 8. Upload the final version to actual pypi: # twine upload dist/* -r pypi # 9. Add release notes to the tag in github once everything is looking hunky-dory. # 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to # main.
accelerate/setup.py/0
{ "file_path": "accelerate/setup.py", "repo_id": "accelerate", "token_count": 1649 }
5
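The `console_scripts` entry points declared in the `setup()` call above become the `accelerate*` commands on the PATH. A small sketch (assuming `accelerate` is already installed in the current environment) to list what actually got registered:

```python
from importlib.metadata import entry_points

# List the console scripts registered by the setup() call above.
try:
    eps = entry_points(group="console_scripts")  # Python 3.10+
except TypeError:
    eps = entry_points()["console_scripts"]  # Python 3.8/3.9 fallback

for ep in eps:
    if ep.name.startswith("accelerate"):
        print(f"{ep.name} -> {ep.value}")
```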
#!/usr/bin/env python # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import model_info from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError from accelerate import init_empty_weights from accelerate.commands.utils import CustomArgumentParser from accelerate.utils import ( calculate_maximum_sizes, convert_bytes, is_timm_available, is_transformers_available, ) if is_transformers_available(): import transformers from transformers import AutoConfig, AutoModel if is_timm_available(): import timm def verify_on_hub(repo: str, token: str = None): "Verifies that the model is on the hub and returns the model info." try: return model_info(repo, token=token) except GatedRepoError: return "gated" except RepositoryNotFoundError: return "repo" def check_has_model(error): """ Checks what library spawned `error` when a model is not found """ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]: return "timm" elif ( is_transformers_available() and isinstance(error, OSError) and "does not appear to have a file named" in error.args[0] ): return "transformers" else: return "unknown" def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None): """ Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption. Args: model_name (`str`): The model name on the Hub library_name (`str`): The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no metadata on the Hub to determine the library. trust_remote_code (`bool`, `optional`, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. access_token (`str`, `optional`, defaults to `None`): The access token to use to access private or gated models on the Hub. (for use on the Gradio app) Returns: `torch.nn.Module`: The torch model that has been initialized on the `meta` device. """ model_info = verify_on_hub(model_name, access_token) # Simplified errors if model_info == "gated": raise GatedRepoError( f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`." ) elif model_info == "repo": raise RepositoryNotFoundError( f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo," " make sure you are authenticated via `huggingface-cli login` and have access." 
) if library_name is None: library_name = getattr(model_info, "library_name", False) if not library_name: raise ValueError( f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)" ) if library_name == "transformers": if not is_transformers_available(): raise ImportError( f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`" ) print(f"Loading pretrained config for `{model_name}` from `transformers`...") if model_info.config is None: raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.") auto_map = model_info.config.get("auto_map", False) config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token) with init_empty_weights(): # remote code could specify a specific `AutoModel` class in the `auto_map` constructor = AutoModel if isinstance(auto_map, dict): value = None for key in auto_map.keys(): if key.startswith("AutoModelFor"): value = key break if value is not None: constructor = getattr(transformers, value) model = constructor.from_config(config, trust_remote_code=trust_remote_code) elif library_name == "timm": if not is_timm_available(): raise ImportError( f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`" ) print(f"Loading pretrained config for `{model_name}` from `timm`...") with init_empty_weights(): model = timm.create_model(model_name, pretrained=False) else: raise ValueError( f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support." ) return model def create_ascii_table(headers: list, rows: list, title: str): "Creates a pretty table from a list of rows, minimal version of `tabulate`." 
sep_char, in_between = "│", "─" column_widths = [] for i in range(len(headers)): column_values = [row[i] for row in rows] + [headers[i]] max_column_width = max(len(value) for value in column_values) column_widths.append(max_column_width) formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))] pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}" diff = 0 def make_row(left_char, middle_char, right_char): return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}" separator = make_row("├", "┼", "┤") if len(title) > sum(column_widths): diff = abs(len(title) - len(separator)) column_widths[-1] += diff # Update with diff separator = make_row("├", "┼", "┤") initial_rows = [ make_row("┌", in_between, "┐"), f"{sep_char}{title.center(len(separator) - 2)}{sep_char}", make_row("├", "┬", "┤"), ] table = "\n".join(initial_rows) + "\n" column_widths[-1] += diff centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)] table += f"{pattern % tuple(centered_line)}\n{separator}\n" for i, line in enumerate(rows): centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)] table += f"{pattern % tuple(centered_line)}\n" table += f'└{"┴".join([in_between * n for n in column_widths])}┘' return table def estimate_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("estimate-memory") else: parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.") parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.") parser.add_argument( "--library_name", type=str, help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.", choices=["timm", "transformers"], ) parser.add_argument( "--dtypes", type=str, nargs="+", default=["float32", "float16", "int8", "int4"], help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`", choices=["float32", "float16", "int8", "int4"], ) parser.add_argument( "--trust_remote_code", action="store_true", help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag should only be used for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.""", ) if subparsers is not None: parser.set_defaults(func=estimate_command) return parser def gather_data(args): "Creates an empty model and gathers the data for the sizes" try: model = create_empty_model( args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code ) except (RuntimeError, OSError) as e: library = check_has_model(e) if library != "unknown": raise RuntimeError( f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo." 
) raise e total_size, largest_layer = calculate_maximum_sizes(model) data = [] for dtype in args.dtypes: dtype_total_size = total_size dtype_largest_layer = largest_layer[0] if dtype == "float16": dtype_total_size /= 2 dtype_largest_layer /= 2 elif dtype == "int8": dtype_total_size /= 4 dtype_largest_layer /= 4 elif dtype == "int4": dtype_total_size /= 8 dtype_largest_layer /= 8 dtype_training_size = dtype_total_size * 4 data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size]) return data def estimate_command(args): data = gather_data(args) for row in data: for i, item in enumerate(row): if isinstance(item, (int, float)): row[i] = convert_bytes(item) headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"] title = f"Memory Usage for loading `{args.model_name}`" table = create_ascii_table(headers, data, title) print(table) def main(): parser = estimate_command_parser() args = parser.parse_args() estimate_command(args) if __name__ == "__main__": main()
accelerate/src/accelerate/commands/estimate.py/0
{ "file_path": "accelerate/src/accelerate/commands/estimate.py", "repo_id": "accelerate", "token_count": 4241 }
6
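Besides the CLI entry point, the parser and `gather_data` above can be driven directly from Python. A short sketch (assuming `accelerate` and `transformers` are installed; `bert-base-cased` is used only as an example of a public Hub model):

```python
from accelerate.commands.estimate import estimate_command_parser, gather_data
from accelerate.utils import convert_bytes

# Reuse the same argument parser the `estimate-memory` command builds.
parser = estimate_command_parser()
args = parser.parse_args(["bert-base-cased", "--library_name", "transformers", "--dtypes", "float32", "float16"])

# Each row is [dtype, largest layer, total size, training size with Adam], in bytes.
for dtype, largest_layer, total_size, training_size in gather_data(args):
    print(dtype, convert_bytes(largest_layer), convert_bytes(total_size), convert_bytes(training_size))
```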
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import logging import os from .state import PartialState class MultiProcessAdapter(logging.LoggerAdapter): """ An adapter to assist with logging in multiprocess. `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes or only the main executed one. Default is `main_process_only=True`. Does not require an `Accelerator` object to be created first. """ @staticmethod def _should_log(main_process_only): "Check if log should be performed" state = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def log(self, level, msg, *args, **kwargs): """ Delegates logger call after checking if we should log. Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes or only the main executed one. Default is `True` if not passed Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not break with the previous behavior. `in_order` is ignored if `main_process_only` is passed. """ if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." ) main_process_only = kwargs.pop("main_process_only", True) in_order = kwargs.pop("in_order", False) if self.isEnabledFor(level): if self._should_log(main_process_only): msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) elif in_order: state = PartialState() for i in range(state.num_processes): if i == state.process_index: msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) state.wait_for_everyone() @functools.lru_cache(None) def warning_once(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but will emit the warning with the same message only once Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to another type of cache that includes the caller frame information in the hashing function. """ self.warning(*args, **kwargs) def get_logger(name: str, log_level: str = None): """ Returns a `logging.Logger` for `name` that can handle multiprocessing. If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all processes and in order, also pass `in_order=True` Args: name (`str`): The name for the logger, such as `__file__` log_level (`str`, *optional*): The log level to use. 
            If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or `INFO` if it is not set.

    Example:

    ```python
    >>> from accelerate.logging import get_logger
    >>> from accelerate import Accelerator
    >>> logger = get_logger(__name__)

    >>> accelerator = Accelerator()
    >>> logger.info("My log", main_process_only=False)
    >>> logger.debug("My log", main_process_only=True)

    >>> logger = get_logger(__name__, log_level="DEBUG")
    >>> logger.info("My log")
    >>> logger.debug("My second log")

    >>> array = ["a", "b", "c", "d"]
    >>> letter_at_rank = array[accelerator.process_index]
    >>> logger.info(letter_at_rank, in_order=True)
    ```
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
accelerate/src/accelerate/logging.py/0
{ "file_path": "accelerate/src/accelerate/logging.py", "repo_id": "accelerate", "token_count": 1789 }
7
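The `warning_once` method above caches on its arguments via `functools.lru_cache`, so repeated identical calls emit a single record. A short sketch of the intended usage (assumes a plain single-process run; the warning message is a placeholder):

```python
from accelerate import Accelerator
from accelerate.logging import get_logger

# PartialState must be initialized (e.g. by creating an Accelerator) before logging.
accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")

for _ in range(3):
    # Identical arguments hit the lru_cache, so this warning is emitted only once.
    logger.warning_once("Feature X is deprecated, falling back to Y.")

# Regular calls still go through the main-process-only gate by default.
logger.info("Training is starting", main_process_only=True)
```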
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test file to ensure that in general certain situational setups for notebooks work.
"""

import os

from pytest import raises

from accelerate import PartialState, notebook_launcher
from accelerate.test_utils import require_bnb
from accelerate.utils import is_bnb_available


def basic_function():
    # Just prints the PartialState
    print(f"PartialState:\n{PartialState()}")


NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1))


def test_can_initialize():
    notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)


@require_bnb
def test_problematic_imports():
    with raises(RuntimeError, match="Please keep these imports"):
        import bitsandbytes as bnb  # noqa: F401

        notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)


def main():
    print("Test basic notebook can be run")
    test_can_initialize()

    if is_bnb_available():
        print("Test problematic imports (bnb)")
        test_problematic_imports()


if __name__ == "__main__":
    main()
accelerate/src/accelerate/test_utils/scripts/test_notebook.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_notebook.py", "repo_id": "accelerate", "token_count": 502 }
8
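The test above spawns `basic_function` through `notebook_launcher`; inside an actual notebook the same API is used with a user-defined training function. A minimal sketch (the `training_loop` function and its seed argument are placeholders, not part of the test file):

```python
from accelerate import notebook_launcher
from accelerate.utils import set_seed


def training_loop(seed):
    # Placeholder body; every spawned process executes this function.
    from accelerate import Accelerator

    set_seed(seed)
    accelerator = Accelerator()
    accelerator.print(f"Process {accelerator.process_index} of {accelerator.num_processes} is ready")


# With num_processes=1 this simply runs in the current process;
# larger values spawn one process per device.
notebook_launcher(training_loop, args=(42,), num_processes=1)
```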
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device if is_transformers_available(): from transformers.modeling_outputs import ( CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, SequenceClassifierOutput, ) if is_megatron_lm_available(): from megatron import ( get_args, get_num_microbatches, get_tensorboard_writer, get_timers, get_tokenizer, mpu, print_rank_0, print_rank_last, ) from megatron.arguments import _add_data_args, _add_validation_args, parse_args, validate_args from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint from megatron.data.data_samplers import MegatronPretrainingRandomSampler, MegatronPretrainingSampler from megatron.global_vars import set_global_variables from megatron.initialize import ( _compile_dependencies, _init_autoresume, _set_random_seed, set_jit_fusion_options, write_args_to_tensorboard, ) from megatron.model import BertModel, Float16Module, GPTModel, ModelType, T5Model from megatron.model import DistributedDataParallel as LocalDDP from megatron.model.classification import Classification from megatron.optimizer import get_megatron_optimizer from megatron.schedules import get_forward_backward_func from megatron.text_generation.communication import broadcast_int_list, broadcast_tensor from megatron.text_generation.generation import ( beam_search_and_return_on_first_stage, generate_tokens_probs_and_return_on_first_stage, ) from megatron.tokenizer.tokenizer import _vocab_size_with_padding from megatron.training import get_model, get_optimizer_param_scheduler, training_log from megatron.utils import ( average_losses_across_data_parallel_group, calc_params_l2_norm, get_ltor_masks_and_position_ids, unwrap_model, ) # model utilities def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True): """Build the model.""" args = get_args() mode = "pre-training" if args.pretraining_flag else "fine-tuning" if args.rank == 0: print(f"Building {args.model_type_name} model in the {mode} mode.") print( "The Megatron LM model weights are initialized at random in `accelerator.prepare`. " "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup." 
) if args.model_type_name == "bert": if args.pretraining_flag: num_tokentypes = 2 if args.bert_binary_head else 0 model = BertModel( num_tokentypes=num_tokentypes, add_binary_head=args.bert_binary_head, parallel_output=True, pre_process=pre_process, post_process=post_process, ) else: model = Classification( num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process ) elif args.model_type_name == "gpt": model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process) elif args.model_type_name == "t5": model = T5Model( num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process, add_encoder=add_encoder, add_decoder=add_decoder, ) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") return model def prepare_model(accelerator): accelerator.print("Preparing model") args = get_args() if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None: if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None: raise ValueError( "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`." ) custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func) else: if args.model_type_name in ("bert", "gpt"): model_type = ModelType.encoder_or_decoder elif args.model_type_name == "t5": model_type = ModelType.encoder_and_decoder if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1: args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2 model = get_model(model_provider_func, model_type) return model # dataloader utilities class MegatronLMDummyDataLoader: """ Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training Args: **dataset_kwargs: Megatron data arguments. 
""" def __init__(self, **dataset_kwargs): parser = argparse.ArgumentParser() parser = _add_data_args(parser) parser = _add_validation_args(parser) data_args = parser.parse_known_args() self.dataset_args = vars(data_args[0]) self.dataset_args.update(dataset_kwargs) self.dataset_args["megatron_dataset_flag"] = True def set_megatron_data_args(self): args = get_args() for key, value in self.dataset_args.items(): setattr(args, key, value) def get_train_valid_test_datasets_provider(self): def train_valid_test_datasets_provider(train_val_test_num_samples): """Build train, valid, and test datasets.""" args = get_args() dataset_args = { "data_prefix": args.data_path, "data_impl": args.data_impl, "splits_string": args.split, "train_valid_test_num_samples": train_val_test_num_samples, "skip_warmup": (not args.mmap_warmup), "seed": args.seed, } if args.model_type_name == "bert": dataset_args.update( { "max_seq_length": args.seq_length, "masked_lm_prob": args.mask_prob, "short_seq_prob": args.short_seq_prob, "binary_head": args.bert_binary_head, } ) elif args.model_type_name == "gpt": dataset_args.update( { "seq_length": args.seq_length, } ) elif args.model_type_name == "t5": dataset_args.update( { "max_seq_length": args.encoder_seq_length, "max_seq_length_dec": args.decoder_seq_length, "masked_lm_prob": args.mask_prob, "short_seq_prob": args.short_seq_prob, "dataset_type": "t5", } ) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") if args.model_type_name == "gpt": from megatron.data.gpt_dataset import build_train_valid_test_datasets else: from megatron.data.dataset_utils import build_train_valid_test_datasets train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args) return train_ds, valid_ds, test_ds return train_valid_test_datasets_provider def build_pretraining_data_loader(self, dataset, consumed_samples): if dataset is None: return None args = get_args() micro_batch_size = args.micro_batch_size * args.num_micro_batches # Megatron sampler if args.dataloader_type == "single": batch_sampler = MegatronPretrainingSampler( total_samples=len(dataset), consumed_samples=consumed_samples, micro_batch_size=micro_batch_size, data_parallel_rank=mpu.get_data_parallel_rank(), data_parallel_size=mpu.get_data_parallel_world_size(), ) elif args.dataloader_type == "cyclic": batch_sampler = MegatronPretrainingRandomSampler( dataset, total_samples=len(dataset), consumed_samples=consumed_samples, micro_batch_size=micro_batch_size, data_parallel_rank=mpu.get_data_parallel_rank(), data_parallel_size=mpu.get_data_parallel_world_size(), data_sharding=args.data_sharding, ) else: raise Exception(f"{args.dataloader_type} dataloader type is not supported.") # Torch dataloader. return torch.utils.data.DataLoader( dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True ) def build_train_valid_test_data_iterators(self): def cyclic_iter(iter): while True: yield from iter args = get_args() (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None) print_rank_0("> building train, validation, and test datasets ...") # Backward compatibility, assume fixed batch size. 
if args.iteration > 0 and args.consumed_train_samples == 0: assert args.train_samples is None, "only backward compatiblity support for iteration-based training" args.consumed_train_samples = args.iteration * args.global_batch_size if args.iteration > 0 and args.consumed_valid_samples == 0: if args.train_samples is None: args.consumed_valid_samples = ( (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size ) # Data loader only on rank 0 of each model parallel group. if mpu.get_tensor_model_parallel_rank() == 0: # Number of train/valid/test samples. if args.train_samples: train_samples = args.train_samples else: train_samples = args.train_iters * args.global_batch_size eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters test_iters = args.eval_iters train_val_test_num_samples = [ train_samples, eval_iters * args.global_batch_size, test_iters * args.global_batch_size, ] print_rank_0(" > datasets target sizes (minimum size):") print_rank_0(f" train: {train_val_test_num_samples[0]}") print_rank_0(f" validation: {train_val_test_num_samples[1]}") print_rank_0(f" test: {train_val_test_num_samples[2]}") # Build the datasets. train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider() train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples) # Build dataloders. train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples) valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples) test_dataloader = self.build_pretraining_data_loader(test_ds, 0) # Flags to know if we need to do training/validation/testing. do_train = train_dataloader is not None and args.train_iters > 0 do_valid = valid_dataloader is not None and args.eval_iters > 0 do_test = test_dataloader is not None and args.eval_iters > 0 # Need to broadcast num_tokens and num_type_tokens. flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)]) else: flags = torch.cuda.LongTensor([0, 0, 0]) # Broadcast num tokens. torch.distributed.broadcast( flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group() ) args.do_train = flags[0].item() args.do_valid = flags[1].item() args.do_test = flags[2].item() # Build iterators. 
dl_type = args.dataloader_type assert dl_type in ["single", "cyclic"] if train_dataloader is not None: train_data_iterator = ( iter(train_dataloader) if dl_type == "single" else iter(cyclic_iter(train_dataloader)) ) else: train_data_iterator = None if valid_dataloader is not None: valid_data_iterator = ( iter(valid_dataloader) if dl_type == "single" else iter(cyclic_iter(valid_dataloader)) ) else: valid_data_iterator = None if test_dataloader is not None: test_data_iterator = iter(test_dataloader) if dl_type == "single" else iter(cyclic_iter(test_dataloader)) else: test_data_iterator = None return train_data_iterator, valid_data_iterator, test_data_iterator def prepare_data_loader(accelerator, dataloader): accelerator.print("Preparing dataloader") args = get_args() if not args.megatron_dataset_flag: from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader args = get_args() micro_batch_size = args.micro_batch_size * args.num_micro_batches kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS} if kwargs["batch_size"] is None: if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler): kwargs["sampler"].batch_size = micro_batch_size else: del kwargs["sampler"] del kwargs["shuffle"] del kwargs["batch_size"] kwargs["batch_sampler"].batch_size = micro_batch_size else: del kwargs["batch_sampler"] kwargs["batch_size"] = micro_batch_size dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs) return prepare_data_loader( dataloader, accelerator.device, num_processes=mpu.get_data_parallel_world_size(), process_index=mpu.get_data_parallel_rank(), split_batches=accelerator.split_batches, put_on_device=True, rng_types=accelerator.rng_types.copy(), dispatch_batches=accelerator.dispatch_batches, ) else: if args.consumed_samples is not None: ( args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples, ) = args.consumed_samples else: args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0 ( train_data_iterator, valid_data_iterator, test_data_iterator, ) = dataloader.build_train_valid_test_data_iterators() return train_data_iterator, valid_data_iterator, test_data_iterator # optimizer utilities class MegatronLMOptimizerWrapper(AcceleratedOptimizer): def __init__(self, optimizer): super().__init__(optimizer, device_placement=False, scaler=None) def zero_grad(self, set_to_none=None): pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed def step(self): pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed @property def step_was_skipped(self): """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" return self.optimizer.skipped_iter def prepare_optimizer(accelerator, model): accelerator.print("Preparing optimizer") args = get_args() optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult) return optimizer # scheduler utilities class MegatronLMDummyScheduler: """ Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training loop when scheduler config is specified in the deepspeed config file. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. total_num_steps (int): Total number of steps. warmup_num_steps (int): Number of steps for warmup. **kwargs: Other arguments. 
""" def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs): self.optimizer = optimizer self.total_num_steps = total_num_steps self.warmup_num_steps = warmup_num_steps self.kwargs = kwargs class MegatronLMSchedulerWrapper(AcceleratedScheduler): def __init__(self, scheduler, optimizers): super().__init__(scheduler, optimizers) def step(self, *args, **kwargs): return # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed def prepare_scheduler(accelerator, optimizer, scheduler): accelerator.print("Preparing scheduler") scheduler = get_optimizer_param_scheduler(optimizer) return scheduler class AbstractTrainStep(ABC): """Abstract class for batching, forward pass and loss handler.""" def __init__(self, name): super().__init__() self.name = name def get_batch_func(self): pass def get_forward_step_func(self): pass def get_loss_func(self): pass class BertTrainStep(AbstractTrainStep): """ Bert train step class. Args: args (`argparse.Namespace`): Megatron-LM arguments. """ def __init__(self, args): super().__init__("BertTrainStep") self.get_batch = self.get_batch_func(args.megatron_dataset_flag) self.loss_func = self.get_loss_func(args.pretraining_flag, args.num_labels) self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head) if not args.model_return_dict: self.model_output_class = None else: self.model_output_class = SequenceClassifierOutput def get_batch_func(self, megatron_dataset_flag): def get_batch_megatron(data_iterator): """Build the batch.""" # Items and their type. keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"] datatype = torch.int64 # Broadcast data. if data_iterator is not None: data = next(data_iterator) else: data = None data_b = mpu.broadcast_data(keys, data, datatype) # Unpack. tokens = data_b["text"].long() types = data_b["types"].long() sentence_order = data_b["is_random"].long() loss_mask = data_b["loss_mask"].float() lm_labels = data_b["labels"].long() padding_mask = data_b["padding_mask"].long() return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask def get_batch_transformer(data_iterator): """Build the batch.""" data = next(data_iterator) data = send_to_device(data, torch.cuda.current_device()) # Unpack. 
tokens = data["input_ids"].long() padding_mask = data["attention_mask"].long() if "token_type_ids" in data: types = data["token_type_ids"].long() else: types = None if "labels" in data: lm_labels = data["labels"].long() loss_mask = (data["labels"] != -100).to(torch.float) else: lm_labels = None loss_mask = None if "next_sentence_label" in data: sentence_order = data["next_sentence_label"].long() else: sentence_order = None return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask if megatron_dataset_flag: return get_batch_megatron else: return get_batch_transformer def get_loss_func(self, pretraining_flag, num_labels): def loss_func_pretrain(loss_mask, sentence_order, output_tensor): lm_loss_, sop_logits = output_tensor lm_loss_ = lm_loss_.float() loss_mask = loss_mask.float() lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() if sop_logits is not None: sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1) sop_loss = sop_loss.float() loss = lm_loss + sop_loss averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss]) return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]} else: loss = lm_loss averaged_losses = average_losses_across_data_parallel_group([lm_loss]) return loss, {"lm loss": averaged_losses[0]} def loss_func_finetune(labels, logits): if num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)): loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, num_labels), labels.view(-1)) else: loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) averaged_losses = average_losses_across_data_parallel_group([loss]) return loss, {"loss": averaged_losses[0]} if pretraining_flag: return loss_func_pretrain else: return loss_func_finetune def get_forward_step_func(self, pretraining_flag, bert_binary_head): def forward_step(data_iterator, model): """Forward step.""" tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator) if not bert_binary_head: types = None # Forward pass through the model. if pretraining_flag: output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels) return output_tensor, partial(self.loss_func, loss_mask, sentence_order) else: logits = model(tokens, padding_mask, tokentype_ids=types) return logits, partial(self.loss_func, labels) return forward_step class GPTTrainStep(AbstractTrainStep): """ GPT train step class. Args: args (`argparse.Namespace`): Megatron-LM arguments. """ def __init__(self, args): super().__init__("GPTTrainStep") self.get_batch = self.get_batch_func(args.megatron_dataset_flag) self.loss_func = self.get_loss_func() self.forward_step = self.get_forward_step_func() self.eod_token = args.padded_vocab_size - 1 if args.vocab_file is not None: tokenizer = get_tokenizer() self.eod_token = tokenizer.eod self.reset_position_ids = args.reset_position_ids self.reset_attention_mask = args.reset_attention_mask self.eod_mask_loss = args.eod_mask_loss if not args.model_return_dict: self.model_output_class = None else: self.model_output_class = CausalLMOutputWithCrossAttentions def get_batch_func(self, megatron_dataset_flag): def get_batch_megatron(data_iterator): """Generate a batch""" # Items and their type. keys = ["text"] datatype = torch.int64 # Broadcast data. 
if data_iterator is not None: data = next(data_iterator) else: data = None data_b = mpu.broadcast_data(keys, data, datatype) # Unpack. tokens_ = data_b["text"].long() labels = tokens_[:, 1:].contiguous() tokens = tokens_[:, :-1].contiguous() # Get the masks and postition ids. attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss ) return tokens, labels, loss_mask, attention_mask, position_ids def get_batch_transformer(data_iterator): data = next(data_iterator) data = {"input_ids": data["input_ids"]} data = send_to_device(data, torch.cuda.current_device()) tokens_ = data["input_ids"].long() padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token tokens_ = torch.concat([tokens_, padding], dim=1) labels = tokens_[:, 1:].contiguous() tokens = tokens_[:, :-1].contiguous() # Get the masks and postition ids. attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True ) return tokens, labels, loss_mask, attention_mask, position_ids if megatron_dataset_flag: return get_batch_megatron else: return get_batch_transformer def get_loss_func(self): args = get_args() def loss_func(loss_mask, output_tensor): if args.return_logits: losses, logits = output_tensor else: losses = output_tensor losses = losses.float() loss_mask = loss_mask.view(-1).float() loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() # Reduce loss for logging. averaged_loss = average_losses_across_data_parallel_group([loss]) output_dict = {"lm loss": averaged_loss[0]} if args.return_logits: output_dict.update({"logits": logits}) return loss, output_dict return loss_func def get_forward_step_func(self): def forward_step(data_iterator, model): """Forward step.""" # Get the batch. tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) output_tensor = model(tokens, position_ids, attention_mask, labels=labels) return output_tensor, partial(self.loss_func, loss_mask) return forward_step class T5TrainStep(AbstractTrainStep): """ T5 train step class. Args: args (`argparse.Namespace`): Megatron-LM arguments. """ def __init__(self, args): super().__init__("T5TrainStep") self.get_batch = self.get_batch_func(args.megatron_dataset_flag) self.loss_func = self.get_loss_func() self.forward_step = self.get_forward_step_func() if not args.model_return_dict: self.model_output_class = None else: self.model_output_class = Seq2SeqLMOutput @staticmethod def attn_mask_postprocess(attention_mask): # We create a 3D attention mask from a 2D tensor mask. # [b, 1, s] attention_mask_b1s = attention_mask.unsqueeze(1) # [b, s, 1] attention_mask_bs1 = attention_mask.unsqueeze(2) # [b, s, s] attention_mask_bss = attention_mask_b1s * attention_mask_bs1 # Convert attention mask to binary: extended_attention_mask = attention_mask_bss < 0.5 return extended_attention_mask @staticmethod def get_decoder_mask(seq_length, device): attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device)) attention_mask = attention_mask < 0.5 return attention_mask @staticmethod def get_enc_dec_mask(attention_mask, dec_seq_length, device): batch_size, _ = attention_mask.shape # We create a 3D attention mask from a 2D tensor mask. 
# [b, 1, s] attention_mask_b1s = attention_mask.unsqueeze(1) # [b, s, 1] attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device) attention_mask_bss = attention_mask_bs1 * attention_mask_b1s extended_attention_mask = attention_mask_bss < 0.5 return extended_attention_mask def get_batch_func(self, megatron_dataset_flag): def get_batch_megatron(data_iterator): """Build the batch.""" keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"] datatype = torch.int64 # Broadcast data. if data_iterator is not None: data = next(data_iterator) else: data = None data_b = mpu.broadcast_data(keys, data, datatype) # Unpack. tokens_enc = data_b["text_enc"].long() tokens_dec = data_b["text_dec"].long() labels = data_b["labels"].long() loss_mask = data_b["loss_mask"].float() enc_mask = data_b["enc_mask"] < 0.5 dec_mask = data_b["dec_mask"] < 0.5 enc_dec_mask = data_b["enc_dec_mask"] < 0.5 return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask def get_batch_transformer(data_iterator): """Build the batch.""" data = next(data_iterator) data = send_to_device(data, torch.cuda.current_device()) tokens_enc = data["input_ids"].long() labels = data["labels"].long() loss_mask = (labels != -100).to(torch.float) if "decoder_input_ids" in data: tokens_dec = data["decoder_input_ids"].long() else: tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long) tokens_dec[..., 1:] = labels[..., :-1].clone() tokens_dec[..., 0] = 0 tokens_dec.masked_fill_(tokens_dec == -100, 0) enc_mask = T5TrainStep.attn_mask_postprocess(data["attention_mask"].long()) dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device) enc_dec_mask = T5TrainStep.get_enc_dec_mask( data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device ) return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask if megatron_dataset_flag: return get_batch_megatron else: return get_batch_transformer def get_loss_func(self): def loss_func(loss_mask, output_tensor): lm_loss_ = output_tensor.float() lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() loss = lm_loss averaged_losses = average_losses_across_data_parallel_group([lm_loss]) return loss, {"lm loss": averaged_losses[0]} return loss_func def get_forward_step_func(self): def forward_step(data_iterator, model): """Forward step.""" # Get the batch. tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch( data_iterator ) # Forward model lm_labels output_tensor = model( tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels ) return output_tensor, partial(self.loss_func, loss_mask) return forward_step # intialize megatron setup def initialize(accelerator, extra_args_provider=None, args_defaults={}): accelerator.print("Initializing Megatron-LM") assert torch.cuda.is_available(), "Megatron requires CUDA." 
# Parse arguments args = parse_args(extra_args_provider, ignore_unknown_args=True) # Set defaults for key, value in args_defaults.items(): if getattr(args, key, None) is not None: if args.rank == 0: print( f"WARNING: overriding default arguments for " f"{key}:{getattr(args, key)} with {key}:{value}", flush=True, ) setattr(args, key, value) if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False): assert args.load is not None, "--use-checkpoints-args requires --load argument" load_args_from_checkpoint(args) validate_args(args) # set global args, build tokenizer, and set adlr-autoresume, # tensorboard-writer, and timers. set_global_variables(args) # torch.distributed initialization def finish_mpu_init(): args = get_args() # Pytorch distributed. device_count = torch.cuda.device_count() args.rank = torch.distributed.get_rank() args.world_size = torch.distributed.get_world_size() if device_count > 0: device = args.rank % device_count if args.local_rank is not None: assert args.local_rank == device, "expected local-rank to be the same as rank % device-count." else: args.local_rank = device # Set the tensor model-parallel, pipeline model-parallel, and # data-parallel communicators. if mpu.model_parallel_is_initialized(): print("model parallel is already initialized") else: mpu.initialize_model_parallel( args.tensor_model_parallel_size, args.pipeline_model_parallel_size, args.virtual_pipeline_model_parallel_size, args.pipeline_model_parallel_split_rank, ) # Random seeds for reproducibility. if args.rank == 0: print(f"> setting random seeds to {args.seed} ...") _set_random_seed(args.seed, args.data_parallel_random_init) args = get_args() # Megatron's MPU is the master. Complete initialization right away. finish_mpu_init() # Autoresume. _init_autoresume() # Compile dependencies. _compile_dependencies() # Set pytorch JIT layer fusion options and warmup JIT functions. set_jit_fusion_options() args = get_args() args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args) if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2: args.bert_binary_head = True else: args.bert_binary_head = False args.iteration = 0 class MegatronEngine(torch.nn.Module): """ Megatron-LM model wrapper Args: accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use. model: Megatron-LM model optimizer: Megatron-LM optimizer lr_scheduler: Megatron-LM lr scheduler """ def __init__(self, accelerator, model, optimizer, scheduler): super().__init__() self.module = model self.base_model = model[0] self.optimizer = optimizer self.scheduler = scheduler args = get_args() if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None: self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class( args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs ) elif args.model_type_name == "bert": self.train_step_handler = BertTrainStep(args) elif args.model_type_name == "gpt": self.train_step_handler = GPTTrainStep(args) elif args.model_type_name == "t5": self.train_step_handler = T5TrainStep(args) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") self.optimizer.skipped_iter = False # Tracking loss. 
self.total_loss_dict = {} self.eval_total_loss_dict = {} self.iteration = 0 self.report_memory_flag = True if args.tensorboard_dir is not None: write_args_to_tensorboard() def train(self): for model_module in self.module: model_module.train() self.log_eval_results() def eval(self): for model_module in self.module: model_module.eval() def train_step(self, **batch_data): """ Training step for Megatron-LM Args: batch_data (:obj:`dict`): The batch data to train on. """ args = get_args() timers = get_timers() if len(batch_data) > 0: data_chunks = [] if args.num_micro_batches > 1: for i in range(0, args.num_micro_batches): data_chunks.append( { k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items() } ) else: data_chunks = [batch_data] if len(self.module) > 1: batch_data_iterator = ( [iter(data_chunks) for _ in range(len(self.module))] if len(batch_data) > 0 else [None] * len(self.module) ) else: batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None # Set grad to zero. if args.DDP_impl == "local" and args.use_contiguous_buffers_in_local_ddp: for partition in self.module: partition.zero_grad_buffer() self.optimizer.zero_grad() # Forward pass. forward_backward_func = get_forward_backward_func() losses_reduced = forward_backward_func( self.train_step_handler.forward_step, batch_data_iterator, self.module, self.optimizer, None, forward_only=False, ) # Empty unused memory. if args.empty_unused_memory_level >= 1: torch.cuda.empty_cache() # Reduce gradients. timers("backward-reduce-model-grads").start() self.optimizer.reduce_model_grads(args, timers) timers("backward-reduce-model-grads").stop() # Update parameters. timers("optimizer").start() update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers) timers("optimizer").stop() # Gather params. if update_successful: timers("backward-gather-model-params").start() self.optimizer.gather_model_params(args, timers) timers("backward-gather-model-params").stop() # Update learning rate. if update_successful: if self.scheduler is not None: increment = get_num_microbatches() * args.micro_batch_size * args.data_parallel_size self.scheduler.step(increment=increment) skipped_iter = 0 else: skipped_iter = 1 self.optimizer.skipped_iter = not update_successful # Empty unused memory. if args.empty_unused_memory_level >= 2: torch.cuda.empty_cache() args.consumed_train_samples += ( mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() ) if mpu.is_pipeline_last_stage(ignore_virtual=True): # Average loss across microbatches. loss_reduced = {} for key in losses_reduced[0]: losses_reduced_for_key = [x[key] for x in losses_reduced] if len(losses_reduced_for_key[0].shape) == 0: loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) else: loss_reduced[key] = torch.concat(losses_reduced_for_key) return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad return {}, skipped_iter, grad_norm, num_zeros_in_grad def eval_step(self, **batch_data): """ Evaluation step for Megatron-LM Args: batch_data (:obj:`dict`): The batch data to evaluate on. 
""" args = get_args() data_chunks = [] if args.num_micro_batches > 1: for i in range(0, args.num_micro_batches): data_chunks.append( {k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items()} ) else: data_chunks = [batch_data] if len(self.module) > 1: batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))] else: batch_data_iterator = iter(data_chunks) forward_backward_func = get_forward_backward_func() loss_dicts = forward_backward_func( self.train_step_handler.forward_step, batch_data_iterator, self.module, optimizer=None, timers=None, forward_only=True, ) # Empty unused memory if args.empty_unused_memory_level >= 1: torch.cuda.empty_cache() args.consumed_valid_samples += ( mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() ) if mpu.is_pipeline_last_stage(ignore_virtual=True): # Average loss across microbatches. loss_reduced = {} for key in loss_dicts[0]: losses_reduced_for_key = [x[key] for x in loss_dicts] if len(losses_reduced_for_key[0].shape) == 0: loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) else: loss_reduced[key] = torch.concat(losses_reduced_for_key) return loss_reduced else: return {} def forward(self, **batch_data): # During training, we use train_step() # model(**batch_data) performs following operations by delegating it to `self.train_step`: # 1. Prepare **batch_data for Tendor, Pipeline and Model Parallelism # 2. Set grad to zero. # 3. forward pass and backward pass using Pipeline Parallelism # 4. Empty unused memory. # 5. Reduce gradients. # 6. Update parameters. # 7. Gather params when using Distributed Optimizer (Data Parallelism). # 8. Update learning rate if scheduler is specified. # 9. Empty unused memory. # 10. Average loss across microbatches and across DP ranks. # # During evaluation, we use eval_step() args = get_args() if self.module[0].training: loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data) self.iteration += 1 if args.tensorboard_dir is not None: # Logging. 
loss_scale = self.optimizer.get_loss_scale().item() params_norm = None if args.log_params_norm: params_norm = calc_params_l2_norm(self.model) self.report_memory_flag = training_log( loss_dict, self.total_loss_dict, self.optimizer.param_groups[0]["lr"], self.iteration, loss_scale, self.report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad, ) else: loss_dict = self.eval_step(**batch_data) if args.tensorboard_dir is not None: for key in loss_dict: self.eval_total_loss_dict[key] = ( self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] ) self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get( key + "_num_iters", torch.cuda.FloatTensor([0.0]) ) + torch.cuda.FloatTensor([1.0]) loss = torch.tensor(0.0, device=args.local_rank) for key in loss_dict: if len(loss_dict[key].shape) == 0: loss += loss_dict[key] logits = None if "logits" in loss_dict: logits = loss_dict["logits"] # loss = reduce(loss) if self.train_step_handler.model_output_class is not None: return self.train_step_handler.model_output_class(loss=loss, logits=logits) return loss def log_eval_results(self): args = get_args() if args.tensorboard_dir is None or self.iteration == 0: return args = get_args() writer = get_tensorboard_writer() string = f"validation loss at iteration {self.iteration} | " for key in self.eval_total_loss_dict: if key.endswith("_num_iters"): continue value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + "_num_iters"] string += f"{key} value: {value} | " ppl = math.exp(min(20, value.item())) if args.pretraining_flag: string += f"{key} PPL: {ppl} | " if writer: writer.add_scalar(f"{key} validation", value.item(), self.iteration) if args.pretraining_flag: writer.add_scalar(f"{key} validation ppl", ppl, self.iteration) length = len(string) + 1 print_rank_last("-" * length) print_rank_last(string) print_rank_last("-" * length) self.eval_total_loss_dict = {} def save_checkpoint(self, output_dir): self.log_eval_results() args = get_args() args.save = output_dir torch.distributed.barrier() save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler) torch.distributed.barrier() def load_checkpoint(self, input_dir): args = get_args() args.load = input_dir args.consumed_train_samples = 0 args.consumed_valid_samples = 0 torch.distributed.barrier() iteration = load_checkpoint(self.module, self.optimizer, self.scheduler) torch.distributed.barrier() self.iteration = iteration if args.fp16 and self.iteration == 0: self.optimizer.reload_model_params() def megatron_generate( self, inputs, attention_mask=None, max_length=None, max_new_tokens=None, num_beams=None, temperature=None, top_k=None, top_p=None, length_penalty=None, **kwargs, ): """ Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along with sampling. Refer the Megatron-LM repo for more details Args: inputs (torch.Tensor): input ids attention_mask (torch.Tensor, optional): attention mask. Defaults to None. max_length (int, optional): max length of the generated sequence. Defaults to None. Either this or max_new_tokens should be provided. max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None. Either this or max_length should be provided. num_beams (int, optional): number of beams to use for beam search. Defaults to None. temperature (float, optional): temperature for sampling. Defaults to 1.0. top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0. 
top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0. length_penalty (float, optional): length penalty for beam search. Defaults to None. kwargs: additional key-value arguments """ # checking if required arguments are passed args = get_args() if args.model_type_name != "gpt": raise NotImplementedError("Generate method is not implemented for this model") if args.data_parallel_size > 1: raise ValueError("Generate method requires data parallelism to be 1") if args.sequence_parallel: raise ValueError("Generate method requires sequence parallelism to be False") if args.recompute_granularity is not None: raise ValueError("Checkpoint activations cannot be set for inference") if args.vocab_file is None: raise ValueError("Vocab file is required for inference") # Prepare inputs if max_length is None and max_new_tokens is None: raise ValueError("`max_length` or `max_new_tokens` are required for inference") if temperature is None: temperature = 1.0 elif not (0.0 < temperature <= 100.0): raise ValueError("temperature must be a positive number less than or equal to 100.0") if top_k is None: top_k = 0 elif not (0 <= top_k <= 1000): raise ValueError("top_k must be a positive number less than or equal to 1000") if top_p is None: top_p = 0.0 elif top_p > 0.0 and top_k > 0.0: raise ValueError("top_p and top_k sampling cannot be set together") else: if not (0.0 <= top_p <= 1.0): raise ValueError("top_p must be less than or equal to 1.0") top_p_decay = kwargs.get("top_p_decay", 0.0) if not (0.0 <= top_p_decay <= 1.0): raise ValueError("top_p_decay must be less than or equal to 1.0") top_p_bound = kwargs.get("top_p_bound", 0.0) if not (0.0 <= top_p_bound <= 1.0): raise ValueError("top_p_bound must be less than or equal to 1.0") add_BOS = kwargs.get("add_BOS", False) if not (isinstance(add_BOS, bool)): raise ValueError("add_BOS must be a boolean") beam_width = num_beams if beam_width is not None: if not isinstance(beam_width, int): raise ValueError("beam_width must be an integer") if beam_width < 1: raise ValueError("beam_width must be greater than 0") if inputs.shape[0] > 1: return "When doing beam_search, batch size must be 1" tokenizer = get_tokenizer() stop_token = kwargs.get("stop_token", tokenizer.eod) if stop_token is not None: if not isinstance(stop_token, int): raise ValueError("stop_token must be an integer") if length_penalty is None: length_penalty = 1.0 sizes_list = None prompts_tokens_tensor = None prompts_length_tensor = None if torch.distributed.get_rank() == 0: # Get the prompts length. 
if attention_mask is None: prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0]) else: prompts_length_tensor = attention_mask.sum(axis=-1).cuda() if max_new_tokens is None: max_new_tokens = max_length - inputs.shape[1] if max_new_tokens <= 0: raise ValueError("max_new_tokens must be greater than 0") if add_BOS: max_length = max_new_tokens + inputs.shape[1] + 1 # making sure that `max_length` is a multiple of 4 to leverage fused kernels max_length = 4 * math.ceil(max_length / 4) max_new_tokens = max_length - (inputs.shape[1] + 1) padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) prompts_tokens_tensor = torch.concat( [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1 ) else: # making sure that `max_length` is a multiple of 4 to leverage fused kernels max_length = max_new_tokens + inputs.shape[1] max_length = 4 * math.ceil(max_length / 4) max_new_tokens = max_length - inputs.shape[1] padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1) # We need the sizes of these tensors for the boradcast sizes_list = [ prompts_tokens_tensor.size(0), # Batch size prompts_tokens_tensor.size(1), ] # Sequence lenght # First, broadcast the sizes. sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0) # Now that we have the sizes, we can boradcast the tokens # and length tensors. sizes = sizes_tensor.tolist() context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0) context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0) # Run the inference random_seed = kwargs.get("random_seed", 0) torch.random.manual_seed(random_seed) unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module)) if beam_width is not None: tokens, _ = beam_search_and_return_on_first_stage( unwrapped_model, context_tokens_tensor, context_length_tensor, beam_width, stop_token=stop_token, num_return_gen=1, length_penalty=length_penalty, ) else: tokens, _, _ = generate_tokens_probs_and_return_on_first_stage( unwrapped_model, context_tokens_tensor, context_length_tensor, return_output_log_probs=False, top_k=top_k, top_p=top_p, top_p_decay=top_p_decay, top_p_bound=top_p_bound, temperature=temperature, use_eod_token_for_early_termination=True, ) return tokens # other utilities def avg_losses_across_data_parallel_group(losses): """ Average losses across data parallel group. Args: losses (List[Tensor]): List of losses to average across data parallel group. """ return average_losses_across_data_parallel_group(losses) def gather_across_data_parallel_groups(tensor): """ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather across data parallel ranks. """ def _gpu_gather_one(tensor): if tensor.ndim == 0: tensor = tensor.clone()[None] output_tensors = [ torch.empty_like(tensor) for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group())) ] torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group()) return torch.cat(output_tensors, dim=0) return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
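The `train_step` and `eval_step` methods above both begin by slicing the incoming batch into `num_micro_batches` chunks of `micro_batch_size` rows before handing them to the pipeline forward-backward schedule. A minimal, self-contained sketch of that chunking pattern is shown below; the function name and toy tensors are illustrative only and are not part of the Megatron-LM or Accelerate APIs.

```python
import torch


def split_into_micro_batches(batch_data, num_micro_batches, micro_batch_size):
    """Slice every tensor in `batch_data` into `num_micro_batches` chunks of `micro_batch_size` rows."""
    if num_micro_batches <= 1 or not batch_data:
        return [batch_data]
    return [
        {k: v[i * micro_batch_size : (i + 1) * micro_batch_size] for k, v in batch_data.items()}
        for i in range(num_micro_batches)
    ]


batch = {"input_ids": torch.arange(32).reshape(8, 4), "attention_mask": torch.ones(8, 4, dtype=torch.long)}
chunks = split_into_micro_batches(batch, num_micro_batches=4, micro_batch_size=2)
print(len(chunks), chunks[0]["input_ids"].shape)  # 4 torch.Size([2, 4])
```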
accelerate/src/accelerate/utils/megatron_lm.py/0
{ "file_path": "accelerate/src/accelerate/utils/megatron_lm.py", "repo_id": "accelerate", "token_count": 26949 }
9
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pickle import tempfile from unittest.mock import patch import pytest import torch from parameterized import parameterized from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_device, require_non_cpu, slow, torch_device from accelerate.test_utils.testing import AccelerateTestCase, require_non_torch_xla from accelerate.utils import patch_environment from accelerate.utils.modeling import load_checkpoint_in_model def create_components(): model = torch.nn.Linear(2, 4) optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1) train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3]))) valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6]))) return model, optimizer, scheduler, train_dl, valid_dl class ModelForTest(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(3, 4) self.batchnorm = torch.nn.BatchNorm1d(4) self.linear2 = torch.nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) def get_signature(model): return (model.weight.abs().sum() + model.bias.abs().sum()).item() def load_random_weights(model): state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict() model.load_state_dict(state) def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = "use_safetensors" if param.args[0] is True else "use_pytorch" return f"{func.__name__}_{param_based_name}" class AcceleratorTester(AccelerateTestCase): # Should be removed after 1.0.0 release def test_deprecated_values(self): # Test defaults accelerator = Accelerator() assert accelerator.split_batches is False, "split_batches should be False by default" assert accelerator.dispatch_batches is None, "dispatch_batches should be None by default" assert accelerator.even_batches is True, "even_batches should be True by default" assert accelerator.use_seedable_sampler is False, "use_seedable_sampler should be False by default" # Pass some arguments only with pytest.warns(FutureWarning) as cm: accelerator = Accelerator( dispatch_batches=True, split_batches=False, ) deprecation_warning = str(cm.list[0].message) assert accelerator.split_batches is False, "split_batches should be True" assert accelerator.dispatch_batches is True, "dispatch_batches should be True" assert accelerator.even_batches is True, "even_batches should be True by default" assert accelerator.use_seedable_sampler is False, "use_seedable_sampler should 
be False by default" assert "dispatch_batches" in deprecation_warning assert "split_batches" in deprecation_warning assert "even_batches" not in deprecation_warning assert "use_seedable_sampler" not in deprecation_warning # Pass in some arguments, but with their defaults with pytest.warns(FutureWarning) as cm: accelerator = Accelerator( even_batches=True, use_seedable_sampler=False, ) deprecation_warning = str(cm.list[0].message) assert "even_batches" in deprecation_warning assert accelerator.even_batches is True assert "use_seedable_sampler" in deprecation_warning assert accelerator.use_seedable_sampler is False @require_non_cpu def test_accelerator_can_be_reinstantiated(self): _ = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type in ["cuda", "mps", "npu", "xpu", "xla"] with self.assertRaises(ValueError): _ = Accelerator(cpu=True) def test_mutable_states(self): accelerator = Accelerator() state = GradientState() assert state.num_steps == 1 accelerator.gradient_accumulation_steps = 4 assert state.num_steps == 4 assert state.sync_gradients is True accelerator.sync_gradients = False assert state.sync_gradients is False GradientState._reset_state() def test_prepared_objects_are_referenced(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) assert prepared_model in accelerator._models assert prepared_optimizer in accelerator._optimizers assert prepared_scheduler in accelerator._schedulers assert prepared_train_dl in accelerator._dataloaders assert prepared_valid_dl in accelerator._dataloaders def test_free_memory_dereferences_prepared_components(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) accelerator.free_memory() assert len(accelerator._models) == 0 assert len(accelerator._optimizers) == 0 assert len(accelerator._schedulers) == 0 assert len(accelerator._dataloaders) == 0 @require_non_torch_xla def test_env_var_device(self): """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides default device.""" PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*args, **kwargs): pass with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"): accelerator = Accelerator() assert str(accelerator.state.device) == "cuda:64" @parameterized.expand((True, False), name_func=parameterized_custom_name_func) def test_save_load_model(self, use_safetensors): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # make sure loaded weights match accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_model(self, use_safetensors): accelerator = Accelerator() model = 
torch.nn.Linear(10, 10) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_model(model, tmpdirname, safe_serialization=use_safetensors) # make sure loaded weights match load_checkpoint_in_model(model, tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_model_offload(self, use_safetensors): accelerator = Accelerator() device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"} inputs = torch.randn(3, 3) model = ModelForTest() expected = model(inputs) with tempfile.TemporaryDirectory() as tmp_dir: accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load and save offloaded model load_checkpoint_and_dispatch(model, tmp_dir, device_map=device_map, offload_folder=tmp_dir) accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load weights that were saved from the offloaded model load_checkpoint_and_dispatch(model, tmp_dir) output = model(inputs) assert torch.allclose(expected, output, atol=1e-5) @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_load_model_with_hooks(self, use_safetensors): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) model_signature = get_signature(model) # saving hook def save_config(models, weights, output_dir): config = {"class_name": models[0].__class__.__name__} with open(os.path.join(output_dir, "data.json"), "w") as f: json.dump(config, f) # loading hook def load_config(models, input_dir): with open(os.path.join(input_dir, "data.json")) as f: config = json.load(f) models[0].class_name = config["class_name"] save_hook = accelerator.register_save_state_pre_hook(save_config) load_hook = accelerator.register_load_state_pre_hook(load_config) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match with hooks load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # random class name to verify correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 # mode.class_name is loaded from config assert model.class_name == model.__class__.__name__ # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match with hooks removed load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # random class name to verify correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks removed accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 # mode.class_name is NOT loaded from config assert model.class_name != model.__class__.__name__ def test_accelerator_none(self): """Just test that passing None to accelerator.prepare() works.""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = None # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) 
assert dummy_obj is None def test_is_accelerator_prepared(self): """Checks that `_is_accelerator_prepared` is set properly""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = [1, 2, 3] # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) assert ( getattr(dummy_obj, "_is_accelerate_prepared", False) is False ), "Dummy object should have `_is_accelerate_prepared` set to `True`" assert ( getattr(model, "_is_accelerate_prepared", False) is True ), "Model is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(optimizer, "_is_accelerate_prepared", False) is True ), "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(scheduler, "_is_accelerate_prepared", False) is True ), "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(train_dl, "_is_accelerate_prepared", False) is True ), "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(valid_dl, "_is_accelerate_prepared", False) is True ), "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" @require_non_torch_xla @slow @require_bnb def test_accelerator_bnb(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, ) accelerator = Accelerator() # This should work model = accelerator.prepare(model) @require_non_torch_xla @slow @require_bnb def test_accelerator_bnb_cpu_error(self): """Tests that the accelerator can be used with the BNB library. 
This should fail as we are trying to load a model that is loaded between cpu and gpu""" from transformers import AutoModelForCausalLM accelerator = Accelerator() with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = "cpu" model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True ) # This should not work and get value error with self.assertRaises(ValueError): model = accelerator.prepare(model) @require_non_torch_xla @slow @require_bnb @require_multi_device def test_accelerator_bnb_multi_device(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM if torch_device == "cuda": PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU} elif torch_device == "npu": PartialState._shared_state = {"distributed_type": DistributedType.MULTI_NPU} else: raise ValueError(f"{torch_device} is not supported in test_accelerator_bnb_multi_device.") with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should not work and get value error with self.assertRaises(ValueError): _ = accelerator.prepare(model) PartialState._reset_state() @require_non_torch_xla @slow @require_bnb @require_multi_device def test_accelerator_bnb_multi_device_no_distributed(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should work _ = accelerator.prepare(model) @require_non_cpu def test_accelerator_cpu_flag_prepare(self): model = torch.nn.Linear(10, 10) sgd = torch.optim.SGD(model.parameters(), lr=0.01) accelerator = Accelerator(cpu=True) _ = accelerator.prepare(sgd) @require_non_cpu def test_can_unwrap_model_fp16(self): # test for a regression introduced in #872 # before the fix, after unwrapping with keep_fp32_wrapper=False, there would be the following error: # Linear.forward() missing 1 required positional argument: 'input' model = create_components()[0] accelerator = Accelerator(mixed_precision="fp16") inputs = torch.randn(10, 2).to(torch_device) model = accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) def test_can_unwrap_model(self): model = create_components()[0] accelerator = Accelerator(mixed_precision="no", cpu=True) inputs = torch.randn(10, 2) model = accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) 
model_loaded(inputs)
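The save/load tests above all follow the same pattern: record a scalar signature of the weights, perturb them, restore the checkpoint, and check the signature again. A condensed sketch of that round trip, assuming a single-process CPU run and reusing the idea behind the `get_signature` helper from the test module, might look like this:

```python
import tempfile

import torch
from accelerate import Accelerator


def signature(linear: torch.nn.Module) -> float:
    # Same idea as `get_signature` above: a scalar that changes whenever the weights change.
    return (linear.weight.abs().sum() + linear.bias.abs().sum()).item()


accelerator = Accelerator(cpu=True)
model = accelerator.prepare(torch.nn.Linear(2, 4))
reference = signature(accelerator.unwrap_model(model))

with tempfile.TemporaryDirectory() as tmp_dir:
    accelerator.save_state(tmp_dir)
    # Overwrite the weights, then restore them from the saved state.
    torch.nn.init.normal_(accelerator.unwrap_model(model).weight)
    accelerator.load_state(tmp_dir)
    assert abs(signature(accelerator.unwrap_model(model)) - reference) < 1e-3
```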
accelerate/tests/test_accelerator.py/0
{ "file_path": "accelerate/tests/test_accelerator.py", "repo_id": "accelerate", "token_count": 7862 }
10
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import (
    DEFAULT_LAUNCH_COMMAND,
    device_count,
    execute_subprocess_async,
    path_in_accelerate_package,
    require_cpu,
    require_huggingface_suite,
    require_multi_device,
    require_single_device,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        self.test_file_path = path_in_accelerate_package("test_utils", "scripts", "external_deps", "test_metrics.py")

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_device
    def test_metric_accelerator(self):
        self.test_metrics.main()

    @require_multi_device
    def test_metric_accelerator_multi(self):
        print(f"Found {device_count} devices.")
        cmd = DEFAULT_LAUNCH_COMMAND + [self.test_file_path]
        with patch_environment(omp_num_threads=1, ACCELERATE_LOG_LEVEL="INFO"):
            execute_subprocess_async(cmd)
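`debug_launcher` emulates a multi-process CPU run inside a single test process, which is what the `test_metric_cpu_*` cases above rely on. The worker below is only a stand-in for `test_metrics.main`, meant to illustrate the launch pattern under those assumptions:

```python
import torch
from accelerate import Accelerator, debug_launcher


def worker():
    # Each emulated process builds its own Accelerator and contributes one value.
    accelerator = Accelerator(cpu=True)
    tensor = torch.tensor([float(accelerator.process_index + 1)])
    gathered = accelerator.gather(tensor)
    if accelerator.is_main_process:
        print("gathered:", gathered)  # tensor([1., 2.]) with two processes


if __name__ == "__main__":
    debug_launcher(worker, num_processes=2)
```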
accelerate/tests/test_metrics.py/0
{ "file_path": "accelerate/tests/test_metrics.py", "repo_id": "accelerate", "token_count": 693 }
11
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate hf_table_format = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"), padding=1, with_header_hide=None, ) failed = [] group_info = [] no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}} payload = [ { "type": "header", "text": { "type": "plain_text", "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", "emoji": True, }, } ] total_num_failed = 0 for log in Path().glob("*.log"): section_num_failed = 0 with open(log) as f: for line in f: line = json.loads(line) if line.get("nodeid", "") != "": test = line["nodeid"] if line.get("duration", None) is not None: duration = f'{line["duration"]:.4f}' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) failed = [] log.unlink() message = "" all_files2failed = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" failed_table = [] files2failed = {} for test in failed_tests: data = test[0].split("::") data[0] = data[0].split("/")[-1] if data[0] not in files2failed: files2failed[data[0]] = [data[1:]] else: files2failed[data[0]] += [data[1:]] failed_table.append(data) files = [test[0] for test in failed_table] individual_files = list(set(files)) # Count number of instances in failed_tests table = [] for file in individual_files: table.append([file, len(files2failed[file])]) failed_table = tabulate( table, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right", ) message += f"\n```\n{failed_table}\n```" all_files2failed.append(files2failed) if len(message) > 3000: err = "Too many failed tests, please see the full report in the Action results." offset = len(err) + 10 message = message[: 3000 - offset] + f"\n...\n```\n{err}" print(f"### {message}") else: message = "No failed tests! 🤗" print(f"## {message}") payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient client = WebClient(token=os.environ["SLACK_API_TOKEN"]) if message != "No failed tests! 
🤗": md_report = { "type": "section", "text": { "type": "mrkdwn", "text": message, }, } payload.append(md_report) action_button = { "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*", }, "accessory": { "type": "button", "text": { "type": "plain_text", "text": "Check Action results", "emoji": True, }, "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) date_report = { "type": "context", "elements": [ { "type": "plain_text", "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload) ts = response.data["ts"] for failed_file in all_files2failed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name test_class = "" for i, row in enumerate(test_failures): if row[0] != test_class: test_class = row[0] else: test_failures[i][0] = "" payload = { "type": "section", "text": { "type": "mrkdwn", "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="#accelerate-ci-daily", thread_ts=ts, blocks=[payload], )
accelerate/utils/log_reports.py/0
{ "file_path": "accelerate/utils/log_reports.py", "repo_id": "accelerate", "token_count": 3046 }
12
# Model arguments
model_name_or_path: BramVanroy/gpt2-sft-dutch
model_revision: main
torch_dtype: bfloat16

# Data training arguments
# For definitions, see: src/h4/training/config.py
dataset_mixer:
  BramVanroy/ultra_feedback_dutch: 1.0
dataset_splits:
- train_prefs
- test_prefs
preprocessing_num_workers: 12

# DPOTrainer arguments
bf16: true
beta: 0.1
do_eval: true
evaluation_strategy: steps
eval_steps: 100
gradient_accumulation_steps: 8
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: False
hub_model_id: gpt2-dpo-dutch
learning_rate: 5.0e-7
log_level: info
logging_steps: 10
lr_scheduler_type: cosine
max_length: 1024
max_prompt_length: 512
num_train_epochs: 1
optim: adamw_torch
output_dir: data/gpt2-dpo-dutch
per_device_train_batch_size: 8
per_device_eval_batch_size: 8
push_to_hub: true
save_strategy: "steps"
save_steps: 100
save_total_limit: 1
seed: 42
warmup_ratio: 0.1
report_to:
- wandb
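For reference, a recipe like the one above is plain YAML and can be inspected directly. The snippet below uses PyYAML and an illustrative path; the handbook itself feeds these files through its own argument-parser classes rather than reading them ad hoc like this.

```python
import yaml

# Illustrative path; adjust to wherever the recipe file lives on disk.
with open("recipes/gpt2-nl/dpo/config_full.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["model_name_or_path"])          # BramVanroy/gpt2-sft-dutch
print(cfg["beta"], cfg["learning_rate"])  # DPO beta and learning rate from the recipe
print(cfg["dataset_mixer"])               # {'BramVanroy/ultra_feedback_dutch': 1.0}
```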
alignment-handbook/recipes/gpt2-nl/dpo/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/gpt2-nl/dpo/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 375 }
13
# Model arguments
model_name_or_path: HuggingFaceH4/zephyr-7b-gemma-sft-v0.1
torch_dtype: bfloat16

# Data training arguments
# For definitions, see: src/h4/training/config.py
dataset_mixer:
  argilla/dpo-mix-7k: 1.0
dataset_splits:
- train
- test
preprocessing_num_workers: 12

# DPOTrainer arguments
bf16: true
beta: 0.05
do_eval: true
evaluation_strategy: steps
eval_steps: 100
gradient_accumulation_steps: 8
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: False
hub_model_id: zephyr-7b-gemma-dpo
learning_rate: 5.0e-7
log_level: info
logging_steps: 10
lr_scheduler_type: cosine
max_length: 1024
max_prompt_length: 512
num_train_epochs: 2
optim: adamw_torch
output_dir: data/zephyr-7b-gemma-dpo
per_device_train_batch_size: 2
per_device_eval_batch_size: 4
push_to_hub: true
report_to:
- tensorboard
- wandb
save_strategy: "no"
seed: 42
warmup_ratio: 0.1
alignment-handbook/recipes/zephyr-7b-gemma/dpo/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/zephyr-7b-gemma/dpo/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 366 }
14
# Model arguments
model_name_or_path: mistralai/Mistral-7B-v0.1
model_revision: main
torch_dtype: bfloat16
use_flash_attention_2: true

# Data training arguments
dataset_mixer:
  HuggingFaceH4/ultrachat_200k: 1.0
dataset_splits:
- train_sft
- test_sft
preprocessing_num_workers: 12

# SFT trainer config
bf16: true
do_eval: true
evaluation_strategy: epoch
gradient_accumulation_steps: 2
gradient_checkpointing: true
hub_model_id: zephyr-7b-sft-full
hub_strategy: every_save
learning_rate: 2.0e-05
log_level: info
logging_steps: 5
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 2048
max_steps: -1
num_train_epochs: 1
output_dir: data/zephyr-7b-sft-full
overwrite_output_dir: true
per_device_eval_batch_size: 16
per_device_train_batch_size: 32
push_to_hub: true
remove_unused_columns: true
report_to:
- tensorboard
save_strategy: "no"
save_total_limit: null
seed: 42
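One quantity that is easy to misread from such a recipe is the effective global batch size, because it also depends on the number of GPUs, which the YAML does not record. A quick back-of-the-envelope check, assuming 8 GPUs (an assumption, not part of the recipe):

```python
per_device_train_batch_size = 32  # from the recipe above
gradient_accumulation_steps = 2   # from the recipe above
num_gpus = 8                      # assumption: not specified in the YAML

global_batch_size = per_device_train_batch_size * gradient_accumulation_steps * num_gpus
print(global_batch_size)  # 512
```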
alignment-handbook/tests/fixtures/config_sft_full.yaml/0
{ "file_path": "alignment-handbook/tests/fixtures/config_sft_full.yaml", "repo_id": "alignment-handbook", "token_count": 357 }
15
# candle [![discord server](https://dcbadge.vercel.app/api/server/hugging-face-879548962464493619)](https://discord.gg/hugging-face-879548962464493619) [![Latest version](https://img.shields.io/crates/v/candle-core.svg)](https://crates.io/crates/candle-core) [![Documentation](https://docs.rs/candle-core/badge.svg)](https://docs.rs/candle-core) ![License](https://img.shields.io/crates/l/candle-core.svg) Candle is a minimalist ML framework for Rust with a focus on performance (including GPU support) and ease of use. Try our online demos: [whisper](https://huggingface.co/spaces/lmz/candle-whisper), [LLaMA2](https://huggingface.co/spaces/lmz/candle-llama2), [T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm), [yolo](https://huggingface.co/spaces/lmz/candle-yolo), [Segment Anything](https://huggingface.co/spaces/radames/candle-segment-anything-wasm). ## Get started Make sure that you have [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) correctly installed as described in [**Installation**](https://huggingface.github.io/candle/guide/installation.html). Let's see how to run a simple matrix multiplication. Write the following to your `myapp/src/main.rs` file: ```rust use candle_core::{Device, Tensor}; fn main() -> Result<(), Box<dyn std::error::Error>> { let device = Device::Cpu; let a = Tensor::randn(0f32, 1., (2, 3), &device)?; let b = Tensor::randn(0f32, 1., (3, 4), &device)?; let c = a.matmul(&b)?; println!("{c}"); Ok(()) } ``` `cargo run` should display a tensor of shape `Tensor[[2, 4], f32]`. Having installed `candle` with Cuda support, simply define the `device` to be on GPU: ```diff - let device = Device::Cpu; + let device = Device::new_cuda(0)?; ``` For more advanced examples, please have a look at the following section. ## Check out our examples These online demos run entirely in your browser: - [yolo](https://huggingface.co/spaces/lmz/candle-yolo): pose estimation and object recognition. - [whisper](https://huggingface.co/spaces/lmz/candle-whisper): speech recognition. - [LLaMA2](https://huggingface.co/spaces/lmz/candle-llama2): text generation. - [T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm): text generation. - [Phi-1.5, and Phi-2](https://huggingface.co/spaces/radames/Candle-Phi-1.5-Wasm): text generation. - [Segment Anything Model](https://huggingface.co/spaces/radames/candle-segment-anything-wasm): Image segmentation. - [BLIP](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning): image captioning. We also provide a some command line based examples using state of the art models: - [LLaMA and LLaMA-v2](./candle-examples/examples/llama/): general LLM, includes the SOLAR-10.7B variant. - [Falcon](./candle-examples/examples/falcon/): general LLM. - [Gemma](./candle-examples/examples/gemma/): 2b and 7b general LLMs from Google Deepmind. - [Phi-1, Phi-1.5, and Phi-2](./candle-examples/examples/phi/): 1.3b and 2.7b general LLMs with performance on par with LLaMA-v2 7b. - [StableLM-3B-4E1T](./candle-examples/examples/stable-lm/): a 3b general LLM pre-trained on 1T tokens of English and code datasets. Also supports StableLM-2, a 1.6b LLM trained on 2T tokens, as well as the code variants. - [Mamba](./candle-examples/examples/mamba/): an inference only implementation of the Mamba state space model. - [Mistral7b-v0.1](./candle-examples/examples/mistral/): a 7b general LLM with better performance than all publicly available 13b models as of 2023-09-28. 
- [Mixtral8x7b-v0.1](./candle-examples/examples/mixtral/): a sparse mixture of experts 8x7b general LLM with better performance than a Llama 2 70B model with much faster inference. - [StarCoder](./candle-examples/examples/bigcode/) and [StarCoder2](./candle-examples/examples/starcoder2/): LLM specialized to code generation. - [Qwen1.5](./candle-examples/examples/qwen/): Bilingual (English/Chinese) LLMs. - [RWKV v5 and v6](./candle-examples/examples/rwkv/): An RNN with transformer level LLM performance. - [Replit-code-v1.5](./candle-examples/examples/replit-code/): a 3.3b LLM specialized for code completion. - [Yi-6B / Yi-34B](./candle-examples/examples/yi/): two bilingual (English/Chinese) general LLMs with 6b and 34b parameters. - [Quantized LLaMA](./candle-examples/examples/quantized/): quantized version of the LLaMA model using the same quantization techniques as [llama.cpp](https://github.com/ggerganov/llama.cpp). <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/quantized/assets/aoc.gif" width="600"> - [Stable Diffusion](./candle-examples/examples/stable-diffusion/): text to image generative model, support for the 1.5, 2.1, SDXL 1.0 and Turbo versions. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg" width="200"> - [Wuerstchen](./candle-examples/examples/wuerstchen/): another text to image generative model. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/wuerstchen/assets/cat.jpg" width="200"> - [yolo-v3](./candle-examples/examples/yolo-v3/) and [yolo-v8](./candle-examples/examples/yolo-v8/): object detection and pose estimation models. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.od.jpg" width="200"><img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.pose.jpg" width="200"> - [segment-anything](./candle-examples/examples/segment-anything/): image segmentation model with prompt. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/segment-anything/assets/sam_merged.jpg" width="200"> - [SegFormer](./candle-examples/examples/segformer/): transformer based semantic segmantation model. - [Whisper](./candle-examples/examples/whisper/): speech recognition model. - [EnCodec](./candle-examples/examples/encodec/): high-quality audio compression model using residual vector quantization. - [MetaVoice](./candle-examples/examples/metavoice/): foundational model for text-to-speech. - [T5](./candle-examples/examples/t5), [Bert](./candle-examples/examples/bert/), [JinaBert](./candle-examples/examples/jina-bert/) : useful for sentence embeddings. - [DINOv2](./candle-examples/examples/dinov2/): computer vision model trained using self-supervision (can be used for imagenet classification, depth evaluation, segmentation). - [VGG](./candle-examples/examples/vgg/), [RepVGG](./candle-examples/examples/repvgg): computer vision models. - [BLIP](./candle-examples/examples/blip/): image to text model, can be used to generate captions for an image. - [TrOCR](./candle-examples/examples/trocr/): a transformer OCR model, with dedicated submodels for hand-writing and printed recognition. - [Marian-MT](./candle-examples/examples/marian-mt/): neural machine translation model, generates the translated text from the input text. 
Run them using commands like: ``` cargo run --example quantized --release ``` In order to use **CUDA** add `--features cuda` to the example command line. If you have cuDNN installed, use `--features cudnn` for even more speedups. There are also some wasm examples for whisper and [llama2.c](https://github.com/karpathy/llama2.c). You can either build them with `trunk` or try them online: [whisper](https://huggingface.co/spaces/lmz/candle-whisper), [llama2](https://huggingface.co/spaces/lmz/candle-llama2), [T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm), [Phi-1.5, and Phi-2](https://huggingface.co/spaces/radames/Candle-Phi-1.5-Wasm), [Segment Anything Model](https://huggingface.co/spaces/radames/candle-segment-anything-wasm). For LLaMA2, run the following command to retrieve the weight files and start a test server: ```bash cd candle-wasm-examples/llama2-c wget https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/model.bin wget https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/tokenizer.json trunk serve --release --port 8081 ``` And then head over to [http://localhost:8081/](http://localhost:8081/). <!--- ANCHOR: useful_libraries ---> ## Useful External Resources - [`candle-tutorial`](https://github.com/ToluClassics/candle-tutorial): A very detailed tutorial showing how to convert a PyTorch model to Candle. - [`candle-lora`](https://github.com/EricLBuehler/candle-lora): Efficient and ergonomic LoRA implementation for Candle. `candle-lora` has out-of-the-box LoRA support for many models from Candle, which can be found [here](https://github.com/EricLBuehler/candle-lora/tree/master/candle-lora-transformers/examples). - [`optimisers`](https://github.com/KGrewal1/optimisers): A collection of optimisers including SGD with momentum, AdaGrad, AdaDelta, AdaMax, NAdam, RAdam, and RMSprop. - [`candle-vllm`](https://github.com/EricLBuehler/candle-vllm): Efficient platform for inference and serving local LLMs including an OpenAI compatible API server. - [`candle-ext`](https://github.com/mokeyish/candle-ext): An extension library to Candle that provides PyTorch functions not currently available in Candle. - [`kalosm`](https://github.com/floneum/floneum/tree/master/interfaces/kalosm): A multi-modal meta-framework in Rust for interfacing with local pre-trained models with support for controlled generation, custom samplers, in-memory vector databases, audio transcription, and more. - [`candle-sampling`](https://github.com/EricLBuehler/candle-sampling): Sampling techniques for Candle. - [`gpt-from-scratch-rs`](https://github.com/jeroenvlek/gpt-from-scratch-rs): A port of Andrej Karpathy's _Let's build GPT_ tutorial on YouTube showcasing the Candle API on a toy problem. - [`candle-einops`](https://github.com/tomsanbear/candle-einops): A pure rust implementation of the python [einops](https://github.com/arogozhnikov/einops) library. If you have an addition to this list, please submit a pull request. <!--- ANCHOR_END: useful_libraries ---> <!--- ANCHOR: features ---> ## Features - Simple syntax, looks and feels like PyTorch. - Model training. - Embed user-defined ops/kernels, such as [flash-attention v2](https://github.com/huggingface/candle/blob/89ba005962495f2bfbda286e185e9c3c7f5300a3/candle-flash-attn/src/lib.rs#L152). - Backends. - Optimized CPU backend with optional MKL support for x86 and Accelerate for macs. - CUDA backend for efficiently running on GPUs, multiple GPU distribution via NCCL. - WASM support, run your models in a browser. - Included models. 
- Language Models. - LLaMA v1 and v2 with variants such as SOLAR-10.7B. - Falcon. - StarCoder, StarCoder2. - Phi 1, 1.5, and 2. - Mamba, Minimal Mamba - Gemma 2b and 7b. - Mistral 7b v0.1. - Mixtral 8x7b v0.1. - StableLM-3B-4E1T, StableLM-2-1.6B, Stable-Code-3B. - Replit-code-v1.5-3B. - Bert. - Yi-6B and Yi-34B. - Qwen1.5. - RWKV v5 and v6. - Quantized LLMs. - Llama 7b, 13b, 70b, as well as the chat and code variants. - Mistral 7b, and 7b instruct. - Mixtral 8x7b. - Zephyr 7b a and b (Mistral-7b based). - OpenChat 3.5 (Mistral-7b based). - Text to text. - T5 and its variants: FlanT5, UL2, MADLAD400 (translation), CoEdit (Grammar correction). - Marian MT (Machine Translation). - Text to image. - Stable Diffusion v1.5, v2.1, XL v1.0. - Wurstchen v2. - Image to text. - BLIP. - TrOCR. - Audio. - Whisper, multi-lingual speech-to-text. - EnCodec, audio compression model. - MetaVoice-1B, text-to-speech model. - Computer Vision Models. - DINOv2, ConvMixer, EfficientNet, ResNet, ViT, VGG, RepVGG, ConvNeXT, ConvNeXTv2, MobileOne, EfficientVit (MSRA). - yolo-v3, yolo-v8. - Segment-Anything Model (SAM). - SegFormer. - File formats: load models from safetensors, npz, ggml, or PyTorch files. - Serverless (on CPU), small and fast deployments. - Quantization support using the llama.cpp quantized types. <!--- ANCHOR_END: features ---> ## How to use <!--- ANCHOR: cheatsheet ---> Cheatsheet: | | Using PyTorch | Using Candle | |------------|------------------------------------------|------------------------------------------------------------------| | Creation | `torch.Tensor([[1, 2], [3, 4]])` | `Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?` | | Creation | `torch.zeros((2, 2))` | `Tensor::zeros((2, 2), DType::F32, &Device::Cpu)?` | | Indexing | `tensor[:, :4]` | `tensor.i((.., ..4))?` | | Operations | `tensor.view((2, 2))` | `tensor.reshape((2, 2))?` | | Operations | `a.matmul(b)` | `a.matmul(&b)?` | | Arithmetic | `a + b` | `&a + &b` | | Device | `tensor.to(device="cuda")` | `tensor.to_device(&Device::new_cuda(0)?)?` | | Dtype | `tensor.to(dtype=torch.float16)` | `tensor.to_dtype(&DType::F16)?` | | Saving | `torch.save({"A": A}, "model.bin")` | `candle::safetensors::save(&HashMap::from([("A", A)]), "model.safetensors")?` | | Loading | `weights = torch.load("model.bin")` | `candle::safetensors::load("model.safetensors", &device)` | <!--- ANCHOR_END: cheatsheet ---> ## Structure - [candle-core](./candle-core): Core ops, devices, and `Tensor` struct definition - [candle-nn](./candle-nn/): Tools to build real models - [candle-examples](./candle-examples/): Examples of using the library in realistic settings - [candle-kernels](./candle-kernels/): CUDA custom kernels - [candle-datasets](./candle-datasets/): Datasets and data loaders. - [candle-transformers](./candle-transformers): transformers-related utilities. - [candle-flash-attn](./candle-flash-attn): Flash attention v2 layer. - [candle-onnx](./candle-onnx/): ONNX model evaluation. ## FAQ ### Why should I use Candle? Candle's core goal is to *make serverless inference possible*. Full machine learning frameworks like PyTorch are very large, which makes creating instances on a cluster slow. Candle allows deployment of lightweight binaries. Secondly, Candle lets you *remove Python* from production workloads. Python overhead can seriously hurt performance, and the [GIL](https://www.backblaze.com/blog/the-python-gil-past-present-and-future/) is a notorious source of headaches. Finally, Rust is cool! 
A lot of the HF ecosystem already has Rust crates, like [safetensors](https://github.com/huggingface/safetensors) and [tokenizers](https://github.com/huggingface/tokenizers). ### Other ML frameworks - [dfdx](https://github.com/coreylowman/dfdx) is a formidable crate, with shapes being included in types. This prevents a lot of headaches by getting the compiler to complain about shape mismatches right off the bat. However, we found that some features still require nightly, and writing code can be a bit daunting for non rust experts. We're leveraging and contributing to other core crates for the runtime so hopefully both crates can benefit from each other. - [burn](https://github.com/burn-rs/burn) is a general crate that can leverage multiple backends so you can choose the best engine for your workload. - [tch-rs](https://github.com/LaurentMazare/tch-rs.git) Bindings to the torch library in Rust. Extremely versatile, but they bring in the entire torch library into the runtime. The main contributor of `tch-rs` is also involved in the development of `candle`. ### Common Errors #### Missing symbols when compiling with the mkl feature. If you get some missing symbols when compiling binaries/tests using the mkl or accelerate features, e.g. for mkl you get: ``` = note: /usr/bin/ld: (....o): in function `blas::sgemm': .../blas-0.22.0/src/lib.rs:1944: undefined reference to `sgemm_' collect2: error: ld returned 1 exit status = note: some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified = note: use the `-l` flag to specify native libraries to link = note: use the `cargo:rustc-link-lib` directive to specify the native libraries to link with Cargo ``` or for accelerate: ``` Undefined symbols for architecture arm64: "_dgemm_", referenced from: candle_core::accelerate::dgemm::h1b71a038552bcabe in libcandle_core... "_sgemm_", referenced from: candle_core::accelerate::sgemm::h2cf21c592cba3c47 in libcandle_core... ld: symbol(s) not found for architecture arm64 ``` This is likely due to a missing linker flag that was needed to enable the mkl library. You can try adding the following for mkl at the top of your binary: ```rust extern crate intel_mkl_src; ``` or for accelerate: ```rust extern crate accelerate_src; ``` #### Cannot run the LLaMA examples: access to source requires login credentials ``` Error: request error: https://huggingface.co/meta-llama/Llama-2-7b-hf/resolve/main/tokenizer.json: status code 401 ``` This is likely because you're not permissioned for the LLaMA-v2 model. To fix this, you have to register on the huggingface-hub, accept the [LLaMA-v2 model conditions](https://huggingface.co/meta-llama/Llama-2-7b-hf), and set up your authentication token. See issue [#350](https://github.com/huggingface/candle/issues/350) for more details. #### Missing cute/cutlass headers when compiling flash-attn ``` In file included from kernels/flash_fwd_launch_template.h:11:0, from kernels/flash_fwd_hdim224_fp16_sm80.cu:5: kernels/flash_fwd_kernel.h:8:10: fatal error: cute/algorithm/copy.hpp: No such file or directory #include <cute/algorithm/copy.hpp> ^~~~~~~~~~~~~~~~~~~~~~~~~ compilation terminated. Error: nvcc error while compiling: ``` [cutlass](https://github.com/NVIDIA/cutlass) is provided as a git submodule so you may want to run the following command to check it in properly. 
```bash git submodule update --init ``` #### Compiling with flash-attention fails ``` /usr/include/c++/11/bits/std_function.h:530:146: error: parameter packs not expanded with ‘...’: ``` This is a bug in gcc-11 triggered by the Cuda compiler. To fix this, install a different, supported gcc version - for example gcc-10, and specify the path to the compiler in the CANDLE_NVCC_CCBIN environment variable. ``` env CANDLE_NVCC_CCBIN=/usr/lib/gcc/x86_64-linux-gnu/10 cargo ... ``` #### Linking error on windows when running rustdoc or mdbook tests ``` Couldn't compile the test. ---- .\candle-book\src\inference\hub.md - Using_the_hub::Using_in_a_real_model_ (line 50) stdout ---- error: linking with `link.exe` failed: exit code: 1181 //very long chain of linking = note: LINK : fatal error LNK1181: cannot open input file 'windows.0.48.5.lib' ``` Make sure you link all native libraries that might be located outside a project target, e.g., to run mdbook tests, you should run: ``` mdbook test candle-book -L .\target\debug\deps\ ` -L native=$env:USERPROFILE\.cargo\registry\src\index.crates.io-6f17d22bba15001f\windows_x86_64_msvc-0.42.2\lib ` -L native=$env:USERPROFILE\.cargo\registry\src\index.crates.io-6f17d22bba15001f\windows_x86_64_msvc-0.48.5\lib ``` #### Extremely slow model load time with WSL This may be caused by the models being loaded from `/mnt/c`, more details on [stackoverflow](https://stackoverflow.com/questions/68972448/why-is-wsl-extremely-slow-when-compared-with-native-windows-npm-yarn-processing). #### Tracking down errors You can set `RUST_BACKTRACE=1` to be provided with backtraces when a candle error is generated.
candle/README.md/0
{ "file_path": "candle/README.md", "repo_id": "candle", "token_count": 7636 }
16
# PyTorch cheatsheet

{{#include ../../../README.md:cheatsheet}}
candle/candle-book/src/guide/cheatsheet.md/0
{ "file_path": "candle/candle-book/src/guide/cheatsheet.md", "repo_id": "candle", "token_count": 26 }
17
//! Implement conversion traits for tensors use crate::{DType, Device, Error, Tensor, WithDType}; use half::{bf16, f16, slice::HalfFloatSliceExt}; use std::convert::TryFrom; impl<T: WithDType> TryFrom<&Tensor> for Vec<T> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec1::<T>() } } impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<T>> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec2::<T>() } } impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<Vec<T>>> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec3::<T>() } } impl<T: WithDType> TryFrom<Tensor> for Vec<T> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<T>::try_from(&tensor) } } impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<T>> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<Vec<T>>::try_from(&tensor) } } impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<Vec<T>>> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<Vec<Vec<T>>>::try_from(&tensor) } } impl<T: WithDType> TryFrom<&[T]> for Tensor { type Error = Error; fn try_from(v: &[T]) -> Result<Self, Self::Error> { Tensor::from_slice(v, v.len(), &Device::Cpu) } } impl<T: WithDType> TryFrom<Vec<T>> for Tensor { type Error = Error; fn try_from(v: Vec<T>) -> Result<Self, Self::Error> { let len = v.len(); Tensor::from_vec(v, len, &Device::Cpu) } } macro_rules! from_tensor { ($typ:ident) => { impl TryFrom<&Tensor> for $typ { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_scalar::<$typ>() } } impl TryFrom<Tensor> for $typ { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { $typ::try_from(&tensor) } } impl TryFrom<$typ> for Tensor { type Error = Error; fn try_from(v: $typ) -> Result<Self, Self::Error> { Tensor::new(v, &Device::Cpu) } } }; } from_tensor!(f64); from_tensor!(f32); from_tensor!(f16); from_tensor!(bf16); from_tensor!(i64); from_tensor!(u32); from_tensor!(u8); impl Tensor { pub fn write_bytes<W: std::io::Write>(&self, f: &mut W) -> crate::Result<()> { use byteorder::{LittleEndian, WriteBytesExt}; let vs = self.flatten_all()?; match self.dtype() { DType::BF16 => { let vs = vs.to_vec1::<bf16>()?; for &v in vs.reinterpret_cast() { f.write_u16::<LittleEndian>(v)? } } DType::F16 => { let vs = vs.to_vec1::<f16>()?; for &v in vs.reinterpret_cast() { f.write_u16::<LittleEndian>(v)? } } DType::F32 => { // TODO: Avoid using a buffer when data is already on the CPU. for v in vs.to_vec1::<f32>()? { f.write_f32::<LittleEndian>(v)? } } DType::F64 => { for v in vs.to_vec1::<f64>()? { f.write_f64::<LittleEndian>(v)? } } DType::U32 => { for v in vs.to_vec1::<u32>()? { f.write_u32::<LittleEndian>(v)? } } DType::I64 => { for v in vs.to_vec1::<i64>()? { f.write_i64::<LittleEndian>(v)? } } DType::U8 => { let vs = vs.to_vec1::<u8>()?; f.write_all(&vs)?; } } Ok(()) } }
candle/candle-core/src/convert.rs/0
{ "file_path": "candle/candle-core/src/convert.rs", "repo_id": "candle", "token_count": 2242 }
18
use crate::{Error, Tensor}; use std::ops::{ Bound, Range, RangeBounds, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, }; impl Tensor { /// Intended to be use by the trait `.i()` /// /// ``` /// # use candle_core::{Tensor, DType, Device, IndexOp}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = a.i(0..1)?; /// assert_eq!(c.shape().dims(), &[1, 3]); /// /// let c = a.i(0)?; /// assert_eq!(c.shape().dims(), &[3]); /// /// let c = a.i((.., ..2) )?; /// assert_eq!(c.shape().dims(), &[2, 2]); /// /// let c = a.i((.., ..=2))?; /// assert_eq!(c.shape().dims(), &[2, 3]); /// /// # Ok::<(), candle_core::Error>(()) /// ``` fn index(&self, indexers: &[TensorIndexer]) -> Result<Self, Error> { let mut x = self.clone(); let dims = self.shape().dims(); let mut current_dim = 0; for (i, indexer) in indexers.iter().enumerate() { x = match indexer { TensorIndexer::Select(n) => x.narrow(current_dim, *n, 1)?.squeeze(current_dim)?, TensorIndexer::Narrow(left_bound, right_bound) => { let start = match left_bound { Bound::Included(n) => *n, Bound::Excluded(n) => *n + 1, Bound::Unbounded => 0, }; let stop = match right_bound { Bound::Included(n) => *n + 1, Bound::Excluded(n) => *n, Bound::Unbounded => dims[i], }; let out = x.narrow(current_dim, start, stop.saturating_sub(start))?; current_dim += 1; out } TensorIndexer::IndexSelect(indexes) => { if indexes.rank() != 1 { crate::bail!("multi-dimensional tensor indexing is not supported") } let out = x.index_select(&indexes.to_device(x.device())?, current_dim)?; current_dim += 1; out } TensorIndexer::Err(e) => crate::bail!("indexing error {e:?}"), }; } Ok(x) } } #[derive(Debug)] /// Generic structure used to index a slice of the tensor pub enum TensorIndexer { /// This selects the elements for which an index has some specific value. Select(usize), /// This is a regular slice, purely indexing a chunk of the tensor Narrow(Bound<usize>, Bound<usize>), /// Indexing via a 1d tensor IndexSelect(Tensor), Err(Error), } impl From<usize> for TensorIndexer { fn from(index: usize) -> Self { TensorIndexer::Select(index) } } impl From<&[u32]> for TensorIndexer { fn from(index: &[u32]) -> Self { match Tensor::new(index, &crate::Device::Cpu) { Ok(tensor) => TensorIndexer::IndexSelect(tensor), Err(e) => TensorIndexer::Err(e), } } } impl From<Vec<u32>> for TensorIndexer { fn from(index: Vec<u32>) -> Self { let len = index.len(); match Tensor::from_vec(index, len, &crate::Device::Cpu) { Ok(tensor) => TensorIndexer::IndexSelect(tensor), Err(e) => TensorIndexer::Err(e), } } } impl From<&Tensor> for TensorIndexer { fn from(tensor: &Tensor) -> Self { TensorIndexer::IndexSelect(tensor.clone()) } } trait RB: RangeBounds<usize> {} impl RB for Range<usize> {} impl RB for RangeFrom<usize> {} impl RB for RangeFull {} impl RB for RangeInclusive<usize> {} impl RB for RangeTo<usize> {} impl RB for RangeToInclusive<usize> {} impl<T: RB> From<T> for TensorIndexer { fn from(range: T) -> Self { use std::ops::Bound::*; let start = match range.start_bound() { Included(idx) => Included(*idx), Excluded(idx) => Excluded(*idx), Unbounded => Unbounded, }; let end = match range.end_bound() { Included(idx) => Included(*idx), Excluded(idx) => Excluded(*idx), Unbounded => Unbounded, }; TensorIndexer::Narrow(start, end) } } /// Trait used to implement multiple signatures for ease of use of the slicing /// of a tensor pub trait IndexOp<T> { /// Returns a slicing iterator which are the chunks of data necessary to /// reconstruct the desired tensor. 
fn i(&self, index: T) -> Result<Tensor, Error>; } impl<T> IndexOp<T> for Tensor where T: Into<TensorIndexer>, { fn i(&self, index: T) -> Result<Tensor, Error> { self.index(&[index.into()]) } } macro_rules! index_op_tuple { ($($t:ident),+) => { #[allow(non_snake_case)] impl<$($t),*> IndexOp<($($t,)*)> for Tensor where $($t: Into<TensorIndexer>,)* { fn i(&self, ($($t,)*): ($($t,)*)) -> Result<Tensor, Error> { self.index(&[$($t.into(),)*]) } } }; } index_op_tuple!(A); index_op_tuple!(A, B); index_op_tuple!(A, B, C); index_op_tuple!(A, B, C, D); index_op_tuple!(A, B, C, D, E); index_op_tuple!(A, B, C, D, E, F); index_op_tuple!(A, B, C, D, E, F, G);
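To complement the doc-test above, the following hedged sketch (again assuming the public `candle_core` crate, not taken from the repo) shows tuple indexing via the `index_op_tuple!` impls and index-select through a 1d tensor of indices:

```rust
use candle_core::{DType, Device, IndexOp, Tensor};

fn main() -> candle_core::Result<()> {
    let a = Tensor::zeros((2, 3, 4), DType::F32, &Device::Cpu)?;
    // Tuple indexing mixes scalar selection (squeezes the dim), full slices and ranges.
    let b = a.i((1, .., ..2))?;
    assert_eq!(b.dims(), &[3, 2]);
    // A 1d u32 tensor performs an index-select along the corresponding dimension.
    let idx = Tensor::new(&[0u32, 2], &Device::Cpu)?;
    let c = a.i((&idx, .., ..))?;
    assert_eq!(c.dims(), &[2, 3, 4]);
    Ok(())
}
```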
candle/candle-core/src/indexer.rs/0
{ "file_path": "candle/candle-core/src/indexer.rs", "repo_id": "candle", "token_count": 2652 }
19
use crate::{CpuStorage, Device, Result, Shape, Storage, Tensor}; use k_quants::*; use std::borrow::Cow; #[cfg(target_feature = "avx")] pub mod avx; mod dummy_cuda; mod dummy_metal; pub mod ggml_file; pub mod gguf_file; pub mod k_quants; #[cfg(feature = "metal")] pub mod metal; #[cfg(not(feature = "metal"))] mod metal { pub use super::dummy_metal::*; } #[cfg(feature = "cuda")] pub mod cuda; #[cfg(not(feature = "cuda"))] mod cuda { pub use super::dummy_cuda::*; } #[cfg(target_feature = "neon")] pub mod neon; #[cfg(target_feature = "simd128")] pub mod simd128; pub mod utils; use half::f16; pub use k_quants::GgmlType; pub struct QTensor { storage: QStorage, shape: Shape, } impl Device { fn qzeros(&self, elem_count: usize, dtype: GgmlDType) -> Result<QStorage> { match self { Device::Cpu => { let storage = dtype.cpu_zeros(elem_count); Ok(QStorage::Cpu(storage)) } Device::Metal(metal) => { let storage = metal::QMetalStorage::zeros(metal, elem_count, dtype)?; Ok(QStorage::Metal(storage)) } Device::Cuda(cuda) => { let storage = cuda::QCudaStorage::zeros(cuda, elem_count, dtype)?; Ok(QStorage::Cuda(storage)) } } } } pub enum QStorage { Cpu(Box<dyn QuantizedType>), Metal(metal::QMetalStorage), Cuda(cuda::QCudaStorage), } impl QStorage { fn block_size(&self) -> usize { match self { QStorage::Cpu(storage) => storage.block_size(), QStorage::Metal(storage) => storage.dtype().block_size(), QStorage::Cuda(storage) => storage.dtype().block_size(), } } fn dtype(&self) -> GgmlDType { match self { QStorage::Cpu(storage) => storage.dtype(), QStorage::Metal(storage) => storage.dtype(), QStorage::Cuda(storage) => storage.dtype(), } } fn device(&self) -> Device { match self { QStorage::Cpu(_storage) => Device::Cpu, QStorage::Metal(storage) => Device::Metal(storage.device().clone()), QStorage::Cuda(storage) => Device::Cuda(storage.device().clone()), } } fn size_in_bytes(&self) -> usize { match self { QStorage::Cpu(storage) => storage.storage_size_in_bytes(), QStorage::Metal(storage) => storage.storage_size_in_bytes(), QStorage::Cuda(storage) => storage.storage_size_in_bytes(), } } fn quantize(&mut self, src: &Storage) -> Result<()> { match (self, src) { (QStorage::Cpu(storage), Storage::Cpu(src)) => { storage.from_float(src.as_slice::<f32>()?)?; } (QStorage::Metal(storage), Storage::Metal(src)) => storage.quantize(src)?, (QStorage::Cuda(storage), Storage::Cuda(src)) => storage.quantize(src)?, _ => crate::bail!("Invalid dequantize storage locations do not match"), } Ok(()) } fn dequantize(&self, elem_count: usize) -> Result<Storage> { match self { QStorage::Cpu(storage) => Ok(Storage::Cpu(storage.dequantize(elem_count)?)), QStorage::Metal(storage) => Ok(Storage::Metal(storage.dequantize(elem_count)?)), QStorage::Cuda(storage) => Ok(Storage::Cuda(storage.dequantize(elem_count)?)), } } fn data(&self) -> Result<Cow<[u8]>> { match self { QStorage::Cpu(storage) => { let data_ptr = storage.as_ptr(); let size_in_bytes = storage.storage_size_in_bytes(); let data = unsafe { std::slice::from_raw_parts(data_ptr, size_in_bytes) }; Ok(Cow::from(data)) } QStorage::Metal(_) | QStorage::Cuda(_) => { crate::bail!("not implemented"); } } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum GgmlDType { F32, F16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1, Q2K, Q3K, Q4K, Q5K, Q6K, Q8K, } impl GgmlDType { pub(crate) fn from_u32(u: u32) -> Result<Self> { let dtype = match u { 0 => Self::F32, 1 => Self::F16, 2 => Self::Q4_0, 3 => Self::Q4_1, 6 => Self::Q5_0, 7 => Self::Q5_1, 8 => Self::Q8_0, 9 => Self::Q8_1, 10 => Self::Q2K, 11 => 
Self::Q3K, 12 => Self::Q4K, 13 => Self::Q5K, 14 => Self::Q6K, 15 => Self::Q8K, _ => crate::bail!("unknown dtype for tensor {u}"), }; Ok(dtype) } pub(crate) fn to_u32(self) -> u32 { match self { Self::F32 => 0, Self::F16 => 1, Self::Q4_0 => 2, Self::Q4_1 => 3, Self::Q5_0 => 6, Self::Q5_1 => 7, Self::Q8_0 => 8, Self::Q8_1 => 9, Self::Q2K => 10, Self::Q3K => 11, Self::Q4K => 12, Self::Q5K => 13, Self::Q6K => 14, Self::Q8K => 15, } } /// The block dtype pub fn cpu_zeros(&self, elem_count: usize) -> Box<dyn QuantizedType> { match self { Self::F32 => Box::new(vec![f32::zeros(); elem_count]), Self::F16 => Box::new(vec![f16::zeros(); elem_count]), Self::Q4_0 => Box::new(vec![BlockQ4_0::zeros(); elem_count / BlockQ4_0::BLCK_SIZE]), Self::Q4_1 => Box::new(vec![BlockQ4_1::zeros(); elem_count / BlockQ4_1::BLCK_SIZE]), Self::Q5_0 => Box::new(vec![BlockQ5_0::zeros(); elem_count / BlockQ5_0::BLCK_SIZE]), Self::Q5_1 => Box::new(vec![BlockQ5_1::zeros(); elem_count / BlockQ5_1::BLCK_SIZE]), Self::Q8_0 => Box::new(vec![BlockQ8_0::zeros(); elem_count / BlockQ8_0::BLCK_SIZE]), Self::Q8_1 => Box::new(vec![BlockQ8_1::zeros(); elem_count / BlockQ8_1::BLCK_SIZE]), Self::Q2K => Box::new(vec![BlockQ2K::zeros(); elem_count / BlockQ2K::BLCK_SIZE]), Self::Q3K => Box::new(vec![BlockQ3K::zeros(); elem_count / BlockQ3K::BLCK_SIZE]), Self::Q4K => Box::new(vec![BlockQ4K::zeros(); elem_count / BlockQ4K::BLCK_SIZE]), Self::Q5K => Box::new(vec![BlockQ5K::zeros(); elem_count / BlockQ5K::BLCK_SIZE]), Self::Q6K => Box::new(vec![BlockQ6K::zeros(); elem_count / BlockQ6K::BLCK_SIZE]), Self::Q8K => Box::new(vec![BlockQ8K::zeros(); elem_count / BlockQ8K::BLCK_SIZE]), } } /// The type size for blocks in bytes. pub fn type_size(&self) -> usize { use k_quants::*; match self { Self::F32 => 4, Self::F16 => 2, Self::Q4_0 => std::mem::size_of::<BlockQ4_0>(), Self::Q4_1 => std::mem::size_of::<BlockQ4_1>(), Self::Q5_0 => std::mem::size_of::<BlockQ5_0>(), Self::Q5_1 => std::mem::size_of::<BlockQ5_1>(), // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L932 Self::Q8_0 => std::mem::size_of::<BlockQ8_0>(), Self::Q8_1 => std::mem::size_of::<BlockQ8_1>(), Self::Q2K => std::mem::size_of::<BlockQ2K>(), Self::Q3K => std::mem::size_of::<BlockQ3K>(), Self::Q4K => std::mem::size_of::<BlockQ4K>(), Self::Q5K => std::mem::size_of::<BlockQ5K>(), Self::Q6K => std::mem::size_of::<BlockQ6K>(), Self::Q8K => std::mem::size_of::<BlockQ8K>(), } } /// The block size, i.e. the number of elements stored in each block. pub fn block_size(&self) -> usize { match self { Self::F32 => 1, Self::F16 => 1, Self::Q4_0 => k_quants::QK4_0, Self::Q4_1 => k_quants::QK4_1, Self::Q5_0 => k_quants::QK5_0, Self::Q5_1 => k_quants::QK5_1, Self::Q8_0 => k_quants::QK8_0, Self::Q8_1 => k_quants::QK8_1, Self::Q2K | Self::Q3K | Self::Q4K | Self::Q5K | Self::Q6K | Self::Q8K => k_quants::QK_K, } } } // A version of GgmlType without `vec_dot` so that it can be dyn boxed. 
pub trait QuantizedType: Send + Sync { fn dtype(&self) -> GgmlDType; fn matmul_t(&self, mkn: (usize, usize, usize), lhs: &[f32], dst: &mut [f32]) -> Result<()>; fn dequantize(&self, elem_count: usize) -> Result<CpuStorage>; fn storage_size_in_bytes(&self) -> usize; fn as_ptr(&self) -> *const u8; fn block_size(&self) -> usize; #[allow(clippy::wrong_self_convention)] fn from_float(&mut self, xs: &[f32]) -> Result<()>; fn size(&self) -> usize; } impl<T: k_quants::GgmlType + Send + Sync> QuantizedType for Vec<T> { fn matmul_t(&self, mkn: (usize, usize, usize), lhs: &[f32], dst: &mut [f32]) -> Result<()> { k_quants::matmul(mkn, lhs, self.as_slice(), dst) } fn size(&self) -> usize { self.len() * core::mem::size_of::<T>() } fn from_float(&mut self, xs: &[f32]) -> Result<()> { T::from_float(xs, self) } fn dtype(&self) -> GgmlDType { T::DTYPE } fn block_size(&self) -> usize { T::BLCK_SIZE } fn dequantize(&self, elem_count: usize) -> Result<CpuStorage> { let mut ys = vec![0.0f32; elem_count]; T::to_float(self.as_slice(), &mut ys)?; Ok(CpuStorage::F32(ys)) } fn storage_size_in_bytes(&self) -> usize { self.len() * std::mem::size_of::<T>() } fn as_ptr(&self) -> *const u8 { self.as_ptr() as *const u8 } } impl std::fmt::Debug for QTensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "QTensor[{:?}; {:?}]", self.shape, self.dtype()) } } fn check_shape(shape: &Shape, block_size: usize) -> Result<()> { let dims = shape.dims(); if dims.is_empty() { crate::bail!("scalar tensor cannot be quantized {shape:?}") } if dims[dims.len() - 1] % block_size != 0 { crate::bail!( "quantized tensor must have their last dim divisible by block size {shape:?} {}", block_size ) } Ok(()) } impl QTensor { pub fn new<S: Into<Shape>>(storage: QStorage, shape: S) -> Result<Self> { let shape = shape.into(); check_shape(&shape, storage.block_size())?; Ok(Self { storage, shape }) } pub fn quantize(src: &Tensor, dtype: GgmlDType) -> Result<Self> { let shape = src.shape(); let block_size = dtype.block_size(); check_shape(shape, block_size)?; let src = src.to_dtype(crate::DType::F32)?.flatten_all()?; let elem_count = shape.elem_count(); if elem_count % block_size != 0 { crate::bail!( "tensor size ({shape:?}) is not divisible by block size {}", block_size ) } let mut storage = src.device().qzeros(elem_count, dtype)?; storage.quantize(&src.storage())?; Ok(Self { storage, shape: shape.clone(), }) } pub fn dtype(&self) -> GgmlDType { self.storage.dtype() } pub fn device(&self) -> Device { self.storage.device() } pub fn rank(&self) -> usize { self.shape.rank() } pub fn shape(&self) -> &Shape { &self.shape } pub fn dequantize(&self, device: &Device) -> Result<Tensor> { let storage = self.storage.dequantize(self.shape.elem_count())?; let none = crate::op::BackpropOp::none(); let is_variable = false; crate::tensor::from_storage(storage, self.shape.clone(), none, is_variable) .to_device(device) } pub fn storage_size_in_bytes(&self) -> usize { self.storage.size_in_bytes() } pub fn data(&self) -> Result<Cow<'_, [u8]>> { self.storage.data() } } #[derive(Clone, Debug)] pub enum QMatMul { QTensor(std::sync::Arc<QTensor>), Tensor(Tensor), } thread_local! 
{ static DEQUANTIZE_ALL: bool = { match std::env::var("CANDLE_DEQUANTIZE_ALL") { Ok(s) => { !s.is_empty() && s != "0" }, Err(_) => false, } } } impl QMatMul { pub fn from_arc(qtensor: std::sync::Arc<QTensor>) -> Result<Self> { let dequantize = match qtensor.dtype() { GgmlDType::F32 | GgmlDType::F16 => true, _ => DEQUANTIZE_ALL.with(|b| *b), }; let t = if dequantize { let tensor = qtensor.dequantize(&qtensor.device())?; Self::Tensor(tensor) } else { Self::QTensor(qtensor) }; Ok(t) } pub fn from_qtensor(qtensor: QTensor) -> Result<Self> { Self::from_arc(std::sync::Arc::new(qtensor)) } } impl crate::CustomOp1 for QTensor { fn name(&self) -> &'static str { "qmatmul" } fn cpu_fwd( &self, storage: &crate::CpuStorage, layout: &crate::Layout, ) -> Result<(crate::CpuStorage, Shape)> { if !layout.is_contiguous() { crate::bail!("input tensor is not contiguous {layout:?}") } let src_shape = layout.shape(); // self is transposed so n is first then k. let (n, k) = self.shape.dims2()?; if src_shape.rank() < 2 { crate::bail!("input tensor has only one dimension {layout:?}") } let mut dst_shape = src_shape.dims().to_vec(); let last_k = dst_shape.pop().unwrap(); if last_k != k { crate::bail!("input tensor {layout:?} incompatible with {:?}", self.shape) } dst_shape.push(n); let dst_shape = Shape::from(dst_shape); #[allow(clippy::infallible_destructuring_match)] let self_storage = match &self.storage { QStorage::Cpu(storage) => storage, QStorage::Metal(_) | QStorage::Cuda(_) => crate::bail!("Invalid storage"), }; let slice = storage.as_slice::<f32>()?; let slice = &slice[layout.start_offset()..layout.start_offset() + src_shape.elem_count()]; let mut dst_storage = vec![0f32; dst_shape.elem_count()]; self_storage.matmul_t((dst_shape.elem_count() / n, k, n), slice, &mut dst_storage)?; Ok((crate::CpuStorage::F32(dst_storage), dst_shape)) } fn metal_fwd( &self, storage: &crate::MetalStorage, layout: &crate::Layout, ) -> Result<(crate::MetalStorage, Shape)> { let self_storage = match &self.storage { QStorage::Metal(metal) => metal, _ => unreachable!("Cannot call metal matmul on non metal QTensor"), }; self_storage.fwd(&self.shape, storage, layout) } fn cuda_fwd( &self, storage: &crate::CudaStorage, layout: &crate::Layout, ) -> Result<(crate::CudaStorage, Shape)> { let self_storage = match &self.storage { QStorage::Cuda(cuda) => cuda, _ => unreachable!("Cannot call cuda matmul on non cuda QTensor"), }; self_storage.fwd(&self.shape, storage, layout) } } impl crate::Module for QMatMul { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::QTensor(t) => xs.apply_op1_no_bwd(t.as_ref()), Self::Tensor(w) => { let w = match *xs.dims() { [b1, b2, _, _] => w.broadcast_left((b1, b2))?.t()?, [bsize, _, _] => w.broadcast_left(bsize)?.t()?, _ => w.t()?, }; xs.matmul(&w) } } } }
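As a rough usage sketch of the API defined in this module (an assumption-laden example, not repo code; it presumes the default behaviour where `CANDLE_DEQUANTIZE_ALL` is unset so the quantized matmul path is taken), one can quantize an f32 weight and run it through `QMatMul`:

```rust
use candle_core::quantized::{GgmlDType, QMatMul, QTensor};
use candle_core::{Device, Module, Tensor};

fn main() -> candle_core::Result<()> {
    let dev = Device::Cpu;
    // The last dimension must be divisible by the block size (32 for Q4_0).
    let w = Tensor::randn(0f32, 1., (8, 64), &dev)?;
    let qw = QTensor::quantize(&w, GgmlDType::Q4_0)?;
    let mm = QMatMul::from_qtensor(qw)?;
    // The quantized weight is stored transposed, so the input has k = 64 columns.
    let x = Tensor::randn(0f32, 1., (2, 64), &dev)?;
    let y = mm.forward(&x)?;
    assert_eq!(y.dims(), &[2, 8]);
    Ok(())
}
```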
candle/candle-core/src/quantized/mod.rs/0
{ "file_path": "candle/candle-core/src/quantized/mod.rs", "repo_id": "candle", "token_count": 8178 }
20
use anyhow::Result; use candle_core::{DType, Device::Cpu, Tensor}; #[test] fn display_scalar() -> Result<()> { let t = Tensor::new(1234u32, &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[1234]\nTensor[[], u32]"); let t = t.to_dtype(DType::F32)?.neg()?; let s = format!("{}", (&t / 10.0)?); assert_eq!(&s, "[-123.4000]\nTensor[[], f32]"); let s = format!("{}", (&t / 1e8)?); assert_eq!(&s, "[-1.2340e-5]\nTensor[[], f32]"); let s = format!("{}", (&t * 1e8)?); assert_eq!(&s, "[-1.2340e11]\nTensor[[], f32]"); let s = format!("{}", (&t * 0.)?); assert_eq!(&s, "[0.]\nTensor[[], f32]"); Ok(()) } #[test] fn display_vector() -> Result<()> { let t = Tensor::new::<&[u32; 0]>(&[], &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[]\nTensor[[0], u32]"); let t = Tensor::new(&[0.1234567, 1.0, -1.2, 4.1, f64::NAN], &Cpu)?; let s = format!("{t}"); assert_eq!( &s, "[ 0.1235, 1.0000, -1.2000, 4.1000, NaN]\nTensor[[5], f64]" ); let t = (Tensor::ones(50, DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.] Tensor[[50], f32]"#; assert_eq!(&s, expected); let t = (Tensor::ones(11000, DType::F32, &Cpu)? * 42.)?; let s = format!("{t}"); assert_eq!( &s, "[42., 42., 42., ..., 42., 42., 42.]\nTensor[[11000], f32]" ); Ok(()) } #[test] fn display_multi_dim() -> Result<()> { let t = (Tensor::ones((200, 100), DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]] Tensor[[200, 100], f32]"#; assert_eq!(&s, expected); let t = t.reshape(&[2, 1, 1, 100, 100])?; let t = format!("\n{t}"); let expected = r#" [[[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]], [[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]]] Tensor[[2, 1, 1, 100, 100], f32]"#; assert_eq!(&t, expected); Ok(()) }
candle/candle-core/tests/display_tests.rs/0
{ "file_path": "candle/candle-core/tests/display_tests.rs", "repo_id": "candle", "token_count": 1395 }
21
[package] name = "candle-datasets" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] byteorder = { workspace = true } candle = { workspace = true } candle-nn = { workspace = true } hf-hub = { workspace = true} intel-mkl-src = { workspace = true, optional = true } memmap2 = { workspace = true } tokenizers = { workspace = true, features = ["onig"] } rand = { workspace = true } thiserror = { workspace = true } parquet = { workspace = true} image = { workspace = true }
candle/candle-datasets/Cargo.toml/0
{ "file_path": "candle/candle-datasets/Cargo.toml", "repo_id": "candle", "token_count": 201 }
22
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::bigcode::{Config, GPTBigCode}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: GPTBigCode, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, } impl TextGeneration { fn new( model: GPTBigCode, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); print!("{prompt}"); std::io::stdout().flush()?; let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let (context_size, past_len) = if self.model.config().use_cache && index > 0 { (1, tokens.len().saturating_sub(1)) } else { (tokens.len(), 0) }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, past_len)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({:.3} token/s)", sample_len as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "bigcode/starcoderbase-1b")] model_id: String, #[arg(long, default_value = "main")] revision: String, #[arg(long)] weight_file: Option<String>, } fn main() -> Result<()> { let args = Args::parse(); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = match args.weight_file { Some(weight_file) => vec![std::path::PathBuf::from(weight_file)], None => ["model.safetensors"] .iter() .map(|f| repo.get(f)) .collect::<std::result::Result<Vec<_>, _>>()?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? 
}; let config = Config::starcoder_1b(); let model = GPTBigCode::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/bigcode/main.rs/0
{ "file_path": "candle/candle-examples/examples/bigcode/main.rs", "repo_id": "candle", "token_count": 2134 }
23
# candle-efficientvit

[EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention](https://arxiv.org/abs/2305.07027).

This candle implementation uses a pre-trained EfficientViT (from Microsoft Research Asia) network for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes.

## Running an example

```
$ cargo run --example efficientvit --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which m1

loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 69.80%
unicycle, monocycle : 13.03%
bicycle-built-for-two, tandem bicycle, tandem: 9.28%
crash helmet : 2.25%
alp : 0.46%
```
candle/candle-examples/examples/efficientvit/README.md/0
{ "file_path": "candle/candle-examples/examples/efficientvit/README.md", "repo_id": "candle", "token_count": 273 }
24
use candle::backend::BackendStorage; use candle::{CpuStorage, CustomOp1, DType, Device, IndexOp, Layout, Result, Shape, Tensor, D}; use candle_nn::{Embedding, Linear, Module, RmsNorm}; use cudarc::nccl::safe::{Comm, ReduceOp}; use half::f16; use serde::Deserialize; use std::rc::Rc; use std::sync::{Arc, Mutex}; use super::MAX_SEQ_LEN; use candle_nn::var_builder::ShardedVarBuilder as VarBuilder; struct TensorParallelColumnLinear { linear: Linear, } impl TensorParallelColumnLinear { fn new(linear: Linear) -> Self { Self { linear } } fn forward(&self, x: &Tensor) -> Result<Tensor> { self.linear.forward(x) } } struct TensorParallelRowLinear { linear: Linear, comm: Rc<Comm>, } struct AllReduce { comm: Rc<Comm>, } /// This is actually not safe: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/threadsafety.html /// But for this example purposes, this will work unsafe impl Sync for AllReduce {} /// This is actually not safe: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/threadsafety.html /// But for this example purposes, this will work unsafe impl Send for AllReduce {} impl CustomOp1 for AllReduce { fn name(&self) -> &'static str { "allreduce" } fn cpu_fwd(&self, _s: &CpuStorage, _l: &Layout) -> Result<(CpuStorage, Shape)> { todo!("implement allreduce for cpu is not necessary for single node"); } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s: &candle::CudaStorage, l: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::WrapErr; let elem_count = l.shape().elem_count(); let dev = s.device().clone(); let s = s.as_cuda_slice::<f16>()?; // let s = match l.contiguous_offsets() { // None => Err(Error::Wrapped("input has to be contiguous".into()))?, // Some((o1, o2)) => s.slice(o1..o2), // }; let mut dst = unsafe { dev.alloc::<f16>(elem_count) }.w()?; self.comm.all_reduce(s, &mut dst, &ReduceOp::Sum).unwrap(); let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev); Ok((dst, l.shape().clone())) } } fn all_reduce_sum(x: &Tensor, comm: &Rc<Comm>) -> Result<Tensor> { x.apply_op1(AllReduce { comm: comm.clone() }) } impl TensorParallelRowLinear { fn new(linear: Linear, comm: Rc<Comm>) -> Self { Self { linear, comm } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = self.linear.forward(x)?; all_reduce_sum(&x, &self.comm) } } fn shard(dim: usize, rank: usize, world_size: usize) -> candle_nn::var_builder::Shard { candle_nn::var_builder::Shard { dim, rank, world_size, } } impl TensorParallelColumnLinear { fn load(vb: VarBuilder, comm: Rc<Comm>) -> Result<Self> { let rank = comm.rank(); let size = comm.world_size(); let weight = vb.get_with_hints((), "weight", shard(0, rank, size))?; Ok(Self::new(Linear::new(weight, None))) } fn load_multi(vb: VarBuilder, prefixes: &[&str], comm: Rc<Comm>) -> Result<Self> { let rank = comm.rank(); let size = comm.world_size(); let weights: Vec<_> = prefixes .iter() .map(|p| vb.pp(p).get_with_hints((), "weight", shard(0, rank, size))) .collect::<Result<Vec<_>>>()?; let weight = Tensor::cat(&weights, 0)?; Ok(Self::new(Linear::new(weight, None))) } } impl TensorParallelRowLinear { fn load(vb: VarBuilder, comm: Rc<Comm>) -> Result<Self> { let rank = comm.rank(); let size = comm.world_size(); let weight = vb.get_with_hints((), "weight", shard(1, rank, size))?; Ok(Self::new(Linear::new(weight, None), comm)) } } #[derive(Deserialize)] pub struct Config { pub hidden_size: usize, pub intermediate_size: usize, pub vocab_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: 
usize, pub rms_norm_eps: f64, #[serde(default = "default_rope")] pub rope_theta: f32, } fn default_rope() -> f32 { 10_000.0 } #[derive(Clone)] pub struct Cache { #[allow(clippy::type_complexity)] kvs: Arc<Mutex<Vec<Option<(Tensor, Tensor)>>>>, cos: Tensor, sin: Tensor, } impl Cache { pub fn new(dtype: DType, config: &Config, device: &Device) -> Result<Self> { // precompute freqs_cis let n_elem = config.hidden_size / config.num_attention_heads; let theta: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / config.rope_theta.powf(i as f32 / n_elem as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)? .to_dtype(DType::F32)? .reshape((MAX_SEQ_LEN, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; // This is different from the paper, see: // https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112 let idx_theta = Tensor::cat(&[&idx_theta, &idx_theta], D::Minus1)?; let cos = idx_theta.cos()?.to_dtype(dtype)?; let sin = idx_theta.sin()?.to_dtype(dtype)?; Ok(Self { kvs: Arc::new(Mutex::new(vec![None; config.num_hidden_layers])), cos, sin, }) } } fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? } fn linear(size1: usize, size2: usize, vb: VarBuilder) -> Result<Linear> { let weight = vb.get((size2, size1), "weight")?; Ok(Linear::new(weight, None)) } fn embedding(cfg: &Config, vb: VarBuilder) -> Result<Embedding> { let embeddings = vb.get((cfg.vocab_size, cfg.hidden_size), "weight")?; Ok(Embedding::new(embeddings, cfg.hidden_size)) } struct CausalSelfAttention { qkv_proj: TensorParallelColumnLinear, o_proj: TensorParallelRowLinear, num_attention_heads: usize, num_key_value_heads: usize, head_dim: usize, cache: Cache, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (b_sz, _, seq_len, hidden_size) = x.shape().dims4()?; let cos = self.cache.cos.narrow(0, index_pos, seq_len)?; let sin = self.cache.sin.narrow(0, index_pos, seq_len)?; let cos = cos.broadcast_as((b_sz, 1, seq_len, hidden_size))?; let sin = sin.broadcast_as((b_sz, 1, seq_len, hidden_size))?; let x1 = x.narrow(D::Minus1, 0, hidden_size / 2)?; let x2 = x.narrow(D::Minus1, hidden_size / 2, hidden_size / 2)?; let rotate_x = Tensor::cat(&[&x2.neg()?, &x1], D::Minus1)?; let rope = (x.broadcast_mul(&cos)? + rotate_x.broadcast_mul(&sin)?)?; Ok(rope) } fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> { let (b_sz, seq_len, _) = x.shape().dims3()?; let qkv = self.qkv_proj.forward(x)?; let hidden_size = self.num_attention_heads * self.head_dim; let q = qkv.i((.., .., ..self.num_attention_heads * self.head_dim))?; let k = qkv.i(( .., .., self.num_attention_heads * self.head_dim ..self.num_attention_heads * self.head_dim + self.num_key_value_heads * self.head_dim, ))?; let v = qkv.i(( .., .., self.num_attention_heads * self.head_dim + self.num_key_value_heads * self.head_dim.., ))?; // todo!("Q {:?} K {:?} V {:?} - x {:?}", q.shape(), k.shape(), v.shape(), x.shape()); let q = q .reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)?; let k = k .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? .transpose(1, 2)?; let mut v = v .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? 
.transpose(1, 2)?; let q = self.apply_rotary_emb(&q, index_pos)?; let mut k = self.apply_rotary_emb(&k, index_pos)?; let mut cache = self.cache.kvs.lock().unwrap(); if let Some((cache_k, cache_v)) = &cache[block_idx] { k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?; let k_seq_len = k.dims()[1]; if k_seq_len > MAX_SEQ_LEN { k = k .narrow(D::Minus1, k_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)? .contiguous()? } let v_seq_len = v.dims()[1]; if v_seq_len > 2 * MAX_SEQ_LEN { v = v .narrow(D::Minus1, v_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)? .contiguous()? } } cache[block_idx] = Some((k.clone(), v.clone())); let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); let y = candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)? .transpose(1, 2)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, hidden_size])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.num_attention_heads / self.num_key_value_heads; if n_rep == 1 { Ok(x) } else { let (b_sz, n_kv_head, seq_len, head_dim) = x.shape().dims4()?; let x = x .unsqueeze(2)? .expand((b_sz, n_kv_head, n_rep, seq_len, head_dim))? .reshape((b_sz, n_kv_head, n_rep, seq_len, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let qkv_proj = TensorParallelColumnLinear::load_multi( vb.clone(), &["q_proj", "k_proj", "v_proj"], comm.clone(), )?; let o_proj = TensorParallelRowLinear::load(vb.pp("o_proj"), comm.clone())?; Ok(Self { qkv_proj, o_proj, num_attention_heads: cfg.num_attention_heads / comm.world_size(), num_key_value_heads: cfg.num_key_value_heads / comm.world_size(), head_dim: cfg.hidden_size / cfg.num_attention_heads, cache: cache.clone(), }) } } struct Mlp { c_fc1: TensorParallelColumnLinear, c_fc2: TensorParallelColumnLinear, c_proj: TensorParallelRowLinear, } impl Mlp { fn new( c_fc1: TensorParallelColumnLinear, c_fc2: TensorParallelColumnLinear, c_proj: TensorParallelRowLinear, ) -> Self { Self { c_fc1, c_fc2, c_proj, } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, _cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let c_fc1 = TensorParallelColumnLinear::load(vb.pp("gate_proj"), comm.clone())?; let c_fc2 = TensorParallelColumnLinear::load(vb.pp("up_proj"), comm.clone())?; let c_proj = TensorParallelRowLinear::load(vb.pp("down_proj"), comm)?; Ok(Self::new(c_fc1, c_fc2, c_proj)) } } struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } fn rms_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> { let weight = vb.get_with_hints(size, "weight", shard(0, 0, 1))?; Ok(RmsNorm::new(weight, eps)) } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? 
+ residual)?; Ok(x) } fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cache, cfg, comm.clone())?; let mlp = Mlp::load(vb.pp("mlp"), cfg, comm)?; let input_layernorm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("input_layernorm"))?; let post_attention_layernorm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } pub struct Llama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, } impl Llama { fn new(wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear) -> Self { Self { wte, blocks, ln_f, lm_head, } } pub fn forward(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, seq_len) = x.shape().dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx)?; } let x = self.ln_f.forward(&x)?; let x = x.i((.., seq_len - 1, ..))?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let wte = embedding(cfg, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; let norm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.num_hidden_layers) .map(|i| { Block::load( vb.pp(&format!("model.layers.{i}")), cache, cfg, comm.clone(), ) .unwrap() }) .collect(); Ok(Self::new(wte, blocks, norm, lm_head)) } }
candle/candle-examples/examples/llama_multiprocess/model.rs/0
{ "file_path": "candle/candle-examples/examples/llama_multiprocess/model.rs", "repo_id": "candle", "token_count": 7542 }
25
# candle-mobileone

[MobileOne: An Improved One millisecond Mobile Backbone](https://arxiv.org/abs/2206.04040).

This candle implementation uses a pre-trained MobileOne network for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes.

## Running an example

```
$ cargo run --example mobileone --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which s2

loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 79.33%
bicycle-built-for-two, tandem bicycle, tandem: 15.32%
crash helmet : 2.58%
unicycle, monocycle : 1.70%
alp : 0.21%
```
candle/candle-examples/examples/mobileone/README.md/0
{ "file_path": "candle/candle-examples/examples/mobileone/README.md", "repo_id": "candle", "token_count": 254 }
26
import gymnasium as gym import numpy as np from collections import deque from PIL import Image from multiprocessing import Process, Pipe # atari_wrappers.py class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): """Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0. """ gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self): """ Do no-op action for a number of steps in [1, noop_max].""" self.env.reset() if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.integers(1, self.noop_max + 1) #pylint: disable=E1101 assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(0) if done: obs = self.env.reset() return obs class FireResetEnv(gym.Wrapper): def __init__(self, env): """Take action on reset for environments that are fixed until firing.""" gym.Wrapper.__init__(self, env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE' assert len(env.unwrapped.get_action_meanings()) >= 3 def reset(self): self.env.reset() obs, _, done, _ = self.env.step(1) if done: self.env.reset() obs, _, done, _ = self.env.step(2) if done: self.env.reset() return obs class ImageSaver(gym.Wrapper): def __init__(self, env, img_path, rank): gym.Wrapper.__init__(self, env) self._cnt = 0 self._img_path = img_path self._rank = rank def step(self, action): step_result = self.env.step(action) obs, _, _, _ = step_result img = Image.fromarray(obs, 'RGB') img.save('%s/out%d-%05d.png' % (self._img_path, self._rank, self._cnt)) self._cnt += 1 return step_result class EpisodicLifeEnv(gym.Wrapper): def __init__(self, env): """Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation. """ gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True def step(self, action): obs, reward, done, info = self.env.step(action) self.was_real_done = done # check current lives, make loss of life terminal, # then update lives to handle bonus lives lives = self.env.unwrapped.ale.lives() if lives < self.lives and lives > 0: # for Qbert sometimes we stay in lives == 0 condition for a few frames # so its important to keep lives > 0, so that we only reset once # the environment advertises done. done = True self.lives = lives return obs, reward, done, info def reset(self): """Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes. """ if self.was_real_done: obs = self.env.reset() else: # no-op step to advance from terminal/lost life state obs, _, _, _ = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs class MaxAndSkipEnv(gym.Wrapper): def __init__(self, env, skip=4): """Return only every `skip`-th frame""" gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = deque(maxlen=2) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for _ in range(self._skip): obs, reward, done, info = self.env.step(action) self._obs_buffer.append(obs) total_reward += reward if done: break max_frame = np.max(np.stack(self._obs_buffer), axis=0) return max_frame, total_reward, done, info def reset(self): """Clear past frame buffer and init. 
to first obs. from inner env.""" self._obs_buffer.clear() obs = self.env.reset() self._obs_buffer.append(obs) return obs class ClipRewardEnv(gym.RewardWrapper): def reward(self, reward): """Bin reward to {+1, 0, -1} by its sign.""" return np.sign(reward) class WarpFrame(gym.ObservationWrapper): def __init__(self, env): """Warp frames to 84x84 as done in the Nature paper and later work.""" gym.ObservationWrapper.__init__(self, env) self.res = 84 self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.res, self.res, 1), dtype='uint8') def observation(self, obs): frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32')) frame = np.array(Image.fromarray(frame).resize((self.res, self.res), resample=Image.BILINEAR), dtype=np.uint8) return frame.reshape((self.res, self.res, 1)) class FrameStack(gym.Wrapper): def __init__(self, env, k): """Buffer observations and stack across channels (last axis).""" gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape assert shp[2] == 1 # can only stack 1-channel frames self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k), dtype='uint8') def reset(self): """Clear buffer and re-fill by duplicating the first observation.""" ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self.observation() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self.observation(), reward, done, info def observation(self): assert len(self.frames) == self.k return np.concatenate(self.frames, axis=2) def wrap_deepmind(env, episode_life=True, clip_rewards=True): """Configure environment for DeepMind-style Atari. Note: this does not include frame stacking!""" assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip if episode_life: env = EpisodicLifeEnv(env) env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if clip_rewards: env = ClipRewardEnv(env) return env # envs.py def make_env(env_id, img_dir, seed, rank): def _thunk(): env = gym.make(env_id) env.reset(seed=(seed + rank)) if img_dir is not None: env = ImageSaver(env, img_dir, rank) env = wrap_deepmind(env) env = WrapPyTorch(env) return env return _thunk class WrapPyTorch(gym.ObservationWrapper): def __init__(self, env=None): super(WrapPyTorch, self).__init__(env) self.observation_space = gym.spaces.Box(0.0, 1.0, [1, 84, 84], dtype='float32') def observation(self, observation): return observation.transpose(2, 0, 1) # vecenv.py class VecEnv(object): """ Vectorized environment base class """ def step(self, vac): """ Apply sequence of actions to sequence of environments actions -> (observations, rewards, news) where 'news' is a boolean vector indicating whether each element is new. 
""" raise NotImplementedError def reset(self): """ Reset all environments """ raise NotImplementedError def close(self): pass # subproc_vec_env.py def worker(remote, env_fn_wrapper): env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if done: ob = env.reset() remote.send((ob, reward, done, info)) elif cmd == 'reset': ob = env.reset() remote.send(ob) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces': remote.send((env.action_space, env.observation_space)) else: raise NotImplementedError class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class SubprocVecEnv(VecEnv): def __init__(self, env_fns): """ envs: list of gym environments to run in subprocesses """ nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn))) for (work_remote, env_fn) in zip(self.work_remotes, env_fns)] for p in self.ps: p.start() self.remotes[0].send(('get_spaces', None)) self.action_space, self.observation_space = self.remotes[0].recv() def step(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) results = [remote.recv() for remote in self.remotes] obs, rews, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() @property def num_envs(self): return len(self.remotes) # Create the environment. def make(env_name, img_dir, num_processes): envs = SubprocVecEnv([ make_env(env_name, img_dir, 1337, i) for i in range(num_processes) ]) return envs
candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py", "repo_id": "candle", "token_count": 4740 }
27
# candle-segformer

- [HuggingFace Segformer Model Card][segformer]
- [`mit-b0` - An encoder only pretrained model][encoder]
- [`segformer-b0-finetuned-ade-512-512` - A fine tuned model for segmentation][ade512]

## How to run the example

If you want, you can use the example images from this [pull request][pr], download them and supply the path to the image as an argument to the example.

```bash
# run the image classification task
cargo run --example segformer classify <path-to-image>

# run the segmentation task
cargo run --example segformer segment <path-to-image>
```

Example output for classification:

```text
classification logits [3.275261e-5, 0.0008562019, 0.0008868563, 0.9977506, 0.0002465068, 0.0002241473, 2.846596e-6]
label: hamburger
```

[pr]: https://github.com/huggingface/candle/pull/1617
[segformer]: https://huggingface.co/docs/transformers/model_doc/segformer
[encoder]: https://huggingface.co/nvidia/mit-b0
[ade512]: https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512
candle/candle-examples/examples/segformer/README.md/0
{ "file_path": "candle/candle-examples/examples/segformer/README.md", "repo_id": "candle", "token_count": 357 }
28
use candle::{IndexOp, Result, Tensor, D}; use tokenizers::Tokenizer; const LANGUAGES: [(&str, &str); 99] = [ ("en", "english"), ("zh", "chinese"), ("de", "german"), ("es", "spanish"), ("ru", "russian"), ("ko", "korean"), ("fr", "french"), ("ja", "japanese"), ("pt", "portuguese"), ("tr", "turkish"), ("pl", "polish"), ("ca", "catalan"), ("nl", "dutch"), ("ar", "arabic"), ("sv", "swedish"), ("it", "italian"), ("id", "indonesian"), ("hi", "hindi"), ("fi", "finnish"), ("vi", "vietnamese"), ("he", "hebrew"), ("uk", "ukrainian"), ("el", "greek"), ("ms", "malay"), ("cs", "czech"), ("ro", "romanian"), ("da", "danish"), ("hu", "hungarian"), ("ta", "tamil"), ("no", "norwegian"), ("th", "thai"), ("ur", "urdu"), ("hr", "croatian"), ("bg", "bulgarian"), ("lt", "lithuanian"), ("la", "latin"), ("mi", "maori"), ("ml", "malayalam"), ("cy", "welsh"), ("sk", "slovak"), ("te", "telugu"), ("fa", "persian"), ("lv", "latvian"), ("bn", "bengali"), ("sr", "serbian"), ("az", "azerbaijani"), ("sl", "slovenian"), ("kn", "kannada"), ("et", "estonian"), ("mk", "macedonian"), ("br", "breton"), ("eu", "basque"), ("is", "icelandic"), ("hy", "armenian"), ("ne", "nepali"), ("mn", "mongolian"), ("bs", "bosnian"), ("kk", "kazakh"), ("sq", "albanian"), ("sw", "swahili"), ("gl", "galician"), ("mr", "marathi"), ("pa", "punjabi"), ("si", "sinhala"), ("km", "khmer"), ("sn", "shona"), ("yo", "yoruba"), ("so", "somali"), ("af", "afrikaans"), ("oc", "occitan"), ("ka", "georgian"), ("be", "belarusian"), ("tg", "tajik"), ("sd", "sindhi"), ("gu", "gujarati"), ("am", "amharic"), ("yi", "yiddish"), ("lo", "lao"), ("uz", "uzbek"), ("fo", "faroese"), ("ht", "haitian creole"), ("ps", "pashto"), ("tk", "turkmen"), ("nn", "nynorsk"), ("mt", "maltese"), ("sa", "sanskrit"), ("lb", "luxembourgish"), ("my", "myanmar"), ("bo", "tibetan"), ("tl", "tagalog"), ("mg", "malagasy"), ("as", "assamese"), ("tt", "tatar"), ("haw", "hawaiian"), ("ln", "lingala"), ("ha", "hausa"), ("ba", "bashkir"), ("jw", "javanese"), ("su", "sundanese"), ]; /// Returns the token id for the selected language. pub fn detect_language( model: &mut super::Model, tokenizer: &Tokenizer, mel: &Tensor, ) -> Result<u32> { let (_bsize, _, seq_len) = mel.dims3()?; let mel = mel.narrow( 2, 0, usize::min(seq_len, model.config().max_source_positions), )?; let device = mel.device(); let language_token_ids = LANGUAGES .iter() .map(|(t, _)| crate::token_id(tokenizer, &format!("<|{t}|>"))) .collect::<Result<Vec<_>>>()?; let sot_token = crate::token_id(tokenizer, crate::m::SOT_TOKEN)?; let audio_features = model.encoder_forward(&mel, true)?; let tokens = Tensor::new(&[[sot_token]], device)?; let language_token_ids = Tensor::new(language_token_ids.as_slice(), device)?; let ys = model.decoder_forward(&tokens, &audio_features, true)?; let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?; let logits = logits.index_select(&language_token_ids, 0)?; let probs = candle_nn::ops::softmax(&logits, D::Minus1)?; let probs = probs.to_vec1::<f32>()?; let mut probs = LANGUAGES.iter().zip(probs.iter()).collect::<Vec<_>>(); probs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for ((_, language), p) in probs.iter().take(5) { println!("{language}: {p}") } let language = crate::token_id(tokenizer, &format!("<|{}|>", probs[0].0 .0))?; Ok(language) }
candle/candle-examples/examples/whisper/multilingual.rs/0
{ "file_path": "candle/candle-examples/examples/whisper/multilingual.rs", "repo_id": "candle", "token_count": 1846 }
29
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. // This file is auto-generated. See "generate_kernels.py" #include "flash_fwd_launch_template.h" template<> void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim128<cutlass::half_t>(params, stream); }
candle/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu/0
{ "file_path": "candle/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu", "repo_id": "candle", "token_count": 135 }
30
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include "static_switch.h" #include "flash.h" #include "flash_fwd_kernel.h" template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Return_softmax> __global__ void flash_fwd_kernel(Flash_fwd_params params) { static_assert(!(Is_causal && Is_local)); // If Is_local is true, Is_causal should be false flash::compute_attn<Kernel_traits, Is_dropout, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Return_softmax>(params); } template<typename Kernel_traits, bool Is_dropout, bool Is_causal> void run_flash_fwd(Flash_fwd_params &params, cudaStream_t stream) { constexpr size_t smem_size = Kernel_traits::kSmemSize; // printf("smem_size = %d\n", smem_size); // Work-around for gcc 7. It doesn't like nested BOOL_SWITCH. // https://github.com/kokkos/kokkos-kernels/issues/349 // https://github.com/HazyResearch/flash-attention/issues/21 const int num_m_block = (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM; dim3 grid(num_m_block, params.b, params.h); const bool is_even_MN = params.cu_seqlens_q == nullptr && params.cu_seqlens_k == nullptr && params.seqlen_k % Kernel_traits::kBlockN == 0 && params.seqlen_q % Kernel_traits::kBlockM == 0; const bool is_even_K = params.d == Kernel_traits::kHeadDim; const bool return_softmax = params.p_ptr != nullptr; BOOL_SWITCH(is_even_MN, IsEvenMNConst, [&] { BOOL_SWITCH(is_even_K, IsEvenKConst, [&] { BOOL_SWITCH((params.window_size_left >= 0 || params.window_size_right >= 0) && !Is_causal, Is_local, [&] { BOOL_SWITCH(return_softmax, ReturnSoftmaxConst, [&] { BOOL_SWITCH(params.alibi_slopes_ptr != nullptr, Has_alibi, [&] { // Will only return softmax if dropout, to reduce compilation time. // If not IsEvenKConst, we also set IsEvenMNConst to false to reduce number of templates. 
// If return_softmax, set IsEvenMNConst to false to reduce number of templates // If head dim > 128, set IsEvenMNConst to false to reduce number of templates // If Is_local, set Is_causal to false auto kernel = &flash_fwd_kernel<Kernel_traits, Is_dropout, Is_causal, Is_local && !Is_causal, Has_alibi, IsEvenMNConst && IsEvenKConst && !Is_local && !ReturnSoftmaxConst && Kernel_traits::kHeadDim <= 128, IsEvenKConst, ReturnSoftmaxConst && Is_dropout>; // auto kernel = &flash_fwd_kernel<Kernel_traits, false, Is_causal, false, false, true, true, false>; // printf("IsEvenMNConst = %d, IsEvenKConst = %d, Is_local = %d, Is_causal = %d, ReturnSoftmaxConst = %d, Is_dropout = %d\n", int(IsEvenMNConst), int(IsEvenKConst), int(Is_local), int(Is_causal), int(ReturnSoftmaxConst), int(Is_dropout)); // auto kernel = &flash_fwd_kernel<Kernel_traits, false, Is_causal, false, true, true, false>; // int ctas_per_sm; // cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor( // &ctas_per_sm, kernel, Kernel_traits::kNThreads, smem_size); // printf("smem_size = %d, CTAs per SM = %d\n", int(smem_size), ctas_per_sm); kernel<<<grid, Kernel_traits::kNThreads, smem_size, stream>>>(params); }); }); }); }); }); } template<typename T> void run_mha_fwd_hdim32(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 32; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim64(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 64; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if constexpr(!Is_dropout) { // Using 8 warps is 18% slower for seqlen=2k, 2 warps is 5% slower // Using block size (64 x 256) is 27% slower for seqlen=2k // Using block size (256 x 64) is 85% slower for seqlen=2k, because of register spilling run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } }); }); } template<typename T> void run_mha_fwd_hdim96(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 96; // auto dprops = at::cuda::getCurrentDeviceProperties(); bool is_sm8x = true; // dprops->major == 8 && dprops->minor > 0; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, 
false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // These two are always slower // run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 128, 4, true, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<96, 64, 128, 4, true, T>>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim128(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 128; // auto dprops = at::cuda::getCurrentDeviceProperties(); bool is_sm8x = true; // dprops->major == 8 && dprops->minor > 0; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if constexpr(!Is_dropout) { // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), // and 128 x 32 (48 KB smem) is the fastest for non-causal since we get 2 CTAs per SM. if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // 1st ones are good for H100, A100 // 2nd one is good for A6000 bc we get slightly better occupancy } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); } }); }); } template<typename T> void run_mha_fwd_hdim160(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 160; // auto dprops = at::cuda::getCurrentDeviceProperties(); bool is_sm8x = true; // dprops->major == 8 && dprops->minor > 0; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For A100, H100, 128 x 32 is the fastest. // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), // and 128 x 64 with 8 warps is the fastest for non-causal. 
if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim192(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 192; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if constexpr(!Is_dropout) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim224(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 224; int device; cudaGetDevice(&device); int max_smem_per_block; cudaError status_ = cudaDeviceGetAttribute( &max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); // printf("max_smem_per_block = %d\n", max_smem_per_block); BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64)) { // 112 KB run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // We can't do 128 x 32 with 8 warps because with headdim 224, kBlockKSmem = 32. // If we have N = 32, there are only 1024 elements to load at once, where each load // is 8 elements. This means we can only use 128 threads and not 256 threads. 
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim256(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 256; int device; cudaGetDevice(&device); int max_smem_per_sm, max_smem_per_block; cudaError status_ = cudaDeviceGetAttribute( &max_smem_per_sm, cudaDevAttrMaxSharedMemoryPerMultiprocessor, device); status_ = cudaDeviceGetAttribute( &max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); // printf("max_smem_per_sm = %d, max_smem_per_block = %d\n", max_smem_per_sm, max_smem_per_block); BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For A100, we want to run with 128 x 64 (128KB smem). // For H100 we want to run with 64 x 64 (96KB smem) since then we can get 2 CTAs per SM. if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64) && max_smem_per_sm < 4 * Headdim * (64 + 2 * 64)) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // 64 KB // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // 96 KB // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); }); }); }
candle/candle-flash-attn/kernels/flash_fwd_launch_template.h/0
{ "file_path": "candle/candle-flash-attn/kernels/flash_fwd_launch_template.h", "repo_id": "candle", "token_count": 7583 }
31
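// Illustrative usage sketch for the forward launchers in flash_fwd_launch_template.h
// above, seen from the Rust side of candle's flash-attn bindings. Assumptions: a CUDA
// device is available, inputs are f16 in (batch, seq_len, num_heads, head_dim) layout,
// and the public entry point is `candle_flash_attn::flash_attn`; treat this as a
// sketch rather than the crate's documented API surface.
use candle::{DType, Device, Result, Tensor};

fn flash_attn_sketch() -> Result<Tensor> {
    let device = Device::new_cuda(0)?;
    let head_dim = 64usize;
    let q = Tensor::randn(0f32, 1., (1, 128, 8, head_dim), &device)?.to_dtype(DType::F16)?;
    let k = Tensor::randn(0f32, 1., (1, 128, 8, head_dim), &device)?.to_dtype(DType::F16)?;
    let v = Tensor::randn(0f32, 1., (1, 128, 8, head_dim), &device)?.to_dtype(DType::F16)?;
    let softmax_scale = 1f32 / (head_dim as f32).sqrt();
    // `causal = true` picks the Is_causal instantiations; head_dim 64 routes through
    // run_mha_fwd_hdim64 above.
    candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, true)
}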
#include "cuda_utils.cuh" #include<stdint.h> template <typename S, typename T> __device__ void cast_( const size_t numel, const size_t num_dims, const size_t *info, const S *inp, T *out ) { const size_t *dims = info; const size_t *strides = info + num_dims; if (info == nullptr || is_contiguous(num_dims, dims, strides)) { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { out[i] = inp[i]; } } else { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { unsigned strided_i = get_strided_index(i, num_dims, dims, strides); out[i] = inp[strided_i]; } } } template <typename S, typename T, typename I> __device__ void cast_through( const size_t numel, const size_t num_dims, const size_t *info, const S *inp, T *out ) { const size_t *dims = info; const size_t *strides = info + num_dims; if (info == nullptr || is_contiguous(num_dims, dims, strides)) { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { out[i] = static_cast<T>(static_cast<I>(inp[i])); } } else { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { unsigned strided_i = get_strided_index(i, num_dims, dims, strides); out[i] = static_cast<T>(static_cast<I>(inp[strided_i])); } } } #define CAST_OP(SRC_TYPENAME, DST_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const SRC_TYPENAME *inp, \ DST_TYPENAME *out \ ) { \ cast_<SRC_TYPENAME, DST_TYPENAME>(numel, num_dims, info, inp, out); \ } \ #define CAST_THROUGH_OP(SRC_TYPENAME, DST_TYPENAME, INT_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const SRC_TYPENAME *inp, \ DST_TYPENAME *out \ ) { \ cast_through<SRC_TYPENAME, DST_TYPENAME, INT_TYPENAME>(numel, num_dims, info, inp, out); \ } \ #if __CUDA_ARCH__ >= 800 CAST_OP(__nv_bfloat16, __nv_bfloat16, cast_bf16_bf16) CAST_OP(__nv_bfloat16, uint32_t, cast_bf16_u32) CAST_OP(__nv_bfloat16, float, cast_bf16_f32) CAST_OP(__nv_bfloat16, double, cast_bf16_f64) CAST_OP(uint8_t, __nv_bfloat16, cast_u8_bf16) CAST_OP(uint32_t, __nv_bfloat16, cast_u32_bf16) CAST_OP(float, __nv_bfloat16, cast_f32_bf16) CAST_OP(double, __nv_bfloat16, cast_f64_bf16) CAST_THROUGH_OP(__nv_bfloat16, uint8_t, float, cast_bf16_u8) CAST_THROUGH_OP(__nv_bfloat16, __half, float, cast_bf16_f16) CAST_THROUGH_OP(__half, __nv_bfloat16, float, cast_f16_bf16) #endif #if __CUDA_ARCH__ >= 530 CAST_OP(__half, __half, cast_f16_f16) CAST_THROUGH_OP(__half, uint8_t, float, cast_f16_u8) CAST_OP(__half, uint32_t, cast_f16_u32) CAST_OP(__half, float, cast_f16_f32) CAST_OP(__half, double, cast_f16_f64) CAST_OP(uint8_t, __half, cast_u8_f16 ) CAST_OP(uint32_t, __half, cast_u32_f16) CAST_OP(float, __half, cast_f32_f16) CAST_OP(double, __half, cast_f64_f16) #endif CAST_OP(uint32_t, uint32_t, cast_u32_u32) CAST_OP(uint32_t, uint8_t, cast_u32_u8 ) CAST_OP(uint32_t, int64_t, cast_u32_i64 ) CAST_OP(uint32_t, float, cast_u32_f32) CAST_OP(uint32_t, double, cast_u32_f64) CAST_OP(uint8_t, uint32_t, cast_u8_u32) CAST_OP(uint8_t, uint8_t, cast_u8_u8 ) CAST_OP(uint8_t, int64_t, cast_u8_i64 ) CAST_OP(uint8_t, float, cast_u8_f32) CAST_OP(uint8_t, double, cast_u8_f64) CAST_OP(int64_t, uint32_t, cast_i64_u32) CAST_OP(int64_t, uint8_t, cast_i64_u8 ) CAST_OP(int64_t, int64_t, cast_i64_i64 ) CAST_OP(int64_t, float, cast_i64_f32) CAST_OP(int64_t, double, cast_i64_f64) 
CAST_OP(float, uint8_t, cast_f32_u8 ) CAST_OP(float, uint32_t, cast_f32_u32) CAST_OP(float, int64_t, cast_f32_i64 ) CAST_OP(float, float, cast_f32_f32) CAST_OP(float, double, cast_f32_f64) CAST_OP(double, uint8_t, cast_f64_u8 ) CAST_OP(double, uint32_t, cast_f64_u32) CAST_OP(double, int64_t, cast_f64_i64 ) CAST_OP(double, float, cast_f64_f32) CAST_OP(double, double, cast_f64_f64)
candle/candle-kernels/src/cast.cu/0
{ "file_path": "candle/candle-kernels/src/cast.cu", "repo_id": "candle", "token_count": 2171 }
32
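// Illustrative sketch for the cast kernels in cast.cu above: on a CUDA device they sit
// behind `Tensor::to_dtype`. The kernel names in the comments refer to the definitions
// above; the device is left to the caller.
use candle::{DType, Device, Result, Tensor};

fn cast_sketch(device: &Device) -> Result<()> {
    let t = Tensor::arange(0u32, 8u32, device)?; // u32 source tensor
    let f = t.to_dtype(DType::F32)?;             // cast_u32_f32
    let h = f.to_dtype(DType::F16)?;             // cast_f32_f16 (sm >= 53)
    let b = f.to_dtype(DType::U8)?;              // cast_f32_u8
    println!("{f}\n{h}\n{b}");
    Ok(())
}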
#include <metal_stdlib> using namespace metal; #define MAX(x, y) ((x) > (y) ? (x) : (y)) template <typename T> METAL_FUNC void im2col( constant size_t &dst_numel, constant size_t &h_out, constant size_t &w_out, constant size_t &h_k, constant size_t &w_k, constant size_t &stride, constant size_t &padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // dst: (b_size, h_out, w_out, c_in, h_k, w_k) // src: (b_size, c_in, h_in, w_in) if (tid >= dst_numel) { return; } const size_t b_in = src_dims[0]; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; const size_t dst_s4 = w_k; const size_t dst_s3 = h_k * dst_s4; const size_t dst_s2 = c_in * dst_s3; const size_t dst_s1 = w_out * dst_s2; const size_t dst_s0 = h_out * dst_s1; size_t tmp_tid = tid; const size_t b_idx = tmp_tid / dst_s0; tmp_tid -= b_idx * dst_s0; const size_t h_idx = tmp_tid / dst_s1; tmp_tid -= h_idx * dst_s1; const size_t w_idx = tmp_tid / dst_s2; tmp_tid -= w_idx * dst_s2; const size_t c_idx = tmp_tid / dst_s3; tmp_tid -= c_idx * dst_s3; const size_t h_k_idx = tmp_tid / dst_s4; tmp_tid -= h_k_idx * dst_s4; const size_t w_k_idx = tmp_tid; size_t src_h_idx = h_idx * stride + h_k_idx * dilation; size_t src_w_idx = w_idx * stride + w_k_idx * dilation; if (src_h_idx < padding || src_h_idx >= h_in + padding) { dst[tid] = static_cast<T>(0); } else if (src_w_idx < padding || src_w_idx >= w_in + padding) { dst[tid] = static_cast<T>(0); } else { src_h_idx -= padding; src_w_idx -= padding; const size_t src_i = b_idx * src_strides[0] + c_idx * src_strides[1] + src_h_idx * src_strides[2] + src_w_idx * src_strides[3]; dst[tid] = src[src_i]; } } template <typename T> METAL_FUNC void im2col1d( constant size_t &dst_numel, constant size_t &l_out, constant size_t &l_k, constant size_t &stride, constant size_t &padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // dst: (b_size, l_out, c_in, l_k) // src: (b_size, c_in, l_in) if (tid >= dst_numel) { return; } const size_t b_in = src_dims[0]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; const size_t dst_s2 = l_k; const size_t dst_s1 = c_in * dst_s2; const size_t dst_s0 = l_out * dst_s1; size_t tmp_dst_i = tid; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t l_idx = tmp_dst_i / dst_s1; tmp_dst_i -= l_idx * dst_s1; const size_t c_idx = tmp_dst_i / dst_s2; tmp_dst_i -= c_idx * dst_s2; const size_t l_k_idx = tmp_dst_i; size_t src_l_idx = l_idx * stride + l_k_idx * dilation; if (src_l_idx < padding || src_l_idx >= l_in + padding) { dst[tid] = static_cast<T>(0); } else { src_l_idx -= padding; const size_t src_i = b_idx * src_strides[0] + c_idx * src_strides[1] + src_l_idx * src_strides[2]; dst[tid] = src[src_i]; } } template <typename T> METAL_FUNC void upsample_nearest2d( constant size_t &w_out, constant size_t &h_out, constant float &w_scale, constant float &h_scale, constant size_t *src_dims, constant size_t *src_s, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // src: (b_size, c_in, w_in, h_in) const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; if (tid >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. 
const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; size_t src_w = static_cast<size_t>(dst_w * w_scale); size_t src_h = static_cast<size_t>(dst_h * h_scale); if (src_w >= w_in) { src_w = w_in - 1; } if (src_h >= h_in) { src_h = h_in - 1; } const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; dst[tid] = src[src_i]; } #define IM2COL_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_numel, \ constant size_t &h_out, \ constant size_t &w_out, \ constant size_t &h_k, \ constant size_t &w_k, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ im2col<T>(dst_numel, h_out, w_out, h_k, w_k, stride, padding, dilation, src_dims, src_strides, src, dst, tid); \ } \ #define IM2COL1D_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_numel, \ constant size_t &l_out, \ constant size_t &l_k, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ im2col1d<T>(dst_numel, l_out, l_k, stride, padding, dilation, src_dims, src_strides, src, dst, tid); \ } \ #define UPSAMPLE_NEAREST2D_OP(TYPENAME, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_out, \ constant size_t &h_out, \ constant float &w_scale, \ constant float &h_scale, \ constant size_t *dims, \ constant size_t *strides, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ upsample_nearest2d<TYPENAME>(w_out, h_out, w_scale, h_scale, dims, strides, src, dst, tid); \ } \ template <typename T, typename A> METAL_FUNC void avg_pool2d( constant size_t &w_k, constant size_t &h_k, constant size_t &w_stride, constant size_t &h_stride, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (tid >= src_dims[0] * c * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; const size_t src_idx0 = b_idx * src_strides[0]; A d = 0; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in){ continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_strides[1] + src_w * src_strides[2] + src_h * src_strides[3]; d += static_cast<A>(src[src_idx]); } } dst[tid] = static_cast<T>(d / (w_k * h_k)); } #define AVGPOOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_k, \ constant size_t &h_k, \ constant size_t &w_s, \ constant size_t &h_s, \ constant size_t *src_dims, \ constant size_t *src_s, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ avg_pool2d<TYPENAME, TYPEACC>(w_k, h_k, w_s, h_s, src_dims, 
src_s, src, dst, tid); \ } \ template <typename T> METAL_FUNC void max_pool2d( constant size_t &w_k, constant size_t &h_k, constant size_t &w_stride, constant size_t &h_stride, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (tid >= src_dims[0] * c * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; const size_t src_idx0 = b_idx * src_strides[0]; T d = 0; bool set = false; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in){ continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_strides[1] + src_w * src_strides[2] + src_h * src_strides[3]; if (set) { d = MAX(d, src[src_idx]); } else { d = src[src_idx]; set = true; } } } dst[tid] = d; } #define MAXPOOL2D_OP(TYPENAME, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_k, \ constant size_t &h_k, \ constant size_t &w_s, \ constant size_t &h_s, \ constant size_t *src_dims, \ constant size_t *src_s, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ max_pool2d<TYPENAME>(w_k, h_k, w_s, h_s, src_dims, src_s, src, dst, tid); \ } \ // Naive implementation of conv_transpose1d. template <typename T, typename A> METAL_FUNC void conv_transpose1d( constant size_t &l_out, constant size_t &stride, constant size_t &padding, constant size_t &out_padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, constant size_t *k_dims, constant size_t *k_strides, device const T *src, device const T *k, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // src: (b_size, c_in, l_in) // kernel: (c_in, c_out, l_k) const size_t l_k = k_dims[2]; const size_t c_out = k_dims[1]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; if (tid >= src_dims[0] * c_out * l_out) { return; } const size_t b_idx = tid / (l_out * c_out); const size_t dst_c_idx = (tid / l_out) % c_out; const size_t out_x = tid % l_out; const size_t src_idx0 = b_idx * src_strides[0]; A d = 0; for (int k_x = 0; k_x < (int)l_k; ++k_x) { // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding; int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } int inp_x = inp_x_stride / stride; if (inp_x >= l_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_strides[1] + inp_x * src_strides[2]; const size_t k_idx = src_c_idx * k_strides[0] + dst_c_idx * k_strides[1] + k_x * k_strides[2]; d += static_cast<A>(src[src_idx]) * static_cast<A>(k[k_idx]); } } dst[tid] = static_cast<T>(d); } #define CONVT1D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &l_out, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &out_padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ constant size_t *k_dims, \ constant size_t *k_strides, \ device const TYPENAME *src, \ 
device const TYPENAME *k, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ conv_transpose1d<TYPENAME, TYPEACC>(l_out, stride, padding, out_padding, dilation, src_dims, src_strides, k_dims, k_strides, src, k, dst, tid); \ } \ IM2COL_OP(float, im2col_f32) IM2COL_OP(uint8_t, im2col_u8) IM2COL_OP(uint32_t, im2col_u32) IM2COL1D_OP(float, im2col1d_f32) IM2COL1D_OP(uint8_t, im2col1d_u8) IM2COL1D_OP(uint32_t, im2col1d_u32) UPSAMPLE_NEAREST2D_OP(float, upsample_nearest2d_f32) UPSAMPLE_NEAREST2D_OP(uint8_t, upsample_nearest2d_u8) UPSAMPLE_NEAREST2D_OP(uint32_t, upsample_nearest2d_u32) MAXPOOL2D_OP(float, max_pool2d_f32) MAXPOOL2D_OP(half, max_pool2d_f16) MAXPOOL2D_OP(uint32_t, max_pool2d_u32) MAXPOOL2D_OP(uint8_t, max_pool2d_u8) #if defined(__HAVE_BFLOAT__) MAXPOOL2D_OP(bfloat, max_pool2d_bf16) #endif AVGPOOL2D_OP(float, float, avg_pool2d_f32) AVGPOOL2D_OP(half, float, avg_pool2d_f16) AVGPOOL2D_OP(uint32_t, uint32_t, avg_pool2d_u32) AVGPOOL2D_OP(uint8_t, uint8_t, avg_pool2d_u8) #if defined(__HAVE_BFLOAT__) AVGPOOL2D_OP(bfloat, float, avg_pool2d_bf16) #endif CONVT1D_OP(float, float, conv_transpose1d_f32) CONVT1D_OP(half, float, conv_transpose1d_f16) CONVT1D_OP(uint8_t, uint8_t, conv_transpose1d_u8) CONVT1D_OP(uint32_t, uint32_t, conv_transpose1d_u32) #if defined(__HAVE_BFLOAT__) CONVT1D_OP(bfloat, float, conv_transpose1d_bf16) #endif
candle/candle-metal-kernels/src/conv.metal/0
{ "file_path": "candle/candle-metal-kernels/src/conv.metal", "repo_id": "candle", "token_count": 6489 }
33
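// Hedged usage sketch for the Metal kernels in conv.metal above: on a Metal device they
// back candle's pooling and upsampling tensor ops. The method names follow the tensor
// API as the author understands it and are assumptions, not taken from this file; the
// input uses a 4D (batch, channels, spatial, spatial) layout.
use candle::{Device, Result, Tensor};

fn pooling_sketch(device: &Device) -> Result<()> {
    let x = Tensor::randn(0f32, 1., (1, 3, 16, 16), device)?;
    let avg = x.avg_pool2d((2, 2))?;        // avg_pool2d_f32 -> (1, 3, 8, 8)
    let max = x.max_pool2d((2, 2))?;        // max_pool2d_f32 -> (1, 3, 8, 8)
    let up = x.upsample_nearest2d(32, 32)?; // upsample_nearest2d_f32
    println!("{:?} {:?} {:?}", avg.shape(), max.shape(), up.shape());
    Ok(())
}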
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{DType, Device, Result, Tensor}; use candle_nn::{linear, AdamW, Linear, Module, Optimizer, ParamsAdamW, VarBuilder, VarMap}; fn gen_data() -> Result<(Tensor, Tensor)> { // Generate some sample linear data. let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; Ok((sample_xs, sample_ys)) } fn main() -> Result<()> { let (sample_xs, sample_ys) = gen_data()?; // Use backprop to run a linear regression between samples and get the coefficients back. let varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu); let model = linear(2, 1, vb.pp("linear"))?; let params = ParamsAdamW { lr: 0.1, ..Default::default() }; let mut opt = AdamW::new(varmap.all_vars(), params)?; for step in 0..10000 { let ys = model.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; opt.backward_step(&loss)?; println!("{step} {}", loss.to_vec0::<f32>()?); } Ok(()) }
candle/candle-nn/examples/basic_optimizer.rs/0
{ "file_path": "candle/candle-nn/examples/basic_optimizer.rs", "repo_id": "candle", "token_count": 595 }
34
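// A variation sketch on basic_optimizer.rs above: the same linear regression fitted
// with plain SGD instead of AdamW. The targets are precomputed from the generator used
// above (w = [3, 1], b = -2); `candle_nn::SGD` and its single learning-rate config are
// assumptions based on the shared Optimizer trait, and the learning rate is chosen
// conservatively for the sum-of-squares loss.
use candle::{DType, Device, Result, Tensor};
use candle_nn::{linear, Module, Optimizer, VarBuilder, VarMap, SGD};

fn train_with_sgd() -> Result<()> {
    let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?;
    let sample_ys = Tensor::new(&[[5f32], [23.], [-2.], [21.]], &Device::Cpu)?;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu);
    let model = linear(2, 1, vb.pp("linear"))?;
    let mut opt = SGD::new(varmap.all_vars(), 0.001)?;
    for step in 0..1000 {
        let ys = model.forward(&sample_xs)?;
        let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?;
        opt.backward_step(&loss)?;
        if step % 100 == 0 {
            println!("{step} {}", loss.to_vec0::<f32>()?);
        }
    }
    Ok(())
}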
//! Recurrent Neural Networks use candle::{DType, Device, IndexOp, Result, Tensor}; /// Trait for Recurrent Neural Networks. #[allow(clippy::upper_case_acronyms)] pub trait RNN { type State: Clone; /// A zero state from which the recurrent network is usually initialized. fn zero_state(&self, batch_dim: usize) -> Result<Self::State>; /// Applies a single step of the recurrent network. /// /// The input should have dimensions [batch_size, features]. fn step(&self, input: &Tensor, state: &Self::State) -> Result<Self::State>; /// Applies multiple steps of the recurrent network. /// /// The input should have dimensions [batch_size, seq_len, features]. /// The initial state is the result of applying zero_state. fn seq(&self, input: &Tensor) -> Result<Vec<Self::State>> { let batch_dim = input.dim(0)?; let state = self.zero_state(batch_dim)?; self.seq_init(input, &state) } /// Applies multiple steps of the recurrent network. /// /// The input should have dimensions [batch_size, seq_len, features]. fn seq_init(&self, input: &Tensor, init_state: &Self::State) -> Result<Vec<Self::State>> { let (_b_size, seq_len, _features) = input.dims3()?; let mut output = Vec::with_capacity(seq_len); for seq_index in 0..seq_len { let input = input.i((.., seq_index, ..))?; let state = if seq_index == 0 { self.step(&input, init_state)? } else { self.step(&input, &output[seq_index - 1])? }; output.push(state); } Ok(output) } /// Converts a sequence of state to a tensor. fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor>; } /// The state for a LSTM network, this contains two tensors. #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] pub struct LSTMState { h: Tensor, c: Tensor, } impl LSTMState { /// The hidden state vector, which is also the output of the LSTM. pub fn h(&self) -> &Tensor { &self.h } /// The cell state vector. pub fn c(&self) -> &Tensor { &self.c } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone, Copy)] pub struct LSTMConfig { pub w_ih_init: super::Init, pub w_hh_init: super::Init, pub b_ih_init: Option<super::Init>, pub b_hh_init: Option<super::Init>, pub layer_idx: usize, } impl Default for LSTMConfig { fn default() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: Some(super::Init::Const(0.)), b_hh_init: Some(super::Init::Const(0.)), layer_idx: 0, } } } impl LSTMConfig { pub fn default_no_bias() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: None, b_hh_init: None, layer_idx: 0, } } } /// A Long Short-Term Memory (LSTM) layer. /// /// <https://en.wikipedia.org/wiki/Long_short-term_memory> #[allow(clippy::upper_case_acronyms, unused)] #[derive(Clone, Debug)] pub struct LSTM { w_ih: Tensor, w_hh: Tensor, b_ih: Option<Tensor>, b_hh: Option<Tensor>, hidden_dim: usize, config: LSTMConfig, device: Device, dtype: DType, } /// Creates a LSTM layer. pub fn lstm( in_dim: usize, hidden_dim: usize, config: LSTMConfig, vb: crate::VarBuilder, ) -> Result<LSTM> { let layer_idx = config.layer_idx; let w_ih = vb.get_with_hints( (4 * hidden_dim, in_dim), &format!("weight_ih_l{layer_idx}"), // Only a single layer is supported. config.w_ih_init, )?; let w_hh = vb.get_with_hints( (4 * hidden_dim, hidden_dim), &format!("weight_hh_l{layer_idx}"), // Only a single layer is supported. 
config.w_hh_init, )?; let b_ih = match config.b_ih_init { Some(init) => { Some(vb.get_with_hints(4 * hidden_dim, &format!("bias_ih_l{layer_idx}"), init)?) } None => None, }; let b_hh = match config.b_hh_init { Some(init) => { Some(vb.get_with_hints(4 * hidden_dim, &format!("bias_hh_l{layer_idx}"), init)?) } None => None, }; Ok(LSTM { w_ih, w_hh, b_ih, b_hh, hidden_dim, config, device: vb.device().clone(), dtype: vb.dtype(), }) } impl RNN for LSTM { type State = LSTMState; fn zero_state(&self, batch_dim: usize) -> Result<Self::State> { let zeros = Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?; Ok(Self::State { h: zeros.clone(), c: zeros.clone(), }) } fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> { let w_ih = input.matmul(&self.w_ih.t()?)?; let w_hh = in_state.h.matmul(&self.w_hh.t()?)?; let w_ih = match &self.b_ih { None => w_ih, Some(b_ih) => w_ih.broadcast_add(b_ih)?, }; let w_hh = match &self.b_hh { None => w_hh, Some(b_hh) => w_hh.broadcast_add(b_hh)?, }; let chunks = (&w_ih + &w_hh)?.chunk(4, 1)?; let in_gate = crate::ops::sigmoid(&chunks[0])?; let forget_gate = crate::ops::sigmoid(&chunks[1])?; let cell_gate = chunks[2].tanh()?; let out_gate = crate::ops::sigmoid(&chunks[3])?; let next_c = ((forget_gate * &in_state.c)? + (in_gate * cell_gate)?)?; let next_h = (out_gate * next_c.tanh()?)?; Ok(LSTMState { c: next_c, h: next_h, }) } fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> { let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>(); Tensor::stack(&states, 1) } } /// The state for a GRU network, this contains a single tensor. #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] pub struct GRUState { h: Tensor, } impl GRUState { /// The hidden state vector, which is also the output of the LSTM. pub fn h(&self) -> &Tensor { &self.h } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone, Copy)] pub struct GRUConfig { pub w_ih_init: super::Init, pub w_hh_init: super::Init, pub b_ih_init: Option<super::Init>, pub b_hh_init: Option<super::Init>, } impl Default for GRUConfig { fn default() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: Some(super::Init::Const(0.)), b_hh_init: Some(super::Init::Const(0.)), } } } impl GRUConfig { pub fn default_no_bias() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: None, b_hh_init: None, } } } /// A Gated Recurrent Unit (GRU) layer. /// /// <https://en.wikipedia.org/wiki/Gated_recurrent_unit> #[allow(clippy::upper_case_acronyms, unused)] #[derive(Clone, Debug)] pub struct GRU { w_ih: Tensor, w_hh: Tensor, b_ih: Option<Tensor>, b_hh: Option<Tensor>, hidden_dim: usize, config: GRUConfig, device: Device, dtype: DType, } /// Creates a GRU layer. pub fn gru( in_dim: usize, hidden_dim: usize, config: GRUConfig, vb: crate::VarBuilder, ) -> Result<GRU> { let w_ih = vb.get_with_hints( (3 * hidden_dim, in_dim), "weight_ih_l0", // Only a single layer is supported. config.w_ih_init, )?; let w_hh = vb.get_with_hints( (3 * hidden_dim, hidden_dim), "weight_hh_l0", // Only a single layer is supported. 
config.w_hh_init, )?; let b_ih = match config.b_ih_init { Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_ih_l0", init)?), None => None, }; let b_hh = match config.b_hh_init { Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_hh_l0", init)?), None => None, }; Ok(GRU { w_ih, w_hh, b_ih, b_hh, hidden_dim, config, device: vb.device().clone(), dtype: vb.dtype(), }) } impl RNN for GRU { type State = GRUState; fn zero_state(&self, batch_dim: usize) -> Result<Self::State> { let h = Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?; Ok(Self::State { h }) } fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> { let w_ih = input.matmul(&self.w_ih.t()?)?; let w_hh = in_state.h.matmul(&self.w_hh.t()?)?; let w_ih = match &self.b_ih { None => w_ih, Some(b_ih) => w_ih.broadcast_add(b_ih)?, }; let w_hh = match &self.b_hh { None => w_hh, Some(b_hh) => w_hh.broadcast_add(b_hh)?, }; let chunks_ih = w_ih.chunk(3, 1)?; let chunks_hh = w_hh.chunk(3, 1)?; let r_gate = crate::ops::sigmoid(&(&chunks_ih[0] + &chunks_hh[0])?)?; let z_gate = crate::ops::sigmoid(&(&chunks_ih[1] + &chunks_hh[1])?)?; let n_gate = (&chunks_ih[2] + (r_gate * &chunks_hh[2])?)?.tanh(); let next_h = ((&z_gate * &in_state.h)? - ((&z_gate - 1.)? * n_gate)?)?; Ok(GRUState { h: next_h }) } fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> { let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>(); Tensor::cat(&states, 1) } }
candle/candle-nn/src/rnn.rs/0
{ "file_path": "candle/candle-nn/src/rnn.rs", "repo_id": "candle", "token_count": 4874 }
35
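// Hedged usage sketch for the RNN trait in rnn.rs above: build a single-layer LSTM from
// a fresh VarMap and run it over a (batch, seq, features) batch. It assumes `lstm`,
// `LSTMConfig` and the `RNN` trait are re-exported at the candle_nn crate root;
// dimensions are arbitrary example values.
use candle::{DType, Device, Result, Tensor};
use candle_nn::{lstm, LSTMConfig, VarBuilder, VarMap, RNN};

fn lstm_sketch() -> Result<Tensor> {
    let device = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &device);
    let net = lstm(16, 32, LSTMConfig::default(), vb.pp("lstm"))?;
    let xs = Tensor::randn(0f32, 1., (4, 10, 16), &device)?; // (batch, seq, features)
    let states = net.seq(&xs)?;   // Vec<LSTMState>, one state per step
    net.states_to_tensor(&states) // stacks h into (batch, seq, hidden)
}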
use candle::Result; use prost::Message; pub mod onnx { include!(concat!(env!("OUT_DIR"), "/onnx.rs")); } pub mod eval; pub use eval::{dtype, simple_eval}; pub fn read_file<P: AsRef<std::path::Path>>(p: P) -> Result<onnx::ModelProto> { let buf = std::fs::read(p)?; onnx::ModelProto::decode(buf.as_slice()).map_err(candle::Error::wrap) }
candle/candle-onnx/src/lib.rs/0
{ "file_path": "candle/candle-onnx/src/lib.rs", "repo_id": "candle", "token_count": 154 }
36
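// Minimal sketch of wiring the two candle-onnx helpers above together. The file name,
// the input name "data" and the exact `simple_eval` signature (a name -> Tensor map in,
// a name -> Tensor map out) are assumptions made for illustration only.
use std::collections::HashMap;
use candle::{DType, Device, Result, Tensor};

fn onnx_sketch() -> Result<()> {
    let model = candle_onnx::read_file("model.onnx")?;
    let input = Tensor::zeros((1, 3, 224, 224), DType::F32, &Device::Cpu)?;
    let mut inputs = HashMap::new();
    inputs.insert("data".to_string(), input);
    let outputs = candle_onnx::simple_eval(&model, inputs)?;
    for (name, value) in outputs.iter() {
        println!("{name}: {:?}", value.shape());
    }
    Ok(())
}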
from .module import Module from .container import Sequential, ModuleList, ModuleDict from .sparse import Embedding from .normalization import LayerNorm from .linear import Linear
candle/candle-pyo3/py_src/candle/nn/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/__init__.py", "repo_id": "candle", "token_count": 43 }
37
use ::candle::Tensor; use pyo3::prelude::*; #[derive(Clone, Debug)] /// Represents an absolute shape e.g. (1, 2, 3) pub struct PyShape(Vec<usize>); impl<'source> pyo3::FromPyObject<'source> for PyShape { fn extract(ob: &'source PyAny) -> PyResult<Self> { if ob.is_none() { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>( "Shape cannot be None", )); } let tuple = ob.downcast::<pyo3::types::PyTuple>()?; if tuple.len() == 1 { let first_element = tuple.get_item(0)?; let dims: Vec<usize> = pyo3::FromPyObject::extract(first_element)?; Ok(PyShape(dims)) } else { let dims: Vec<usize> = pyo3::FromPyObject::extract(tuple)?; Ok(PyShape(dims)) } } } impl From<PyShape> for ::candle::Shape { fn from(val: PyShape) -> Self { val.0.into() } } #[derive(Clone, Debug)] /// Represents a shape with a hole in it e.g. (1, -1, 3) pub struct PyShapeWithHole(Vec<isize>); impl<'source> pyo3::FromPyObject<'source> for PyShapeWithHole { fn extract(ob: &'source PyAny) -> PyResult<Self> { if ob.is_none() { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>( "Shape cannot be None", )); } let tuple = ob.downcast::<pyo3::types::PyTuple>()?; let dims: Vec<isize> = if tuple.len() == 1 { let first_element = tuple.get_item(0)?; pyo3::FromPyObject::extract(first_element)? } else { pyo3::FromPyObject::extract(tuple)? }; // Ensure we have only positive numbers and at most one "hole" (-1) let negative_ones = dims.iter().filter(|&&x| x == -1).count(); let any_invalid_dimensions = dims.iter().any(|&x| x < -1 || x == 0); if negative_ones > 1 || any_invalid_dimensions { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!( "Invalid dimension in shape: {:?}", dims ))); } Ok(PyShapeWithHole(dims)) } } impl PyShapeWithHole { /// Returns `true` if the shape is absolute e.g. (1, 2, 3) pub fn is_absolute(&self) -> bool { self.0.iter().all(|x| *x > 0) } /// Convert a relative shape to an absolute shape e.g. (1, -1) -> (1, 12) pub fn to_absolute(&self, t: &Tensor) -> PyResult<PyShape> { if self.is_absolute() { return Ok(PyShape( self.0.iter().map(|x| *x as usize).collect::<Vec<usize>>(), )); } let mut elements = t.elem_count(); let mut new_dims: Vec<usize> = vec![]; for dim in self.0.iter() { if *dim > 0 { new_dims.push(*dim as usize); elements /= *dim as usize; } else if *dim == -1 { new_dims.push(elements); } else { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!( "Invalid dimension in shape: {}", dim ))); } } Ok(PyShape(new_dims)) } }
candle/candle-pyo3/src/shape.rs/0
{ "file_path": "candle/candle-pyo3/src/shape.rs", "repo_id": "candle", "token_count": 1646 }
38
use super::with_tracing::{layer_norm, linear, LayerNorm, Linear}; use candle::{DType, Device, Result, Tensor}; use candle_nn::{embedding, Embedding, Module, VarBuilder}; use serde::Deserialize; pub const DTYPE: DType = DType::F32; #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] #[serde(rename_all = "lowercase")] pub enum HiddenAct { Gelu, GeluApproximate, Relu, } struct HiddenActLayer { act: HiddenAct, span: tracing::Span, } impl HiddenActLayer { fn new(act: HiddenAct) -> Self { let span = tracing::span!(tracing::Level::TRACE, "hidden-act"); Self { act, span } } fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> { let _enter = self.span.enter(); match self.act { // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213 HiddenAct::Gelu => xs.gelu_erf(), HiddenAct::GeluApproximate => xs.gelu(), HiddenAct::Relu => xs.relu(), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)] #[serde(rename_all = "lowercase")] enum PositionEmbeddingType { #[default] Absolute, } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/configuration_bert.py#L1 #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { vocab_size: usize, hidden_size: usize, num_hidden_layers: usize, num_attention_heads: usize, intermediate_size: usize, pub hidden_act: HiddenAct, hidden_dropout_prob: f64, max_position_embeddings: usize, type_vocab_size: usize, initializer_range: f64, layer_norm_eps: f64, pad_token_id: usize, #[serde(default)] position_embedding_type: PositionEmbeddingType, #[serde(default)] use_cache: bool, classifier_dropout: Option<f64>, model_type: Option<String>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 30522, hidden_size: 768, num_hidden_layers: 12, num_attention_heads: 12, intermediate_size: 3072, hidden_act: HiddenAct::Gelu, hidden_dropout_prob: 0.1, max_position_embeddings: 512, type_vocab_size: 2, initializer_range: 0.02, layer_norm_eps: 1e-12, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Absolute, use_cache: true, classifier_dropout: None, model_type: Some("bert".to_string()), } } } impl Config { fn _all_mini_lm_l6_v2() -> Self { // https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/blob/main/config.json Self { vocab_size: 30522, hidden_size: 384, num_hidden_layers: 6, num_attention_heads: 12, intermediate_size: 1536, hidden_act: HiddenAct::Gelu, hidden_dropout_prob: 0.1, max_position_embeddings: 512, type_vocab_size: 2, initializer_range: 0.02, layer_norm_eps: 1e-12, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Absolute, use_cache: true, classifier_dropout: None, model_type: Some("bert".to_string()), } } } struct Dropout { #[allow(dead_code)] pr: f64, } impl Dropout { fn new(pr: f64) -> Self { Self { pr } } } impl Module for Dropout { fn forward(&self, x: &Tensor) -> Result<Tensor> { // TODO Ok(x.clone()) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L180 struct BertEmbeddings { word_embeddings: Embedding, position_embeddings: Option<Embedding>, token_type_embeddings: Embedding, layer_norm: LayerNorm, dropout: Dropout, span: tracing::Span, } impl BertEmbeddings { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let word_embeddings = embedding( config.vocab_size, config.hidden_size, vb.pp("word_embeddings"), )?; let position_embeddings = 
embedding( config.max_position_embeddings, config.hidden_size, vb.pp("position_embeddings"), )?; let token_type_embeddings = embedding( config.type_vocab_size, config.hidden_size, vb.pp("token_type_embeddings"), )?; let layer_norm = layer_norm( config.hidden_size, config.layer_norm_eps, vb.pp("LayerNorm"), )?; Ok(Self { word_embeddings, position_embeddings: Some(position_embeddings), token_type_embeddings, layer_norm, dropout: Dropout::new(config.hidden_dropout_prob), span: tracing::span!(tracing::Level::TRACE, "embeddings"), }) } fn forward(&self, input_ids: &Tensor, token_type_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_bsize, seq_len) = input_ids.dims2()?; let input_embeddings = self.word_embeddings.forward(input_ids)?; let token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)?; let mut embeddings = (&input_embeddings + token_type_embeddings)?; if let Some(position_embeddings) = &self.position_embeddings { // TODO: Proper absolute positions? let position_ids = (0..seq_len as u32).collect::<Vec<_>>(); let position_ids = Tensor::new(&position_ids[..], input_ids.device())?; embeddings = embeddings.broadcast_add(&position_embeddings.forward(&position_ids)?)? } let embeddings = self.layer_norm.forward(&embeddings)?; let embeddings = self.dropout.forward(&embeddings)?; Ok(embeddings) } } struct BertSelfAttention { query: Linear, key: Linear, value: Linear, dropout: Dropout, num_attention_heads: usize, attention_head_size: usize, span: tracing::Span, span_softmax: tracing::Span, } impl BertSelfAttention { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention_head_size = config.hidden_size / config.num_attention_heads; let all_head_size = config.num_attention_heads * attention_head_size; let dropout = Dropout::new(config.hidden_dropout_prob); let hidden_size = config.hidden_size; let query = linear(hidden_size, all_head_size, vb.pp("query"))?; let value = linear(hidden_size, all_head_size, vb.pp("value"))?; let key = linear(hidden_size, all_head_size, vb.pp("key"))?; Ok(Self { query, key, value, dropout, num_attention_heads: config.num_attention_heads, attention_head_size, span: tracing::span!(tracing::Level::TRACE, "self-attn"), span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"), }) } fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> { let mut new_x_shape = xs.dims().to_vec(); new_x_shape.pop(); new_x_shape.push(self.num_attention_heads); new_x_shape.push(self.attention_head_size); let xs = xs.reshape(new_x_shape.as_slice())?.transpose(1, 2)?; xs.contiguous() } } impl Module for BertSelfAttention { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let query_layer = self.query.forward(hidden_states)?; let key_layer = self.key.forward(hidden_states)?; let value_layer = self.value.forward(hidden_states)?; let query_layer = self.transpose_for_scores(&query_layer)?; let key_layer = self.transpose_for_scores(&key_layer)?; let value_layer = self.transpose_for_scores(&value_layer)?; let attention_scores = query_layer.matmul(&key_layer.t()?)?; let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?; let attention_probs = { let _enter_sm = self.span_softmax.enter(); candle_nn::ops::softmax(&attention_scores, candle::D::Minus1)? 
}; let attention_probs = self.dropout.forward(&attention_probs)?; let context_layer = attention_probs.matmul(&value_layer)?; let context_layer = context_layer.transpose(1, 2)?.contiguous()?; let context_layer = context_layer.flatten_from(candle::D::Minus2)?; Ok(context_layer) } } struct BertSelfOutput { dense: Linear, layer_norm: LayerNorm, dropout: Dropout, span: tracing::Span, } impl BertSelfOutput { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear(config.hidden_size, config.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm( config.hidden_size, config.layer_norm_eps, vb.pp("LayerNorm"), )?; let dropout = Dropout::new(config.hidden_dropout_prob); Ok(Self { dense, layer_norm, dropout, span: tracing::span!(tracing::Level::TRACE, "self-out"), }) } fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states = self.dense.forward(hidden_states)?; let hidden_states = self.dropout.forward(&hidden_states)?; self.layer_norm.forward(&(hidden_states + input_tensor)?) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L392 struct BertAttention { self_attention: BertSelfAttention, self_output: BertSelfOutput, span: tracing::Span, } impl BertAttention { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let self_attention = BertSelfAttention::load(vb.pp("self"), config)?; let self_output = BertSelfOutput::load(vb.pp("output"), config)?; Ok(Self { self_attention, self_output, span: tracing::span!(tracing::Level::TRACE, "attn"), }) } } impl Module for BertAttention { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let self_outputs = self.self_attention.forward(hidden_states)?; let attention_output = self.self_output.forward(&self_outputs, hidden_states)?; Ok(attention_output) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L441 struct BertIntermediate { dense: Linear, intermediate_act: HiddenActLayer, span: tracing::Span, } impl BertIntermediate { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear(config.hidden_size, config.intermediate_size, vb.pp("dense"))?; Ok(Self { dense, intermediate_act: HiddenActLayer::new(config.hidden_act), span: tracing::span!(tracing::Level::TRACE, "inter"), }) } } impl Module for BertIntermediate { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states = self.dense.forward(hidden_states)?; let ys = self.intermediate_act.forward(&hidden_states)?; Ok(ys) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L456 struct BertOutput { dense: Linear, layer_norm: LayerNorm, dropout: Dropout, span: tracing::Span, } impl BertOutput { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear(config.intermediate_size, config.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm( config.hidden_size, config.layer_norm_eps, vb.pp("LayerNorm"), )?; let dropout = Dropout::new(config.hidden_dropout_prob); Ok(Self { dense, layer_norm, dropout, span: tracing::span!(tracing::Level::TRACE, "out"), }) } fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states = 
self.dense.forward(hidden_states)?; let hidden_states = self.dropout.forward(&hidden_states)?; self.layer_norm.forward(&(hidden_states + input_tensor)?) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L470 struct BertLayer { attention: BertAttention, intermediate: BertIntermediate, output: BertOutput, span: tracing::Span, } impl BertLayer { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention = BertAttention::load(vb.pp("attention"), config)?; let intermediate = BertIntermediate::load(vb.pp("intermediate"), config)?; let output = BertOutput::load(vb.pp("output"), config)?; Ok(Self { attention, intermediate, output, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } } impl Module for BertLayer { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let attention_output = self.attention.forward(hidden_states)?; // TODO: Support cross-attention? // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 // TODO: Support something similar to `apply_chunking_to_forward`? let intermediate_output = self.intermediate.forward(&attention_output)?; let layer_output = self .output .forward(&intermediate_output, &attention_output)?; Ok(layer_output) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556 struct BertEncoder { layers: Vec<BertLayer>, span: tracing::Span, } impl BertEncoder { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let layers = (0..config.num_hidden_layers) .map(|index| BertLayer::load(vb.pp(&format!("layer.{index}")), config)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); Ok(BertEncoder { layers, span }) } } impl Module for BertEncoder { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut hidden_states = hidden_states.clone(); // Use a loop rather than a fold as it's easier to modify when adding debug/... for layer in self.layers.iter() { hidden_states = layer.forward(&hidden_states)? 
} Ok(hidden_states) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874 pub struct BertModel { embeddings: BertEmbeddings, encoder: BertEncoder, pub device: Device, span: tracing::Span, } impl BertModel { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let (embeddings, encoder) = match ( BertEmbeddings::load(vb.pp("embeddings"), config), BertEncoder::load(vb.pp("encoder"), config), ) { (Ok(embeddings), Ok(encoder)) => (embeddings, encoder), (Err(err), _) | (_, Err(err)) => { if let Some(model_type) = &config.model_type { if let (Ok(embeddings), Ok(encoder)) = ( BertEmbeddings::load(vb.pp(&format!("{model_type}.embeddings")), config), BertEncoder::load(vb.pp(&format!("{model_type}.encoder")), config), ) { (embeddings, encoder) } else { return Err(err); } } else { return Err(err); } } }; Ok(Self { embeddings, encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } pub fn forward(&self, input_ids: &Tensor, token_type_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let embedding_output = self.embeddings.forward(input_ids, token_type_ids)?; let sequence_output = self.encoder.forward(&embedding_output)?; Ok(sequence_output) } }
candle/candle-transformers/src/models/bert.rs/0
{ "file_path": "candle/candle-transformers/src/models/bert.rs", "repo_id": "candle", "token_count": 7941 }
39
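// Hedged end-to-end sketch for bert.rs above. Where the VarBuilder weights come from
// (e.g. a safetensors checkpoint) is left to the caller, and the token ids below are
// placeholder values rather than a real tokenization.
use candle::{Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config};

fn bert_sketch(vb: VarBuilder, config: &Config) -> Result<Tensor> {
    let model = BertModel::load(vb, config)?;
    let device = model.device.clone();
    // One sequence of five token ids, all in segment 0.
    let input_ids = Tensor::new(&[[101u32, 7592, 2088, 999, 102]], &device)?;
    let token_type_ids = input_ids.zeros_like()?;
    model.forward(&input_ids, &token_type_ids) // (batch, seq_len, hidden_size)
}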
use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::linear_no_bias as linear; use candle_nn::{embedding, rms_norm, Embedding, Linear, Module, RmsNorm, VarBuilder}; use std::collections::HashMap; #[derive(Debug, Clone)] pub struct Config { pub dim: usize, // transformer dimension pub hidden_dim: usize, // for ffn layers pub n_layers: usize, // number of layers pub n_heads: usize, // number of query heads pub n_kv_heads: usize, // number of key/value heads (can be < query heads because of multiquery) pub vocab_size: usize, // vocabulary size, usually 256 (byte-level) pub seq_len: usize, // max sequence length pub norm_eps: f64, } impl Config { pub fn tiny_260k() -> Self { Self { dim: 64, hidden_dim: 768, n_layers: 5, n_heads: 8, n_kv_heads: 4, vocab_size: 32000, seq_len: 512, norm_eps: 1e-5, } } pub fn tiny_15m() -> Self { Self { dim: 288, hidden_dim: 768, n_layers: 6, n_heads: 6, n_kv_heads: 6, vocab_size: 32000, seq_len: 256, norm_eps: 1e-5, } } pub fn tiny_42m() -> Self { Self { dim: 512, hidden_dim: 768, n_layers: 8, n_heads: 8, n_kv_heads: 8, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } pub fn tiny_110m() -> Self { Self { dim: 768, hidden_dim: 768, n_layers: 12, n_heads: 12, n_kv_heads: 12, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } } #[derive(Debug, Clone)] pub struct Cache { masks: HashMap<usize, Tensor>, pub use_kv_cache: bool, pub kvs: Vec<Option<(Tensor, Tensor)>>, pub cos: Tensor, pub sin: Tensor, device: Device, } impl Cache { pub fn new(use_kv_cache: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_elem = cfg.dim / cfg.n_heads; let theta: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / n_elem as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), vb.device())?; let idx_theta = Tensor::arange(0, cfg.seq_len as u32, vb.device())? .to_dtype(DType::F32)? .reshape((cfg.seq_len, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let precomputed_cos = idx_theta.cos()?; let precomputed_sin = idx_theta.sin()?; let freq_cis_real = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_real") .unwrap_or(precomputed_cos); let freq_cis_imag = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_imag") .unwrap_or(precomputed_sin); let cos = freq_cis_real.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; let sin = freq_cis_imag.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; Ok(Self { masks: HashMap::new(), use_kv_cache, kvs: vec![None; cfg.n_layers], cos, sin, device: vb.device().clone(), }) } pub fn mask(&mut self, t: usize) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), &self.device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } } fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? 
} #[derive(Debug, Clone)] struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_head: usize, n_key_value_head: usize, head_dim: usize, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> { let (b_sz, seq_len, h, n_embd) = x.dims4()?; let cos = cache.cos.i(index_pos..index_pos + seq_len)?; let sin = cache.sin.i(index_pos..index_pos + seq_len)?; let cos = cos.unsqueeze(1)?; let sin = sin.unsqueeze(1)?; let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?; Ok(rope) } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let (b_sz, seq_len, n_embd) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?; let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let q = self.apply_rotary_emb(&q, index_pos, cache)?; let mut k = self.apply_rotary_emb(&k, index_pos, cache)?; if cache.use_kv_cache { if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] { k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?; } cache.kvs[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?.contiguous()?; let k = k.transpose(1, 2)?.contiguous()?; let v = v.transpose(1, 2)?.contiguous()?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?; let att = masked_fill(&att, &mask, f32::NEG_INFINITY)?; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_head / self.n_key_value_head; if n_rep == 1 { Ok(x) } else { let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?; let x = x .unsqueeze(3)? .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))? 
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let size_in = cfg.dim; let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads; let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_head: cfg.n_heads, n_key_value_head: cfg.n_kv_heads, head_dim: cfg.dim / cfg.n_heads, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, } impl Mlp { fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self { Self { c_fc1, c_fc2, c_proj, } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h_size = cfg.dim; let i_size = cfg.hidden_dim; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self::new(c_fc1, c_fc2, c_proj)) } } #[derive(Debug, Clone)] struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let input_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } #[derive(Debug, Clone)] pub struct Llama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, pub config: Config, } impl Llama { pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> { let (_b_sz, _seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx, cache)?; } let x = self.ln_f.forward(&x)?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let wte = embedding(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.n_layers) .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), &cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, config: cfg, }) } }
candle/candle-transformers/src/models/llama2_c.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama2_c.rs", "repo_id": "candle", "token_count": 6423 }
40
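The `llama2_c` module above exposes `Config`, `Cache`, and `Llama` as its public surface. The following is a minimal usage sketch rather than code from the repository: it assumes the `candle`, `candle-nn`, and `candle-transformers` crates are in scope, and it substitutes an all-zeros `VarBuilder` for a real llama2.c checkpoint, so the logits are meaningless but the calls and shapes line up with the definitions above. The `"rot"` prefix for the rotary tables is likewise an assumption of this sketch.

```rust
use candle::{DType, Device, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::llama2_c::{Cache, Config, Llama};

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Zero weights stand in for a real checkpoint loaded from disk.
    let vb = VarBuilder::zeros(DType::F32, &device);
    let config = Config::tiny_15m();
    let mut cache = Cache::new(true, &config, vb.pp("rot"))?;
    let model = Llama::load(vb, config)?;

    // Feed a single token id; the output is (batch, seq_len, vocab_size) logits.
    let tokens = Tensor::new(&[[1u32]], &device)?;
    let logits = model.forward(&tokens, 0, &mut cache)?;
    println!("logits: {:?}", logits.dims());
    Ok(())
}
```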
use super::llama2_c::{Cache, Config}; use crate::quantized_nn::{linear_no_bias as linear, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, IndexOp, Module, Result, Tensor, D}; fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? } #[derive(Debug, Clone)] struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_head: usize, n_key_value_head: usize, head_dim: usize, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> { let (b_sz, seq_len, h, n_embd) = x.dims4()?; let cos = cache.cos.i(index_pos..index_pos + seq_len)?; let sin = cache.sin.i(index_pos..index_pos + seq_len)?; let cos = cos.unsqueeze(1)?; let sin = sin.unsqueeze(1)?; let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?; Ok(rope) } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let (b_sz, seq_len, n_embd) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?; let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let q = self.apply_rotary_emb(&q, index_pos, cache)?; let mut k = self.apply_rotary_emb(&k, index_pos, cache)?; if cache.use_kv_cache { if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] { k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?; } cache.kvs[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?.contiguous()?; let k = k.transpose(1, 2)?.contiguous()?; let v = v.transpose(1, 2)?.contiguous()?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?; let att = masked_fill(&att, &mask, f32::NEG_INFINITY)?; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_head / self.n_key_value_head; if n_rep == 1 { Ok(x) } else { let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?; let x = x .unsqueeze(3)? .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))? 
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let size_in = cfg.dim; let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads; let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_head: cfg.n_heads, n_key_value_head: cfg.n_kv_heads, head_dim: cfg.dim / cfg.n_heads, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, } impl Mlp { fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self { Self { c_fc1, c_fc2, c_proj, } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h_size = cfg.dim; let i_size = cfg.hidden_dim; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self::new(c_fc1, c_fc2, c_proj)) } } #[derive(Debug, Clone)] struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let input_layernorm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } #[derive(Debug, Clone)] pub struct QLlama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, pub config: Config, } impl QLlama { pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> { let (_b_sz, _seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx, cache)?; } let x = self.ln_f.forward(&x)?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let wte = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.n_layers) .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), &cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, config: cfg, }) } }
candle/candle-transformers/src/models/quantized_llama2_c.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_llama2_c.rs", "repo_id": "candle", "token_count": 4375 }
41
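`quantized_llama2_c` reuses `Cache` and `Config` from the float module and re-exports the GGUF-backed `VarBuilder`. Below is a hedged sketch of wiring the two together: the GGUF file name is hypothetical, the exact `from_gguf` signature differs between candle versions (older ones take only a path, no device), and the float `VarBuilder` used for the cache is all zeros here rather than real rotary tables.

```rust
use candle::{DType, Device, Result, Tensor};
use candle_transformers::models::llama2_c::{Cache, Config};
use candle_transformers::models::quantized_llama2_c::{QLlama, VarBuilder as QVarBuilder};

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Hypothetical quantized export of a llama2.c checkpoint.
    let qvb = QVarBuilder::from_gguf("llama2-c-tiny-15m-q4k.gguf", &device)?;
    let config = Config::tiny_15m();

    // The rotary tables used by `Cache` are plain float tensors, so a regular
    // (here zero-initialised) VarBuilder is enough for this sketch.
    let fvb = candle_nn::VarBuilder::zeros(DType::F32, &device);
    let mut cache = Cache::new(true, &config, fvb)?;

    let model = QLlama::load(qvb, config)?;
    let tokens = Tensor::new(&[[1u32]], &device)?;
    let logits = model.forward(&tokens, 0, &mut cache)?;
    println!("logits: {:?}", logits.dims());
    Ok(())
}
```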
use candle::{IndexOp, Result, Tensor}; use candle_nn::{Module, VarBuilder}; use super::transformer::TwoWayTransformer; #[derive(Debug)] struct MlpMaskDecoder { layers: Vec<super::Linear>, sigmoid_output: bool, span: tracing::Span, } impl MlpMaskDecoder { fn new( input_dim: usize, hidden_dim: usize, output_dim: usize, num_layers: usize, sigmoid_output: bool, vb: VarBuilder, ) -> Result<Self> { let mut layers = Vec::with_capacity(num_layers); let vb = vb.pp("layers"); for i in 0..num_layers { let in_dim = if i == 0 { input_dim } else { hidden_dim }; let out_dim = if i + 1 == num_layers { output_dim } else { hidden_dim }; let layer = super::linear(vb.pp(i), in_dim, out_dim, true)?; layers.push(layer) } let span = tracing::span!(tracing::Level::TRACE, "mlp-mask-decoder"); Ok(Self { layers, sigmoid_output, span, }) } } impl Module for MlpMaskDecoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for (i, layer) in self.layers.iter().enumerate() { xs = layer.forward(&xs)?; if i + 1 < self.layers.len() { xs = xs.relu()? } } if self.sigmoid_output { candle_nn::ops::sigmoid(&xs) } else { Ok(xs) } } } #[derive(Debug)] pub struct MaskDecoder { iou_token: candle_nn::Embedding, mask_tokens: candle_nn::Embedding, iou_prediction_head: MlpMaskDecoder, output_upscaling_conv1: candle_nn::ConvTranspose2d, output_upscaling_ln: super::LayerNorm2d, output_upscaling_conv2: candle_nn::ConvTranspose2d, num_mask_tokens: usize, output_hypernetworks_mlps: Vec<MlpMaskDecoder>, transformer: TwoWayTransformer, span: tracing::Span, } impl MaskDecoder { pub fn new( transformer_dim: usize, num_multimask_outputs: usize, iou_head_depth: usize, iou_head_hidden_dim: usize, vb: VarBuilder, ) -> Result<Self> { let num_mask_tokens = num_multimask_outputs + 1; let iou_prediction_head = MlpMaskDecoder::new( transformer_dim, iou_head_hidden_dim, num_mask_tokens, iou_head_depth, false, vb.pp("iou_prediction_head"), )?; let iou_token = candle_nn::embedding(1, transformer_dim, vb.pp("iou_token"))?; let mask_tokens = candle_nn::embedding(num_mask_tokens, transformer_dim, vb.pp("mask_tokens"))?; let cfg = candle_nn::ConvTranspose2dConfig { stride: 2, ..Default::default() }; let output_upscaling_conv1 = candle_nn::conv_transpose2d( transformer_dim, transformer_dim / 4, 2, cfg, vb.pp("output_upscaling.0"), )?; let output_upscaling_ln = super::LayerNorm2d::new(transformer_dim / 4, 1e-6, vb.pp("output_upscaling.1"))?; let output_upscaling_conv2 = candle_nn::conv_transpose2d( transformer_dim / 4, transformer_dim / 8, 2, cfg, vb.pp("output_upscaling.3"), )?; let mut output_hypernetworks_mlps = Vec::with_capacity(num_mask_tokens); let vb_o = vb.pp("output_hypernetworks_mlps"); for i in 0..num_mask_tokens { let mlp = MlpMaskDecoder::new( transformer_dim, transformer_dim, transformer_dim / 8, 3, false, vb_o.pp(i), )?; output_hypernetworks_mlps.push(mlp) } let transformer = TwoWayTransformer::new( /* depth */ 2, /* embedding_dim */ transformer_dim, /* num_heads */ 8, /* mlp_dim */ 2048, vb.pp("transformer"), )?; let span = tracing::span!(tracing::Level::TRACE, "mask-decoder"); Ok(Self { iou_token, mask_tokens, iou_prediction_head, output_upscaling_conv1, output_upscaling_ln, output_upscaling_conv2, num_mask_tokens, output_hypernetworks_mlps, transformer, span, }) } pub fn forward( &self, image_embeddings: &Tensor, image_pe: &Tensor, sparse_prompt_embeddings: &Tensor, dense_prompt_embeddings: &Tensor, multimask_output: bool, ) -> Result<(Tensor, Tensor)> { let _enter = self.span.enter(); 
let (masks, iou_pred) = self.predict_masks( image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, )?; let masks = if multimask_output { masks.i((.., 1..))? } else { masks.i((.., 0..1))? }; let iou_pred = if multimask_output { iou_pred.i((.., 1..))? } else { iou_pred.i((.., 0..1))? }; Ok((masks, iou_pred)) } fn predict_masks( &self, image_embeddings: &Tensor, image_pe: &Tensor, sparse_prompt_embeddings: &Tensor, dense_prompt_embeddings: &Tensor, ) -> Result<(Tensor, Tensor)> { // Concatenate output tokens. let output_tokens = Tensor::cat( &[self.iou_token.embeddings(), self.mask_tokens.embeddings()], 0, )?; let (d1, d2) = output_tokens.dims2()?; let output_tokens = output_tokens .unsqueeze(0)? .expand((sparse_prompt_embeddings.dim(0)?, d1, d2))?; let tokens = Tensor::cat(&[&output_tokens, sparse_prompt_embeddings], 1)?; // Expand per-image data in batch direction to be per mask let src = repeat_interleave(image_embeddings, tokens.dim(0)?, 0)?; let src = src.broadcast_add(dense_prompt_embeddings)?; let pos_src = repeat_interleave(image_pe, tokens.dim(0)?, 0)?; let (b, c, h, w) = src.dims4()?; // Run the transformer let (hs, src) = self.transformer.forward(&src, &pos_src, &tokens)?; let iou_token_out = hs.i((.., 0))?; let mask_tokens_out = hs.i((.., 1..1 + self.num_mask_tokens))?; // Upscale mask embeddings and predict masks using the masks tokens. let src = src.transpose(1, 2)?.reshape((b, c, h, w))?; let upscaled_embedding = self .output_upscaling_conv1 .forward(&src)? .apply(&self.output_upscaling_ln)? .gelu()? .apply(&self.output_upscaling_conv2)? .gelu()?; let mut hyper_in_list = Vec::with_capacity(self.num_mask_tokens); for (i, mlp) in self.output_hypernetworks_mlps.iter().enumerate() { let h = mlp.forward(&mask_tokens_out.i((.., i))?)?; hyper_in_list.push(h) } let hyper_in = Tensor::stack(hyper_in_list.as_slice(), 1)?.contiguous()?; let (b, c, h, w) = upscaled_embedding.dims4()?; let masks = hyper_in.matmul(&upscaled_embedding.reshape((b, c, h * w))?)?; let masks = masks.reshape((b, (), h, w))?; // Generate mask quality predictions. let iou_pred = self.iou_prediction_head.forward(&iou_token_out)?; Ok((masks, iou_pred)) } } // Equivalent to torch.repeat_interleave fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> { let img = img.unsqueeze(dim + 1)?; let mut dims = img.dims().to_vec(); dims[dim + 1] = repeats; img.broadcast_as(dims)?.flatten(dim, dim + 1) }
candle/candle-transformers/src/models/segment_anything/mask_decoder.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/mask_decoder.rs", "repo_id": "candle", "token_count": 4213 }
42
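The `MaskDecoder` above is the SAM-style mask head: it consumes image embeddings plus sparse and dense prompt embeddings and returns masks together with IoU predictions. A construction-only sketch follows; the module path and the 256-dimensional transformer width (SAM's usual setting) are assumptions, and the zero weights are placeholders for a real checkpoint.

```rust
use candle::{DType, Device, Result};
use candle_nn::VarBuilder;
use candle_transformers::models::segment_anything::mask_decoder::MaskDecoder;

fn main() -> Result<()> {
    let device = Device::Cpu;
    let vb = VarBuilder::zeros(DType::F32, &device);
    // transformer_dim = 256, 3 multimask outputs, IoU head depth 3, hidden dim 256.
    let _decoder = MaskDecoder::new(256, 3, 3, 256, vb)?;
    // `_decoder.forward(&image_embeddings, &image_pe, &sparse, &dense, true)`
    // would then return `(masks, iou_predictions)`.
    Ok(())
}
```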
//! 2D UNet Building Blocks //! use super::attention::{ AttentionBlock, AttentionBlockConfig, SpatialTransformer, SpatialTransformerConfig, }; use super::resnet::{ResnetBlock2D, ResnetBlock2DConfig}; use crate::models::with_tracing::{conv2d, Conv2d}; use candle::{Module, Result, Tensor, D}; use candle_nn as nn; #[derive(Debug)] struct Downsample2D { conv: Option<Conv2d>, padding: usize, span: tracing::Span, } impl Downsample2D { fn new( vs: nn::VarBuilder, in_channels: usize, use_conv: bool, out_channels: usize, padding: usize, ) -> Result<Self> { let conv = if use_conv { let config = nn::Conv2dConfig { stride: 2, padding, ..Default::default() }; let conv = conv2d(in_channels, out_channels, 3, config, vs.pp("conv"))?; Some(conv) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "downsample2d"); Ok(Self { conv, padding, span, }) } } impl Module for Downsample2D { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); match &self.conv { None => xs.avg_pool2d(2), Some(conv) => { if self.padding == 0 { let xs = xs .pad_with_zeros(D::Minus1, 0, 1)? .pad_with_zeros(D::Minus2, 0, 1)?; conv.forward(&xs) } else { conv.forward(xs) } } } } } // This does not support the conv-transpose mode. #[derive(Debug)] struct Upsample2D { conv: Conv2d, span: tracing::Span, } impl Upsample2D { fn new(vs: nn::VarBuilder, in_channels: usize, out_channels: usize) -> Result<Self> { let config = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv = conv2d(in_channels, out_channels, 3, config, vs.pp("conv"))?; let span = tracing::span!(tracing::Level::TRACE, "upsample2d"); Ok(Self { conv, span }) } } impl Upsample2D { fn forward(&self, xs: &Tensor, size: Option<(usize, usize)>) -> Result<Tensor> { let _enter = self.span.enter(); let xs = match size { None => { let (_bsize, _channels, h, w) = xs.dims4()?; xs.upsample_nearest2d(2 * h, 2 * w)? } Some((h, w)) => xs.upsample_nearest2d(h, w)?, }; self.conv.forward(&xs) } } #[derive(Debug, Clone, Copy)] pub struct DownEncoderBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: usize, pub output_scale_factor: f64, pub add_downsample: bool, pub downsample_padding: usize, } impl Default for DownEncoderBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_downsample: true, downsample_padding: 1, } } } #[derive(Debug)] pub struct DownEncoderBlock2D { resnets: Vec<ResnetBlock2D>, downsampler: Option<Downsample2D>, span: tracing::Span, pub config: DownEncoderBlock2DConfig, } impl DownEncoderBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: DownEncoderBlock2DConfig, ) -> Result<Self> { let resnets: Vec<_> = { let vs = vs.pp("resnets"); let conv_cfg = ResnetBlock2DConfig { eps: config.resnet_eps, out_channels: Some(out_channels), groups: config.resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels: None, ..Default::default() }; (0..(config.num_layers)) .map(|i| { let in_channels = if i == 0 { in_channels } else { out_channels }; ResnetBlock2D::new(vs.pp(&i.to_string()), in_channels, conv_cfg) }) .collect::<Result<Vec<_>>>()? 
}; let downsampler = if config.add_downsample { let downsample = Downsample2D::new( vs.pp("downsamplers").pp("0"), out_channels, true, out_channels, config.downsample_padding, )?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "down-enc2d"); Ok(Self { resnets, downsampler, span, config, }) } } impl Module for DownEncoderBlock2D { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for resnet in self.resnets.iter() { xs = resnet.forward(&xs, None)? } match &self.downsampler { Some(downsampler) => downsampler.forward(&xs), None => Ok(xs), } } } #[derive(Debug, Clone, Copy)] pub struct UpDecoderBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: usize, pub output_scale_factor: f64, pub add_upsample: bool, } impl Default for UpDecoderBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_upsample: true, } } } #[derive(Debug)] pub struct UpDecoderBlock2D { resnets: Vec<ResnetBlock2D>, upsampler: Option<Upsample2D>, span: tracing::Span, pub config: UpDecoderBlock2DConfig, } impl UpDecoderBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: UpDecoderBlock2DConfig, ) -> Result<Self> { let resnets: Vec<_> = { let vs = vs.pp("resnets"); let conv_cfg = ResnetBlock2DConfig { out_channels: Some(out_channels), eps: config.resnet_eps, groups: config.resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels: None, ..Default::default() }; (0..(config.num_layers)) .map(|i| { let in_channels = if i == 0 { in_channels } else { out_channels }; ResnetBlock2D::new(vs.pp(&i.to_string()), in_channels, conv_cfg) }) .collect::<Result<Vec<_>>>()? }; let upsampler = if config.add_upsample { let upsample = Upsample2D::new(vs.pp("upsamplers").pp("0"), out_channels, out_channels)?; Some(upsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "up-dec2d"); Ok(Self { resnets, upsampler, span, config, }) } } impl Module for UpDecoderBlock2D { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for resnet in self.resnets.iter() { xs = resnet.forward(&xs, None)? 
} match &self.upsampler { Some(upsampler) => upsampler.forward(&xs, None), None => Ok(xs), } } } #[derive(Debug, Clone, Copy)] pub struct UNetMidBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: Option<usize>, pub attn_num_head_channels: Option<usize>, // attention_type "default" pub output_scale_factor: f64, } impl Default for UNetMidBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: Some(32), attn_num_head_channels: Some(1), output_scale_factor: 1., } } } #[derive(Debug)] pub struct UNetMidBlock2D { resnet: ResnetBlock2D, attn_resnets: Vec<(AttentionBlock, ResnetBlock2D)>, span: tracing::Span, pub config: UNetMidBlock2DConfig, } impl UNetMidBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, temb_channels: Option<usize>, config: UNetMidBlock2DConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let vs_attns = vs.pp("attentions"); let resnet_groups = config .resnet_groups .unwrap_or_else(|| usize::min(in_channels / 4, 32)); let resnet_cfg = ResnetBlock2DConfig { eps: config.resnet_eps, groups: resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels, ..Default::default() }; let resnet = ResnetBlock2D::new(vs_resnets.pp("0"), in_channels, resnet_cfg)?; let attn_cfg = AttentionBlockConfig { num_head_channels: config.attn_num_head_channels, num_groups: resnet_groups, rescale_output_factor: config.output_scale_factor, eps: config.resnet_eps, }; let mut attn_resnets = vec![]; for index in 0..config.num_layers { let attn = AttentionBlock::new(vs_attns.pp(&index.to_string()), in_channels, attn_cfg)?; let resnet = ResnetBlock2D::new( vs_resnets.pp(&(index + 1).to_string()), in_channels, resnet_cfg, )?; attn_resnets.push((attn, resnet)) } let span = tracing::span!(tracing::Level::TRACE, "mid2d"); Ok(Self { resnet, attn_resnets, span, config, }) } pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = self.resnet.forward(xs, temb)?; for (attn, resnet) in self.attn_resnets.iter() { xs = resnet.forward(&attn.forward(&xs)?, temb)? 
} Ok(xs) } } #[derive(Debug, Clone, Copy)] pub struct UNetMidBlock2DCrossAttnConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: Option<usize>, pub attn_num_head_channels: usize, // attention_type "default" pub output_scale_factor: f64, pub cross_attn_dim: usize, pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, pub transformer_layers_per_block: usize, } impl Default for UNetMidBlock2DCrossAttnConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: Some(32), attn_num_head_channels: 1, output_scale_factor: 1., cross_attn_dim: 1280, sliced_attention_size: None, // Sliced attention disabled use_linear_projection: false, transformer_layers_per_block: 1, } } } #[derive(Debug)] pub struct UNetMidBlock2DCrossAttn { resnet: ResnetBlock2D, attn_resnets: Vec<(SpatialTransformer, ResnetBlock2D)>, span: tracing::Span, pub config: UNetMidBlock2DCrossAttnConfig, } impl UNetMidBlock2DCrossAttn { pub fn new( vs: nn::VarBuilder, in_channels: usize, temb_channels: Option<usize>, use_flash_attn: bool, config: UNetMidBlock2DCrossAttnConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let vs_attns = vs.pp("attentions"); let resnet_groups = config .resnet_groups .unwrap_or_else(|| usize::min(in_channels / 4, 32)); let resnet_cfg = ResnetBlock2DConfig { eps: config.resnet_eps, groups: resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels, ..Default::default() }; let resnet = ResnetBlock2D::new(vs_resnets.pp("0"), in_channels, resnet_cfg)?; let n_heads = config.attn_num_head_channels; let attn_cfg = SpatialTransformerConfig { depth: config.transformer_layers_per_block, num_groups: resnet_groups, context_dim: Some(config.cross_attn_dim), sliced_attention_size: config.sliced_attention_size, use_linear_projection: config.use_linear_projection, }; let mut attn_resnets = vec![]; for index in 0..config.num_layers { let attn = SpatialTransformer::new( vs_attns.pp(&index.to_string()), in_channels, n_heads, in_channels / n_heads, use_flash_attn, attn_cfg, )?; let resnet = ResnetBlock2D::new( vs_resnets.pp(&(index + 1).to_string()), in_channels, resnet_cfg, )?; attn_resnets.push((attn, resnet)) } let span = tracing::span!(tracing::Level::TRACE, "xa-mid2d"); Ok(Self { resnet, attn_resnets, span, config, }) } pub fn forward( &self, xs: &Tensor, temb: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = self.resnet.forward(xs, temb)?; for (attn, resnet) in self.attn_resnets.iter() { xs = resnet.forward(&attn.forward(&xs, encoder_hidden_states)?, temb)? 
} Ok(xs) } } #[derive(Debug, Clone, Copy)] pub struct DownBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, // resnet_time_scale_shift: "default" // resnet_act_fn: "swish" pub resnet_groups: usize, pub output_scale_factor: f64, pub add_downsample: bool, pub downsample_padding: usize, } impl Default for DownBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_downsample: true, downsample_padding: 1, } } } #[derive(Debug)] pub struct DownBlock2D { resnets: Vec<ResnetBlock2D>, downsampler: Option<Downsample2D>, span: tracing::Span, pub config: DownBlock2DConfig, } impl DownBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, temb_channels: Option<usize>, config: DownBlock2DConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let resnet_cfg = ResnetBlock2DConfig { out_channels: Some(out_channels), eps: config.resnet_eps, output_scale_factor: config.output_scale_factor, temb_channels, ..Default::default() }; let resnets = (0..config.num_layers) .map(|i| { let in_channels = if i == 0 { in_channels } else { out_channels }; ResnetBlock2D::new(vs_resnets.pp(&i.to_string()), in_channels, resnet_cfg) }) .collect::<Result<Vec<_>>>()?; let downsampler = if config.add_downsample { let downsampler = Downsample2D::new( vs.pp("downsamplers").pp("0"), out_channels, true, out_channels, config.downsample_padding, )?; Some(downsampler) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "down2d"); Ok(Self { resnets, downsampler, span, config, }) } pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<(Tensor, Vec<Tensor>)> { let _enter = self.span.enter(); let mut xs = xs.clone(); let mut output_states = vec![]; for resnet in self.resnets.iter() { xs = resnet.forward(&xs, temb)?; output_states.push(xs.clone()); } let xs = match &self.downsampler { Some(downsampler) => { let xs = downsampler.forward(&xs)?; output_states.push(xs.clone()); xs } None => xs, }; Ok((xs, output_states)) } } #[derive(Debug, Clone, Copy)] pub struct CrossAttnDownBlock2DConfig { pub downblock: DownBlock2DConfig, pub attn_num_head_channels: usize, pub cross_attention_dim: usize, // attention_type: "default" pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, pub transformer_layers_per_block: usize, } impl Default for CrossAttnDownBlock2DConfig { fn default() -> Self { Self { downblock: Default::default(), attn_num_head_channels: 1, cross_attention_dim: 1280, sliced_attention_size: None, use_linear_projection: false, transformer_layers_per_block: 1, } } } #[derive(Debug)] pub struct CrossAttnDownBlock2D { downblock: DownBlock2D, attentions: Vec<SpatialTransformer>, span: tracing::Span, pub config: CrossAttnDownBlock2DConfig, } impl CrossAttnDownBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, temb_channels: Option<usize>, use_flash_attn: bool, config: CrossAttnDownBlock2DConfig, ) -> Result<Self> { let downblock = DownBlock2D::new( vs.clone(), in_channels, out_channels, temb_channels, config.downblock, )?; let n_heads = config.attn_num_head_channels; let cfg = SpatialTransformerConfig { depth: config.transformer_layers_per_block, context_dim: Some(config.cross_attention_dim), num_groups: config.downblock.resnet_groups, sliced_attention_size: config.sliced_attention_size, use_linear_projection: config.use_linear_projection, }; let vs_attn = vs.pp("attentions"); let attentions = (0..config.downblock.num_layers) .map(|i| { 
SpatialTransformer::new( vs_attn.pp(&i.to_string()), out_channels, n_heads, out_channels / n_heads, use_flash_attn, cfg, ) }) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "xa-down2d"); Ok(Self { downblock, attentions, span, config, }) } pub fn forward( &self, xs: &Tensor, temb: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<(Tensor, Vec<Tensor>)> { let _enter = self.span.enter(); let mut output_states = vec![]; let mut xs = xs.clone(); for (resnet, attn) in self.downblock.resnets.iter().zip(self.attentions.iter()) { xs = resnet.forward(&xs, temb)?; xs = attn.forward(&xs, encoder_hidden_states)?; output_states.push(xs.clone()); } let xs = match &self.downblock.downsampler { Some(downsampler) => { let xs = downsampler.forward(&xs)?; output_states.push(xs.clone()); xs } None => xs, }; Ok((xs, output_states)) } } #[derive(Debug, Clone, Copy)] pub struct UpBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, // resnet_time_scale_shift: "default" // resnet_act_fn: "swish" pub resnet_groups: usize, pub output_scale_factor: f64, pub add_upsample: bool, } impl Default for UpBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_upsample: true, } } } #[derive(Debug)] pub struct UpBlock2D { pub resnets: Vec<ResnetBlock2D>, upsampler: Option<Upsample2D>, span: tracing::Span, pub config: UpBlock2DConfig, } impl UpBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, prev_output_channels: usize, out_channels: usize, temb_channels: Option<usize>, config: UpBlock2DConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let resnet_cfg = ResnetBlock2DConfig { out_channels: Some(out_channels), temb_channels, eps: config.resnet_eps, output_scale_factor: config.output_scale_factor, ..Default::default() }; let resnets = (0..config.num_layers) .map(|i| { let res_skip_channels = if i == config.num_layers - 1 { in_channels } else { out_channels }; let resnet_in_channels = if i == 0 { prev_output_channels } else { out_channels }; let in_channels = resnet_in_channels + res_skip_channels; ResnetBlock2D::new(vs_resnets.pp(&i.to_string()), in_channels, resnet_cfg) }) .collect::<Result<Vec<_>>>()?; let upsampler = if config.add_upsample { let upsampler = Upsample2D::new(vs.pp("upsamplers").pp("0"), out_channels, out_channels)?; Some(upsampler) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "up2d"); Ok(Self { resnets, upsampler, span, config, }) } pub fn forward( &self, xs: &Tensor, res_xs: &[Tensor], temb: Option<&Tensor>, upsample_size: Option<(usize, usize)>, ) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for (index, resnet) in self.resnets.iter().enumerate() { xs = Tensor::cat(&[&xs, &res_xs[res_xs.len() - index - 1]], 1)?; xs = xs.contiguous()?; xs = resnet.forward(&xs, temb)?; } match &self.upsampler { Some(upsampler) => upsampler.forward(&xs, upsample_size), None => Ok(xs), } } } #[derive(Debug, Clone, Copy)] pub struct CrossAttnUpBlock2DConfig { pub upblock: UpBlock2DConfig, pub attn_num_head_channels: usize, pub cross_attention_dim: usize, // attention_type: "default" pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, pub transformer_layers_per_block: usize, } impl Default for CrossAttnUpBlock2DConfig { fn default() -> Self { Self { upblock: Default::default(), attn_num_head_channels: 1, cross_attention_dim: 1280, sliced_attention_size: None, use_linear_projection: false, 
transformer_layers_per_block: 1, } } } #[derive(Debug)] pub struct CrossAttnUpBlock2D { pub upblock: UpBlock2D, pub attentions: Vec<SpatialTransformer>, span: tracing::Span, pub config: CrossAttnUpBlock2DConfig, } impl CrossAttnUpBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, prev_output_channels: usize, out_channels: usize, temb_channels: Option<usize>, use_flash_attn: bool, config: CrossAttnUpBlock2DConfig, ) -> Result<Self> { let upblock = UpBlock2D::new( vs.clone(), in_channels, prev_output_channels, out_channels, temb_channels, config.upblock, )?; let n_heads = config.attn_num_head_channels; let cfg = SpatialTransformerConfig { depth: config.transformer_layers_per_block, context_dim: Some(config.cross_attention_dim), num_groups: config.upblock.resnet_groups, sliced_attention_size: config.sliced_attention_size, use_linear_projection: config.use_linear_projection, }; let vs_attn = vs.pp("attentions"); let attentions = (0..config.upblock.num_layers) .map(|i| { SpatialTransformer::new( vs_attn.pp(&i.to_string()), out_channels, n_heads, out_channels / n_heads, use_flash_attn, cfg, ) }) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "xa-up2d"); Ok(Self { upblock, attentions, span, config, }) } pub fn forward( &self, xs: &Tensor, res_xs: &[Tensor], temb: Option<&Tensor>, upsample_size: Option<(usize, usize)>, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for (index, resnet) in self.upblock.resnets.iter().enumerate() { xs = Tensor::cat(&[&xs, &res_xs[res_xs.len() - index - 1]], 1)?; xs = xs.contiguous()?; xs = resnet.forward(&xs, temb)?; xs = self.attentions[index].forward(&xs, encoder_hidden_states)?; } match &self.upblock.upsampler { Some(upsampler) => upsampler.forward(&xs, upsample_size), None => Ok(xs), } } }
candle/candle-transformers/src/models/stable_diffusion/unet_2d_blocks.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/unet_2d_blocks.rs", "repo_id": "candle", "token_count": 13815 }
43
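These UNet building blocks are assembled elsewhere into the Stable Diffusion UNet and VAE, but each one can be exercised on its own. A small sketch for `DownEncoderBlock2D`, assuming the module path below and using zero weights with an arbitrary input size; the channel counts only need to be divisible by the 32-group group norm inside the resnet blocks.

```rust
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::stable_diffusion::unet_2d_blocks::{
    DownEncoderBlock2D, DownEncoderBlock2DConfig,
};

fn main() -> Result<()> {
    let device = Device::Cpu;
    let vb = VarBuilder::zeros(DType::F32, &device);
    let block = DownEncoderBlock2D::new(vb, 64, 128, DownEncoderBlock2DConfig::default())?;

    // (batch, channels, height, width); the stride-2 downsampler halves the spatial dims.
    let xs = Tensor::zeros((1, 64, 32, 32), DType::F32, &device)?;
    let ys = block.forward(&xs)?;
    println!("{:?} -> {:?}", xs.dims(), ys.dims()); // expect (1, 128, 16, 16)
    Ok(())
}
```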
use candle::{Result, Tensor}; #[derive(Debug, Clone)] pub struct DDPMWSchedulerConfig { scaler: f64, s: f64, } impl Default for DDPMWSchedulerConfig { fn default() -> Self { Self { scaler: 1f64, s: 0.008f64, } } } pub struct DDPMWScheduler { init_alpha_cumprod: f64, init_noise_sigma: f64, timesteps: Vec<f64>, pub config: DDPMWSchedulerConfig, } impl DDPMWScheduler { pub fn new(inference_steps: usize, config: DDPMWSchedulerConfig) -> Result<Self> { let init_alpha_cumprod = (config.s / (1. + config.s) * std::f64::consts::PI) .cos() .powi(2); let timesteps = (0..=inference_steps) .map(|i| 1. - i as f64 / inference_steps as f64) .collect::<Vec<_>>(); Ok(Self { init_alpha_cumprod, init_noise_sigma: 1.0, timesteps, config, }) } pub fn timesteps(&self) -> &[f64] { &self.timesteps } fn alpha_cumprod(&self, t: f64) -> f64 { let scaler = self.config.scaler; let s = self.config.s; let t = if scaler > 1. { 1. - (1. - t).powf(scaler) } else if scaler < 1. { t.powf(scaler) } else { t }; let alpha_cumprod = ((t + s) / (1. + s) * std::f64::consts::PI * 0.5) .cos() .powi(2) / self.init_alpha_cumprod; alpha_cumprod.clamp(0.0001, 0.9999) } fn previous_timestep(&self, ts: f64) -> f64 { let index = self .timesteps .iter() .enumerate() .map(|(idx, v)| (idx, (v - ts).abs())) .min_by(|x, y| x.1.total_cmp(&y.1)) .unwrap() .0; self.timesteps[index + 1] } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. pub fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Tensor { sample } pub fn step(&self, model_output: &Tensor, ts: f64, sample: &Tensor) -> Result<Tensor> { let prev_t = self.previous_timestep(ts); let alpha_cumprod = self.alpha_cumprod(ts); let alpha_cumprod_prev = self.alpha_cumprod(prev_t); let alpha = alpha_cumprod / alpha_cumprod_prev; let mu = (sample - model_output * ((1. - alpha) / (1. - alpha_cumprod).sqrt()))?; let mu = (mu * (1. / alpha).sqrt())?; let std_noise = mu.randn_like(0., 1.)?; let std = std_noise * ((1. - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt(); if prev_t == 0. { Ok(mu) } else { mu + std } } pub fn init_noise_sigma(&self) -> f64 { self.init_noise_sigma } }
candle/candle-transformers/src/models/wuerstchen/ddpm.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/ddpm.rs", "repo_id": "candle", "token_count": 1537 }
44
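The `DDPMWScheduler` above drives the Würstchen denoising loop: `timesteps()` yields a decreasing schedule in `[1, 0]` and `step` turns the current prediction into the next latents. A self-contained sketch with stand-in tensors; the latent shape is arbitrary and the zero "model output" replaces the real prior/decoder network.

```rust
use candle::{Device, Result, Tensor};
use candle_transformers::models::wuerstchen::ddpm::{DDPMWScheduler, DDPMWSchedulerConfig};

fn main() -> Result<()> {
    let device = Device::Cpu;
    let scheduler = DDPMWScheduler::new(30, DDPMWSchedulerConfig::default())?;

    let mut latents = Tensor::randn(0f32, 1f32, (1, 4, 32, 32), &device)?;
    let timesteps = scheduler.timesteps().to_vec();
    // The trailing 0.0 only serves as the "previous" timestep, so stop one step early.
    for &t in &timesteps[..timesteps.len() - 1] {
        let model_output = latents.zeros_like()?;
        latents = scheduler.step(&model_output, t, &latents)?;
    }
    println!("denoised latents: {:?}", latents.dims());
    Ok(())
}
```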
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <title>Welcome to Candle!</title>
    <link data-trunk rel="copy-file" href="tokenizer.json" />
    <link data-trunk rel="copy-file" href="model.bin" />
    <link data-trunk rel="rust" href="Cargo.toml" data-bin="app" data-type="main" />
    <link data-trunk rel="rust" href="Cargo.toml" data-bin="worker" data-type="worker" />
    <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic">
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css">
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.4.1/milligram.css">
  </head>
  <body></body>
</html>
candle/candle-wasm-examples/llama2-c/index.html/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/index.html", "repo_id": "candle", "token_count": 315 }
45
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
extern "C" {
    // Use `js_namespace` here to bind `console.log(..)` instead of just
    // `log(..)`
    #[wasm_bindgen(js_namespace = console)]
    pub fn log(s: &str);
}

#[macro_export]
macro_rules! console_log {
    // Note that this is using the `log` function imported above during
    // `bare_bones`
    ($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
candle/candle-wasm-examples/phi/src/lib.rs/0
{ "file_path": "candle/candle-wasm-examples/phi/src/lib.rs", "repo_id": "candle", "token_count": 183 }
46
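The `console_log!` macro exported above forwards formatted text to the browser's `console.log` through the `wasm_bindgen` import. A short sketch of how it might be used from another function in the same crate; the function itself is made up for illustration.

```rust
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub fn report_progress(tokens: usize) {
    // Expands to a console.log call in the browser.
    console_log!("decoded {} tokens so far", tokens);
}
```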
//load the candle Whisper decoder wasm module import init, { Decoder } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "whisper-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Whisper { static instance = {}; // Retrieve the Whisper model. When called for the first time, // this will load the model and save it for future use. static async getInstance(params) { const { weightsURL, modelID, tokenizerURL, mel_filtersURL, configURL, quantized, is_multilingual, timestamps, task, language, } = params; // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [ weightsArrayU8, tokenizerArrayU8, mel_filtersArrayU8, configArrayU8, ] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(mel_filtersURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new Decoder( weightsArrayU8, tokenizerArrayU8, mel_filtersArrayU8, configArrayU8, quantized, is_multilingual, timestamps, task, language ); } else { self.postMessage({ status: "loading", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, modelID, tokenizerURL, configURL, mel_filtersURL, audioURL, } = event.data; try { self.postMessage({ status: "decoding", message: "Starting Decoder" }); let quantized = false; if (modelID.includes("quantized")) { quantized = true; } let is_multilingual = false; if (modelID.includes("multilingual")) { is_multilingual = true; } let timestamps = true; const decoder = await Whisper.getInstance({ weightsURL, modelID, tokenizerURL, mel_filtersURL, configURL, quantized, is_multilingual, timestamps, task: null, language: null, }); self.postMessage({ status: "decoding", message: "Loading Audio" }); const audioArrayU8 = await fetchArrayBuffer(audioURL); self.postMessage({ status: "decoding", message: "Running Decoder..." }); const segments = decoder.decode(audioArrayU8); // Send the segment back to the main thread as JSON self.postMessage({ status: "complete", message: "complete", output: JSON.parse(segments), }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/whisper/whisperWorker.js/0
{ "file_path": "candle/candle-wasm-examples/whisper/whisperWorker.js", "repo_id": "candle", "token_count": 1215 }
47
Run the tests with:

```bash
RUST_LOG=wasm_bindgen_test_runner wasm-pack test --chrome --headless
```

Or:

```bash
wasm-pack test --chrome
```

If you get an "invalid session id" failure in headless mode, check the logs: it may well be that your ChromeDriver is not at the same version as your browser.
candle/candle-wasm-tests/README.md/0
{ "file_path": "candle/candle-wasm-tests/README.md", "repo_id": "candle", "token_count": 98 }
48
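For reference, the tests this command drives are plain `wasm-bindgen-test` functions compiled to WebAssembly and run in the browser that `wasm-pack` launches. An illustrative minimal test, not taken from the crate:

```rust
use wasm_bindgen_test::*;

// Run in a browser environment rather than Node.
wasm_bindgen_test_configure!(run_in_browser);

#[wasm_bindgen_test]
fn smoke_test() {
    assert_eq!(2 + 2, 4);
}
```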
import { navigating } from "$app/stores"; import { tick } from "svelte"; import { get } from "svelte/store"; const detachedOffset = 10; /** * @param node element to snap scroll to bottom * @param dependency pass in a dependency to update scroll on changes. */ export const snapScrollToBottom = (node: HTMLElement, dependency: unknown) => { let prevScrollValue = node.scrollTop; let isDetached = false; const handleScroll = () => { // if user scrolled up, we detach if (node.scrollTop < prevScrollValue) { isDetached = true; } // if user scrolled back to within 10px of bottom, we reattach if (node.scrollTop - (node.scrollHeight - node.clientHeight) >= -detachedOffset) { isDetached = false; } prevScrollValue = node.scrollTop; }; const updateScroll = async (_options: { force?: boolean } = {}) => { const defaultOptions = { force: false }; const options = { ...defaultOptions, ..._options }; const { force } = options; if (!force && isDetached && !get(navigating)) return; // wait for next tick to ensure that the DOM is updated await tick(); node.scrollTo({ top: node.scrollHeight }); }; node.addEventListener("scroll", handleScroll); if (dependency) { updateScroll({ force: true }); } return { update: updateScroll, destroy: () => { node.removeEventListener("scroll", handleScroll); }, }; };
chat-ui/src/lib/actions/snapScrollToBottom.ts/0
{ "file_path": "chat-ui/src/lib/actions/snapScrollToBottom.ts", "repo_id": "chat-ui", "token_count": 437 }
49
<script lang="ts"> import { page } from "$app/stores"; import { getHref } from "$lib/utils/getHref"; import PaginationArrow from "./PaginationArrow.svelte"; export let classNames = ""; export let numItemsPerPage: number; export let numTotalItems: number; const ELLIPSIS_IDX = -1 as const; $: numTotalPages = Math.ceil(numTotalItems / numItemsPerPage); $: pageIndex = parseInt($page.url.searchParams.get("p") ?? "0"); $: pageIndexes = getPageIndexes(pageIndex, numTotalPages); function getPageIndexes(pageIdx: number, nTotalPages: number) { let pageIdxs: number[] = []; const NUM_EXTRA_BUTTONS = 2; // The number of page links to show on either side of the current page link. const minIdx = 0; const maxIdx = nTotalPages - 1; pageIdxs = [pageIdx]; // forward for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) { const newPageIdx = pageIdx + i; if (newPageIdx > maxIdx) { continue; } pageIdxs.push(newPageIdx); } if (maxIdx - pageIdxs[pageIdxs.length - 1] > 1) { pageIdxs.push(...[ELLIPSIS_IDX, maxIdx]); } else if (maxIdx - pageIdxs[pageIdxs.length - 1] === 1) { pageIdxs.push(maxIdx); } // backward for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) { const newPageIdx = pageIdx - i; if (newPageIdx < minIdx) { continue; } pageIdxs.unshift(newPageIdx); } if (pageIdxs[0] - minIdx > 1) { pageIdxs.unshift(...[minIdx, ELLIPSIS_IDX]); } else if (pageIdxs[0] - minIdx === 1) { pageIdxs.unshift(minIdx); } return pageIdxs; } </script> {#if numTotalPages > 1} <nav> <ul class="flex select-none items-center justify-between space-x-2 text-gray-700 sm:justify-center dark:text-gray-300 {classNames}" > <li> <PaginationArrow href={getHref($page.url, { newKeys: { p: (pageIndex - 1).toString() } })} direction="previous" isDisabled={pageIndex - 1 < 0} /> </li> {#each pageIndexes as pageIdx} <li class="hidden sm:block"> <a class=" rounded-lg px-2.5 py-1 {pageIndex === pageIdx ? 'bg-gray-50 font-semibold ring-1 ring-inset ring-gray-200 dark:bg-gray-800 dark:text-yellow-500 dark:ring-gray-700' : ''} " class:pointer-events-none={pageIdx === ELLIPSIS_IDX || pageIndex === pageIdx} href={getHref($page.url, { newKeys: { p: pageIdx.toString() } })} > {pageIdx === ELLIPSIS_IDX ? "..." : pageIdx + 1} </a> </li> {/each} <li> <PaginationArrow href={getHref($page.url, { newKeys: { p: (pageIndex + 1).toString() } })} direction="next" isDisabled={pageIndex + 1 >= numTotalPages} /> </li> </ul> </nav> {/if}
chat-ui/src/lib/components/Pagination.svelte/0
{ "file_path": "chat-ui/src/lib/components/Pagination.svelte", "repo_id": "chat-ui", "token_count": 1210 }
50
<script lang="ts"> import type { Message } from "$lib/types/Message"; import { createEventDispatcher, onDestroy, tick } from "svelte"; import CarbonSendAltFilled from "~icons/carbon/send-alt-filled"; import CarbonExport from "~icons/carbon/export"; import CarbonStopFilledAlt from "~icons/carbon/stop-filled-alt"; import CarbonClose from "~icons/carbon/close"; import CarbonCheckmark from "~icons/carbon/checkmark"; import CarbonCaretDown from "~icons/carbon/caret-down"; import EosIconsLoading from "~icons/eos-icons/loading"; import ChatInput from "./ChatInput.svelte"; import StopGeneratingBtn from "../StopGeneratingBtn.svelte"; import type { Model } from "$lib/types/Model"; import WebSearchToggle from "../WebSearchToggle.svelte"; import LoginModal from "../LoginModal.svelte"; import { page } from "$app/stores"; import FileDropzone from "./FileDropzone.svelte"; import RetryBtn from "../RetryBtn.svelte"; import UploadBtn from "../UploadBtn.svelte"; import file2base64 from "$lib/utils/file2base64"; import type { Assistant } from "$lib/types/Assistant"; import { base } from "$app/paths"; import ContinueBtn from "../ContinueBtn.svelte"; import AssistantIntroduction from "./AssistantIntroduction.svelte"; import ChatMessage from "./ChatMessage.svelte"; import ScrollToBottomBtn from "../ScrollToBottomBtn.svelte"; import { browser } from "$app/environment"; import { snapScrollToBottom } from "$lib/actions/snapScrollToBottom"; import SystemPromptModal from "../SystemPromptModal.svelte"; import ChatIntroduction from "./ChatIntroduction.svelte"; import { useConvTreeStore } from "$lib/stores/convTree"; export let messages: Message[] = []; export let loading = false; export let pending = false; export let shared = false; export let currentModel: Model; export let models: Model[]; export let assistant: Assistant | undefined = undefined; export let preprompt: string | undefined = undefined; export let files: File[] = []; $: isReadOnly = !models.some((model) => model.id === currentModel.id); let loginModalOpen = false; let message: string; let timeout: ReturnType<typeof setTimeout>; let isSharedRecently = false; $: $page.params.id && (isSharedRecently = false); const dispatch = createEventDispatcher<{ message: string; share: void; stop: void; retry: { id: Message["id"]; content?: string }; continue: { id: Message["id"] }; }>(); const handleSubmit = () => { if (loading) return; dispatch("message", message); message = ""; }; let lastTarget: EventTarget | null = null; let onDrag = false; const onDragEnter = (e: DragEvent) => { lastTarget = e.target; onDrag = true; }; const onDragLeave = (e: DragEvent) => { if (e.target === lastTarget) { onDrag = false; } }; const onDragOver = (e: DragEvent) => { e.preventDefault(); }; const convTreeStore = useConvTreeStore(); $: lastMessage = browser && (messages.find((m) => m.id == $convTreeStore.leaf) as Message); $: lastIsError = lastMessage && !loading && (lastMessage.from === "user" || lastMessage.updates?.findIndex((u) => u.type === "status" && u.status === "error") !== -1); $: sources = files.map((file) => file2base64(file)); function onShare() { dispatch("share"); isSharedRecently = true; if (timeout) { clearTimeout(timeout); } timeout = setTimeout(() => { isSharedRecently = false; }, 2000); } onDestroy(() => { if (timeout) { clearTimeout(timeout); } }); let chatContainer: HTMLElement; async function scrollToBottom() { await tick(); chatContainer.scrollTop = chatContainer.scrollHeight; } // If last message is from user, scroll to bottom $: if (lastMessage && 
lastMessage.from === "user") { scrollToBottom(); } </script> <div class="relative min-h-0 min-w-0"> {#if loginModalOpen} <LoginModal on:close={() => { loginModalOpen = false; }} /> {/if} <div class="scrollbar-custom mr-1 h-full overflow-y-auto" use:snapScrollToBottom={messages.length ? [...messages] : false} bind:this={chatContainer} > <div class="mx-auto flex h-full max-w-3xl flex-col gap-6 px-5 pt-6 sm:gap-8 xl:max-w-4xl"> {#if $page.data?.assistant && !!messages.length} <a class="mx-auto flex items-center gap-1.5 rounded-full border border-gray-100 bg-gray-50 py-1 pl-1 pr-3 text-sm text-gray-800 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-200 dark:hover:bg-gray-700" href="{base}/settings/assistants/{$page.data.assistant._id}" > {#if $page.data?.assistant.avatar} <img src="{base}/settings/assistants/{$page.data?.assistant._id.toString()}/avatar.jpg?hash=${$page .data.assistant.avatar}" alt="Avatar" class="size-5 rounded-full object-cover" /> {:else} <div class="flex size-6 items-center justify-center rounded-full bg-gray-300 font-bold uppercase text-gray-500" > {$page.data?.assistant.name[0]} </div> {/if} {$page.data.assistant.name} </a> {:else if preprompt && preprompt != currentModel.preprompt} <SystemPromptModal preprompt={preprompt ?? ""} /> {/if} {#if messages.length > 0} <div class="flex h-max flex-col gap-6 pb-52"> <ChatMessage {loading} {messages} id={messages[0].id} isAuthor={!shared} readOnly={isReadOnly} model={currentModel} on:retry on:vote on:continue /> </div> {:else if pending} <ChatMessage loading={true} messages={[ { id: "0-0-0-0-0", content: "", from: "assistant", children: [], }, ]} id={"0-0-0-0-0"} isAuthor={!shared} readOnly={isReadOnly} model={currentModel} /> {:else if !assistant} <ChatIntroduction {models} {currentModel} on:message={(ev) => { if ($page.data.loginRequired) { ev.preventDefault(); loginModalOpen = true; } else { dispatch("message", ev.detail); } }} /> {:else} <AssistantIntroduction {assistant} on:message={(ev) => { if ($page.data.loginRequired) { ev.preventDefault(); loginModalOpen = true; } else { dispatch("message", ev.detail); } }} /> {/if} </div> <ScrollToBottomBtn class="bottom-36 right-4 max-md:hidden lg:right-10" scrollNode={chatContainer} /> </div> <div class="dark:via-gray-80 pointer-events-none absolute inset-x-0 bottom-0 z-0 mx-auto flex w-full max-w-3xl flex-col items-center justify-center bg-gradient-to-t from-white via-white/80 to-white/0 px-3.5 py-4 max-md:border-t max-md:bg-white sm:px-5 md:py-8 xl:max-w-4xl dark:border-gray-800 dark:from-gray-900 dark:to-gray-900/0 max-md:dark:bg-gray-900 [&>*]:pointer-events-auto" > {#if sources.length} <div class="flex flex-row flex-wrap justify-center gap-2.5 max-md:pb-3"> {#each sources as source, index} {#await source then src} <div class="relative h-16 w-16 overflow-hidden rounded-lg shadow-lg"> <img src={`data:image/*;base64,${src}`} alt="input content" class="h-full w-full rounded-lg bg-gray-400 object-cover dark:bg-gray-900" /> <!-- add a button on top that deletes this image from sources --> <button class="absolute left-1 top-1" on:click={() => { files = files.filter((_, i) => i !== index); }} > <CarbonClose class="text-md font-black text-gray-300 hover:text-gray-100" /> </button> </div> {/await} {/each} </div> {/if} <div class="w-full"> <div class="flex w-full pb-3"> {#if $page.data.settings?.searchEnabled && !assistant} <WebSearchToggle /> {/if} {#if loading} <StopGeneratingBtn classNames="ml-auto" on:click={() => dispatch("stop")} /> {:else if lastIsError} 
<RetryBtn classNames="ml-auto" on:click={() => { if (lastMessage && lastMessage.ancestors) { dispatch("retry", { id: lastMessage.id, }); } }} /> {:else} <div class="ml-auto gap-2"> {#if currentModel.multimodal} <UploadBtn bind:files classNames="ml-auto" /> {/if} {#if messages && lastMessage && lastMessage.interrupted && !isReadOnly} <ContinueBtn on:click={() => { if (lastMessage && lastMessage.ancestors) { dispatch("continue", { id: lastMessage?.id, }); } }} /> {/if} </div> {/if} </div> <form on:dragover={onDragOver} on:dragenter={onDragEnter} on:dragleave={onDragLeave} tabindex="-1" aria-label="file dropzone" on:submit|preventDefault={handleSubmit} class="relative flex w-full max-w-4xl flex-1 items-center rounded-xl border bg-gray-100 focus-within:border-gray-300 dark:border-gray-600 dark:bg-gray-700 dark:focus-within:border-gray-500 {isReadOnly ? 'opacity-30' : ''}" > {#if onDrag && currentModel.multimodal} <FileDropzone bind:files bind:onDrag /> {:else} <div class="flex w-full flex-1 border-none bg-transparent"> {#if lastIsError} <ChatInput value="Sorry, something went wrong. Please try again." disabled={true} /> {:else} <ChatInput placeholder="Ask anything" bind:value={message} on:submit={handleSubmit} on:beforeinput={(ev) => { if ($page.data.loginRequired) { ev.preventDefault(); loginModalOpen = true; } }} maxRows={6} disabled={isReadOnly || lastIsError} /> {/if} {#if loading} <button class="btn mx-1 my-1 inline-block h-[2.4rem] self-end rounded-lg bg-transparent p-1 px-[0.7rem] text-gray-400 disabled:opacity-60 enabled:hover:text-gray-700 md:hidden dark:disabled:opacity-40 enabled:dark:hover:text-gray-100" on:click={() => dispatch("stop")} > <CarbonStopFilledAlt /> </button> <div class="mx-1 my-1 hidden h-[2.4rem] items-center p-1 px-[0.7rem] text-gray-400 disabled:opacity-60 enabled:hover:text-gray-700 md:flex dark:disabled:opacity-40 enabled:dark:hover:text-gray-100" > <EosIconsLoading /> </div> {:else} <button class="btn mx-1 my-1 h-[2.4rem] self-end rounded-lg bg-transparent p-1 px-[0.7rem] text-gray-400 disabled:opacity-60 enabled:hover:text-gray-700 dark:disabled:opacity-40 enabled:dark:hover:text-gray-100" disabled={!message || isReadOnly} type="submit" > <CarbonSendAltFilled /> </button> {/if} </div> {/if} </form> <div class="mt-2 flex justify-between self-stretch px-1 text-xs text-gray-400/90 max-md:mb-2 max-sm:gap-2" > <p> Model: {#if !assistant} <a href="{base}/settings/{currentModel.id}" class="hover:underline" >{currentModel.displayName}</a >{:else} {@const model = models.find((m) => m.id === assistant?.modelId)} <a href="{base}/settings/assistants/{assistant._id}" class="inline-flex items-center border-b hover:text-gray-600 dark:border-gray-700 dark:hover:text-gray-300" >{model?.displayName}<CarbonCaretDown class="text-xxs" /></a >{/if} <span class="max-sm:hidden">·</span><br class="sm:hidden" /> Generated content may be inaccurate or false. </p> {#if messages.length} <button class="flex flex-none items-center hover:text-gray-400 max-sm:rounded-lg max-sm:bg-gray-50 max-sm:px-2.5 dark:max-sm:bg-gray-800" type="button" class:hover:underline={!isSharedRecently} on:click={onShare} disabled={isSharedRecently} > {#if isSharedRecently} <CarbonCheckmark class="text-[.6rem] sm:mr-1.5 sm:text-green-600" /> <div class="text-green-600 max-sm:hidden">Link copied to clipboard</div> {:else} <CarbonExport class="text-[.6rem] sm:mr-1.5 sm:text-primary-500" /> <div class="max-sm:hidden">Share this conversation</div> {/if} </button> {/if} </div> </div> </div> </div>
chat-ui/src/lib/components/chat/ChatWindow.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatWindow.svelte", "repo_id": "chat-ui", "token_count": 5436 }
51
// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850 import { setTimeout } from "node:timers/promises"; import { collections } from "./database"; let closed = false; process.on("SIGINT", () => { closed = true; }); export let abortedGenerations: Map<string, Date> = new Map(); async function maintainAbortedGenerations() { while (!closed) { await setTimeout(1000); try { const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray(); abortedGenerations = new Map( aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt]) ); } catch (err) { console.error(err); } } } maintainAbortedGenerations();
chat-ui/src/lib/server/abortedGenerations.ts/0
{ "file_path": "chat-ui/src/lib/server/abortedGenerations.ts", "repo_id": "chat-ui", "token_count": 267 }
52
import { HF_ACCESS_TOKEN, HF_TOKEN } from "$env/static/private"; import { buildPrompt } from "$lib/buildPrompt"; import { textGenerationStream } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { z } from "zod"; export const endpointTgiParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("tgi"), url: z.string().url(), accessToken: z.string().default(HF_TOKEN ?? HF_ACCESS_TOKEN), authorization: z.string().optional(), }); export function endpointTgi(input: z.input<typeof endpointTgiParametersSchema>): Endpoint { const { url, accessToken, model, authorization } = endpointTgiParametersSchema.parse(input); return async ({ messages, preprompt, continueMessage }) => { const prompt = await buildPrompt({ messages, preprompt, model, continueMessage, }); return textGenerationStream( { parameters: { ...model.parameters, return_full_text: false }, model: url, inputs: prompt, accessToken, }, { use_cache: false, fetch: async (endpointUrl, info) => { if (info && authorization && !accessToken) { // Set authorization header if it is defined and HF_TOKEN is empty info.headers = { ...info.headers, Authorization: authorization, }; } return fetch(endpointUrl, info); }, } ); }; } export default endpointTgi;
chat-ui/src/lib/server/endpoints/tgi/endpointTgi.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/tgi/endpointTgi.ts", "repo_id": "chat-ui", "token_count": 526 }
53
import { JSDOM, VirtualConsole } from "jsdom";

export async function searchWebLocal(query: string) {
	const abortController = new AbortController();
	setTimeout(() => abortController.abort(), 10000);

	const htmlString = await fetch("https://www.google.com/search?hl=en&q=" + query, {
		signal: abortController.signal,
	})
		.then((response) => response.text())
		.catch();

	const virtualConsole = new VirtualConsole();
	virtualConsole.on("error", () => {
		// No-op to skip console errors.
	});

	// put the html string into a DOM
	const dom = new JSDOM(htmlString ?? "", {
		virtualConsole,
	});

	const { document } = dom.window;
	// get all <a> elements that have an href attribute
	const links = document.querySelectorAll("a");

	if (!links.length) {
		throw new Error(`webpage doesn't have any "a" element`);
	}

	// take urls that start with /url?q=
	// and do not contain google.com links
	// and strip them up to '&sa='
	const linksHref = Array.from(links)
		.filter((el) => el.href?.startsWith("/url?q=") && !el.href.includes("google.com/"))
		.map((el) => {
			const link = el.href;
			return link.slice("/url?q=".length, link.indexOf("&sa="));
		});

	// remove duplicate links and map links to the correct object shape
	return { organic_results: [...new Set(linksHref)].map((link) => ({ link })) };
}
chat-ui/src/lib/server/websearch/searchWebLocal.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/searchWebLocal.ts", "repo_id": "chat-ui", "token_count": 438 }
54
import type { Session } from "./Session"; import type { Timestamps } from "./Timestamps"; import type { User } from "./User"; export interface MessageEvent extends Pick<Timestamps, "createdAt"> { userId: User["_id"] | Session["sessionId"]; ip?: string; }
chat-ui/src/lib/types/MessageEvent.ts/0
{ "file_path": "chat-ui/src/lib/types/MessageEvent.ts", "repo_id": "chat-ui", "token_count": 80 }
55
import { sum } from "./sum"; export function concatUint8Arrays(arrays: Uint8Array[]): Uint8Array { const totalLength = sum(arrays.map((a) => a.length)); const result = new Uint8Array(totalLength); let offset = 0; for (const array of arrays) { result.set(array, offset); offset += array.length; } return result; }
chat-ui/src/lib/utils/concatUint8Arrays.ts/0
{ "file_path": "chat-ui/src/lib/utils/concatUint8Arrays.ts", "repo_id": "chat-ui", "token_count": 117 }
56
export async function sha256(input: string): Promise<string> { const utf8 = new TextEncoder().encode(input); const hashBuffer = await crypto.subtle.digest("SHA-256", utf8); const hashArray = Array.from(new Uint8Array(hashBuffer)); const hashHex = hashArray.map((bytes) => bytes.toString(16).padStart(2, "0")).join(""); return hashHex; }
chat-ui/src/lib/utils/sha256.ts/0
{ "file_path": "chat-ui/src/lib/utils/sha256.ts", "repo_id": "chat-ui", "token_count": 119 }
57
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { describe, expect, it } from "vitest"; // function used to insert conversations used for testing export const insertLegacyConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "legacy conversation", model: "", embeddingModel: "", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world! I am a user", }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, world! I am an assistant.", }, { id: "1-1-1-1-3", from: "user", content: "Hello, world! I am a user.", }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, world! I am an assistant.", }, ], }); return res.insertedId; }; export const insertLinearBranchConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "linear branch conversation", model: "", embeddingModel: "", rootMessageId: "1-1-1-1-1", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world! I am a user", ancestors: [], children: ["1-1-1-1-2"], }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, world! I am an assistant.", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-3"], }, { id: "1-1-1-1-3", from: "user", content: "Hello, world! I am a user.", ancestors: ["1-1-1-1-1", "1-1-1-1-2"], children: ["1-1-1-1-4"], }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, world! I am an assistant.", ancestors: ["1-1-1-1-1", "1-1-1-1-2", "1-1-1-1-3"], children: [], }, ], }); return res.insertedId; }; export const insertSideBranchesConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "side branches conversation", model: "", embeddingModel: "", rootMessageId: "1-1-1-1-1", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world, root message!", ancestors: [], children: ["1-1-1-1-2", "1-1-1-1-5"], }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, response to root message!", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-3"], }, { id: "1-1-1-1-3", from: "user", content: "Hello, follow up question!", ancestors: ["1-1-1-1-1", "1-1-1-1-2"], children: ["1-1-1-1-4"], }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, response from follow up question!", ancestors: ["1-1-1-1-1", "1-1-1-1-2", "1-1-1-1-3"], children: [], }, { id: "1-1-1-1-5", from: "assistant", content: "Hello, alternative assistant answer!", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-6", "1-1-1-1-7"], }, { id: "1-1-1-1-6", from: "user", content: "Hello, follow up question to alternative answer!", ancestors: ["1-1-1-1-1", "1-1-1-1-5"], children: [], }, { id: "1-1-1-1-7", from: "user", content: "Hello, alternative follow up question to alternative answer!", ancestors: ["1-1-1-1-1", "1-1-1-1-5"], children: [], }, ], }); return res.insertedId; }; describe("inserting conversations", () => { it("should insert a legacy conversation", async () => { const id = await insertLegacyConversation(); expect(id).toBeDefined(); }); it("should insert a linear branch conversation", async () => { const id = await insertLinearBranchConversation(); expect(id).toBeDefined(); }); it("should insert a side branches conversation", async () => { const id = await insertSideBranchesConversation(); expect(id).toBeDefined(); }); });
chat-ui/src/lib/utils/tree/treeHelpers.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/treeHelpers.spec.ts", "repo_id": "chat-ui", "token_count": 1864 }
58
<script lang="ts"> import type { PageData } from "./$types"; import { PUBLIC_APP_ASSETS, PUBLIC_ORIGIN } from "$env/static/public"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import { goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/stores"; import CarbonAdd from "~icons/carbon/add"; import CarbonHelpFilled from "~icons/carbon/help-filled"; import CarbonClose from "~icons/carbon/close"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import CarbonEarthAmerica from "~icons/carbon/earth-americas-filled"; import CarbonUserMultiple from "~icons/carbon/user-multiple"; import CarbonSearch from "~icons/carbon/search"; import Pagination from "$lib/components/Pagination.svelte"; import { formatUserCount } from "$lib/utils/formatUserCount"; import { getHref } from "$lib/utils/getHref"; import { debounce } from "$lib/utils/debounce"; import { useSettingsStore } from "$lib/stores/settings"; import IconInternet from "$lib/components/icons/IconInternet.svelte"; import { isDesktop } from "$lib/utils/isDesktop"; export let data: PageData; $: assistantsCreator = $page.url.searchParams.get("user"); $: createdByMe = data.user?.username && data.user.username === assistantsCreator; const SEARCH_DEBOUNCE_DELAY = 400; let filterInputEl: HTMLInputElement; let filterValue = data.query; let isFilterInPorgress = false; const onModelChange = (e: Event) => { const newUrl = getHref($page.url, { newKeys: { modelId: (e.target as HTMLSelectElement).value }, existingKeys: { behaviour: "delete_except", keys: ["user"] }, }); resetFilter(); goto(newUrl); }; const resetFilter = () => { filterValue = ""; isFilterInPorgress = false; }; const filterOnName = debounce(async (value: string) => { filterValue = value; if (isFilterInPorgress) { return; } isFilterInPorgress = true; const newUrl = getHref($page.url, { newKeys: { q: value }, existingKeys: { behaviour: "delete", keys: ["p"] }, }); await goto(newUrl); if (isDesktop(window)) { setTimeout(() => filterInputEl.focus(), 0); } isFilterInPorgress = false; // there was a new filter query before server returned response if (filterValue !== value) { filterOnName(filterValue); } }, SEARCH_DEBOUNCE_DELAY); const settings = useSettingsStore(); </script> <svelte:head> {#if isHuggingChat} <title>HuggingChat - Assistants</title> <meta property="og:title" content="HuggingChat - Assistants" /> <meta property="og:type" content="link" /> <meta property="og:description" content="Browse HuggingChat assistants made by the community." 
/> <meta property="og:image" content="{PUBLIC_ORIGIN || $page.url.origin}{base}/{PUBLIC_APP_ASSETS}/assistants-thumbnail.png" /> <meta property="og:url" content={$page.url.href} /> {/if} </svelte:head> <div class="scrollbar-custom mr-1 h-full overflow-y-auto py-12 max-sm:pt-8 md:py-24"> <div class="pt-42 mx-auto flex flex-col px-5 xl:w-[60rem] 2xl:w-[64rem]"> <div class="flex items-center"> <h1 class="text-2xl font-bold">Assistants</h1> {#if isHuggingChat} <div class="5 ml-1.5 rounded-lg text-xxs uppercase text-gray-500 dark:text-gray-500"> beta </div> <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/357" class="ml-auto dark:text-gray-400 dark:hover:text-gray-300" target="_blank" > <CarbonHelpFilled /> </a> {/if} </div> <h3 class="text-gray-500">Popular assistants made by the community</h3> <div class="mt-6 flex justify-between gap-2 max-sm:flex-col sm:items-center"> <select class="mt-1 h-[34px] rounded-lg border border-gray-300 bg-gray-50 px-2 text-sm text-gray-900 focus:border-blue-700 focus:ring-blue-700 dark:border-gray-600 dark:bg-gray-700 dark:text-white dark:placeholder-gray-400" bind:value={data.selectedModel} on:change={onModelChange} > <option value="">All models</option> {#each data.models.filter((model) => !model.unlisted) as model} <option value={model.name}>{model.name}</option> {/each} </select> <a href={`${base}/settings/assistants/new`} class="flex items-center gap-1 whitespace-nowrap rounded-lg border bg-white py-1 pl-1.5 pr-2.5 shadow-sm hover:bg-gray-50 hover:shadow-none dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-700" > <CarbonAdd />Create New assistant </a> </div> <div class="mt-7 flex items-center gap-x-2 text-sm"> {#if assistantsCreator && !createdByMe} <div class="flex items-center gap-1.5 rounded-full border border-gray-300 bg-gray-50 px-3 py-1 dark:border-gray-600 dark:bg-gray-700 dark:text-white" > {assistantsCreator}'s Assistants <a href={getHref($page.url, { existingKeys: { behaviour: "delete", keys: ["user", "modelId", "p", "q"] }, })} on:click={resetFilter} class="group" ><CarbonClose class="text-xs group-hover:text-gray-800 dark:group-hover:text-gray-300" /></a > </div> {#if isHuggingChat} <a href="https://hf.co/{assistantsCreator}" target="_blank" class="ml-auto flex items-center text-xs text-gray-500 underline hover:text-gray-800 dark:text-gray-400 dark:hover:text-gray-300" ><CarbonArrowUpRight class="mr-1 flex-none text-[0.58rem]" target="_blank" />View {assistantsCreator} on HF</a > {/if} {:else} <a href={getHref($page.url, { existingKeys: { behaviour: "delete", keys: ["user", "modelId", "p", "q"] }, })} on:click={resetFilter} class="flex items-center gap-1.5 rounded-full border px-3 py-1 {!assistantsCreator ? 'border-gray-300 bg-gray-50 dark:border-gray-600 dark:bg-gray-700 dark:text-white' : 'border-transparent text-gray-400 hover:text-gray-800 dark:hover:text-gray-300'}" > <CarbonEarthAmerica class="text-xs" /> Community </a> {#if data.user?.username} <a href={getHref($page.url, { newKeys: { user: data.user.username }, existingKeys: { behaviour: "delete", keys: ["modelId", "p", "q"] }, })} on:click={resetFilter} class="flex items-center gap-1.5 truncate rounded-full border px-3 py-1 {assistantsCreator && createdByMe ? 
'border-gray-300 bg-gray-50 dark:border-gray-600 dark:bg-gray-700 dark:text-white' : 'border-transparent text-gray-400 hover:text-gray-800 dark:hover:text-gray-300'}" >{data.user.username} </a> {/if} {/if} <div class="relative ml-auto flex h-[30px] w-40 items-center rounded-full border px-2 has-[:focus]:border-gray-400 sm:w-64 dark:border-gray-600" > <CarbonSearch class="pointer-events-none absolute left-2 text-xs text-gray-400" /> <input class="h-[30px] w-full bg-transparent pl-5 focus:outline-none" placeholder="Filter by name" value={filterValue} on:input={(e) => filterOnName(e.currentTarget.value)} bind:this={filterInputEl} maxlength="150" type="search" /> </div> </div> <div class="mt-8 grid grid-cols-2 gap-3 sm:gap-5 md:grid-cols-3 lg:grid-cols-4"> {#each data.assistants as assistant (assistant._id)} {@const hasRag = assistant?.rag?.allowAllDomains || !!assistant?.rag?.allowedDomains?.length || !!assistant?.rag?.allowedLinks?.length} <button class="relative flex flex-col items-center justify-center overflow-hidden text-balance rounded-xl border bg-gray-50/50 px-4 py-6 text-center shadow hover:bg-gray-50 hover:shadow-inner max-sm:px-4 sm:h-64 sm:pb-4 xl:pt-8 dark:border-gray-800/70 dark:bg-gray-950/20 dark:hover:bg-gray-950/40" on:click={() => { if (data.settings.assistants.includes(assistant._id.toString())) { settings.instantSet({ activeModel: assistant._id.toString() }); goto(`${base}` || "/"); } else { goto(`${base}/assistant/${assistant._id}`); } }} > {#if assistant.userCount && assistant.userCount > 1} <div class="absolute right-3 top-3 flex items-center gap-1 text-xs text-gray-400" title="Number of users" > <CarbonUserMultiple class="text-xxs" />{formatUserCount(assistant.userCount)} </div> {/if} {#if hasRag} <div class="absolute left-3 top-3 grid size-5 place-items-center rounded-full bg-blue-500/10" title="This assistant uses the websearch." > <IconInternet classNames="text-sm text-blue-600" /> </div> {/if} {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id}/avatar.jpg" alt="Avatar" class="mb-2 aspect-square size-12 flex-none rounded-full object-cover sm:mb-6 sm:size-20" /> {:else} <div class="mb-2 flex aspect-square size-12 flex-none items-center justify-center rounded-full bg-gray-300 text-2xl font-bold uppercase text-gray-500 sm:mb-6 sm:size-20 dark:bg-gray-800" > {assistant.name[0]} </div> {/if} <h3 class="mb-2 line-clamp-2 max-w-full break-words text-center text-[.8rem] font-semibold leading-snug sm:text-sm" > {assistant.name} </h3> <p class="line-clamp-4 text-xs text-gray-700 sm:line-clamp-2 dark:text-gray-400"> {assistant.description} </p> {#if assistant.createdByName} <p class="mt-auto pt-2 text-xs text-gray-400 dark:text-gray-500"> Created by <a class="hover:underline" href="{base}/assistants?user={assistant.createdByName}" > {assistant.createdByName} </a> </p> {/if} </button> {:else} No assistants found {/each} </div> <Pagination classNames="w-full flex justify-center mt-14 mb-4" numItemsPerPage={data.numItemsPerPage} numTotalItems={data.numTotalItems} /> </div> </div>
chat-ui/src/routes/assistants/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistants/+page.svelte", "repo_id": "chat-ui", "token_count": 4279 }
59
<script lang="ts"> import type { PageData } from "./$types"; import { PUBLIC_APP_NAME } from "$env/static/public"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import { base } from "$app/paths"; import { page } from "$app/stores"; import CarbonHelpFilled from "~icons/carbon/help-filled"; export let data: PageData; </script> <svelte:head> {#if isHuggingChat} <title>HuggingChat - Models</title> <meta property="og:title" content="HuggingChat - Models" /> <meta property="og:type" content="link" /> <meta property="og:description" content="Browse HuggingChat available models" /> <meta property="og:url" content={$page.url.href} /> {/if} </svelte:head> <div class="scrollbar-custom mr-1 h-full overflow-y-auto py-12 max-sm:pt-8 md:py-24"> <div class="pt-42 mx-auto flex flex-col px-5 xl:w-[60rem] 2xl:w-[64rem]"> <div class="flex items-center"> <h1 class="text-2xl font-bold">Models</h1> {#if isHuggingChat} <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/372" class="ml-auto dark:text-gray-400 dark:hover:text-gray-300" target="_blank" > <CarbonHelpFilled /> </a> {/if} </div> <h3 class="text-gray-500">All models available on {PUBLIC_APP_NAME}</h3> <dl class="mt-8 grid grid-cols-1 gap-3 sm:gap-5 xl:grid-cols-2"> {#each data.models.filter((el) => !el.unlisted) as model, index (model.id)} <a href="{base}/models/{model.id}" class="relative flex flex-col gap-2 overflow-hidden rounded-xl border bg-gray-50/50 px-6 py-5 shadow hover:bg-gray-50 hover:shadow-inner dark:border-gray-800/70 dark:bg-gray-950/20 dark:hover:bg-gray-950/40" > <div class="flex items-center justify-between"> {#if model.logoUrl} <img class=" overflown aspect-square size-6 rounded border dark:border-gray-700" src={model.logoUrl} alt="" /> {:else} <div class="size-6 rounded border border-transparent bg-gray-300 dark:bg-gray-800" /> {/if} {#if index === 0} <div class="rounded-full border border-gray-300 px-2 py-0.5 text-xs text-gray-500 dark:border-gray-500 dark:text-gray-400" > Default </div> {/if} </div> <dt class="flex items-center gap-2 font-semibold"> {model.displayName} </dt> <dd class="text-sm text-gray-500 dark:text-gray-400">{model.description || "-"}</dd> </a> {/each} </dl> </div> </div>
chat-ui/src/routes/models/+page.svelte/0
{ "file_path": "chat-ui/src/routes/models/+page.svelte", "repo_id": "chat-ui", "token_count": 1103 }
60
import { collections } from "$lib/server/database"; import { error, type RequestHandler } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; export const GET: RequestHandler = async ({ params }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { throw error(404, "No assistant found"); } if (!assistant.avatar) { throw error(404, "No avatar found"); } const fileId = collections.bucket.find({ filename: assistant._id.toString() }); const content = await fileId.next().then(async (file) => { if (!file?._id) { throw error(404, "Avatar not found"); } const fileStream = collections.bucket.openDownloadStream(file?._id); const fileBuffer = await new Promise<Buffer>((resolve, reject) => { const chunks: Uint8Array[] = []; fileStream.on("data", (chunk) => chunks.push(chunk)); fileStream.on("error", reject); fileStream.on("end", () => resolve(Buffer.concat(chunks))); }); return fileBuffer; }); return new Response(content, { headers: { "Content-Type": "image/jpeg", }, }); };
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/avatar.jpg/+server.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/avatar.jpg/+server.ts", "repo_id": "chat-ui", "token_count": 385 }
61
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none"> <path fill="#2063EC" d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z" /> </svg>
chat-ui/static/chatui/logo.svg/0
{ "file_path": "chat-ui/static/chatui/logo.svg", "repo_id": "chat-ui", "token_count": 125 }
62
# Add patterns of files dvc should ignore, which could improve # the performance. Learn more at # https://dvc.org/doc/user-guide/dvcignore
datasets/.dvcignore/0
{ "file_path": "datasets/.dvcignore", "repo_id": "datasets", "token_count": 40 }
63
.PHONY: quality style test check_dirs := tests src benchmarks metrics utils # Check that source code meets quality standards quality: ruff check $(check_dirs) setup.py # linter ruff format --check $(check_dirs) setup.py # formatter # Format source code automatically style: ruff check --fix $(check_dirs) setup.py # linter ruff format $(check_dirs) setup.py # formatter # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/
datasets/Makefile/0
{ "file_path": "datasets/Makefile", "repo_id": "datasets", "token_count": 149 }
64
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def get_duration(func): def wrapper(*args, **kwargs): starttime = timeit.default_timer() _ = func(*args, **kwargs) delta = timeit.default_timer() - starttime return delta wrapper.__name__ = func.__name__ return wrapper def generate_examples(features: dict, num_examples=100, seq_shapes=None): dummy_data = [] seq_shapes = seq_shapes or {} for i in range(num_examples): example = {} for col_id, (k, v) in enumerate(features.items()): if isinstance(v, _ArrayXD): data = np.random.rand(*v.shape).astype(v.dtype) elif isinstance(v, datasets.Value): if v.dtype == "string": data = "The small grey turtle was surprisingly fast when challenged." else: data = np.random.randint(10, size=1).astype(v.dtype).item() elif isinstance(v, datasets.Sequence): while isinstance(v, datasets.Sequence): v = v.feature shape = seq_shapes[k] data = np.random.rand(*shape).astype(v.dtype) example[k] = data dummy_data.append((i, example)) return dummy_data def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None): dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes) with ArrowWriter(features=features, path=dataset_path) as writer: for key, record in dummy_data: example = features.encode_example(record) writer.write(example) num_final_examples, num_bytes = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." ) dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features)) return dataset
datasets/benchmarks/utils.py/0
{ "file_path": "datasets/benchmarks/utils.py", "repo_id": "datasets", "token_count": 927 }
65
# Beam Datasets <Tip warning={true}> The Beam API is deprecated and will be removed in the next major release. </Tip> Some datasets are too large to be processed on a single machine. Instead, you can process them with [Apache Beam](https://beam.apache.org/), a library for parallel data processing. The processing pipeline is executed on a distributed processing backend such as [Apache Flink](https://flink.apache.org/), [Apache Spark](https://spark.apache.org/), or [Google Cloud Dataflow](https://cloud.google.com/dataflow). We have already created Beam pipelines for some of the larger datasets like [wikipedia](https://huggingface.co/datasets/wikipedia), and [wiki40b](https://huggingface.co/datasets/wiki40b). You can load these normally with [`load_dataset`]. But if you want to run your own Beam pipeline with Dataflow, here is how: 1. Specify the dataset and configuration you want to process: ``` DATASET_NAME=your_dataset_name # ex: wikipedia CONFIG_NAME=your_config_name # ex: 20220301.en ``` 2. Input your Google Cloud Platform information: ``` PROJECT=your_project BUCKET=your_bucket REGION=your_region ``` 3. Specify your Python requirements: ``` echo "datasets" > /tmp/beam_requirements.txt echo "apache_beam" >> /tmp/beam_requirements.txt ``` 4. Run the pipeline: ``` datasets-cli run_beam datasets/$DATASET_NAME \ --name $CONFIG_NAME \ --save_info \ --cache_dir gs://$BUCKET/cache/datasets \ --beam_pipeline_options=\ "runner=DataflowRunner,project=$PROJECT,job_name=$DATASET_NAME-gen,"\ "staging_location=gs://$BUCKET/binaries,temp_location=gs://$BUCKET/temp,"\ "region=$REGION,requirements_file=/tmp/beam_requirements.txt" ``` <Tip> When you run your pipeline, you can adjust the parameters to change the runner (Flink or Spark), output location (S3 bucket or HDFS), and the number of workers. </Tip>
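Once a run finishes (or when using one of the ready-made configurations mentioned above), the processed dataset can be loaded like any other dataset. A minimal sketch, assuming the `wikipedia` dataset with the `20220301.en` configuration from the example:

```python
from datasets import load_dataset

# Load the English Wikipedia dump that was already processed with Apache Beam
wiki = load_dataset("wikipedia", "20220301.en", split="train")
print(wiki[0]["title"])
```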
datasets/docs/source/beam.mdx/0
{ "file_path": "datasets/docs/source/beam.mdx", "repo_id": "datasets", "token_count": 594 }
66
# Datasets <img class="float-left !m-0 !border-0 !dark:border-0 !shadow-none !max-w-lg w-[150px]" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasets_logo.png"/> 🤗 Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks. Load a dataset in a single line of code, and use our powerful data processing methods to quickly get your dataset ready for training in a deep learning model. Backed by the Apache Arrow format, process large datasets with zero-copy reads without any memory constraints for optimal speed and efficiency. We also feature a deep integration with the [Hugging Face Hub](https://huggingface.co/datasets), allowing you to easily load and share a dataset with the wider machine learning community. Find your dataset today on the [Hugging Face Hub](https://huggingface.co/datasets), and take an in-depth look inside of it with the live viewer. <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorial" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div> <p class="text-gray-700">Learn the basics and become familiar with loading, accessing, and processing a dataset. Start here if you are using 🤗 Datasets for the first time!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./how_to" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div> <p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Datasets to solve real-world problems.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./about_arrow" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div> <p class="text-gray-700">High-level explanations for building a better understanding about important topics such as the underlying data format, the cache, and how datasets are generated.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/main_classes" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div> <p class="text-gray-700">Technical descriptions of how 🤗 Datasets classes and methods work.</p> </a> </div> </div>
datasets/docs/source/index.mdx/0
{ "file_path": "datasets/docs/source/index.mdx", "repo_id": "datasets", "token_count": 1014 }
67
# Structure your repository To host and share your dataset, create a dataset repository on the Hugging Face Hub and upload your data files. This guide will show you how to structure your dataset repository when you upload it. A dataset with a supported structure and file format (`.txt`, `.csv`, `.parquet`, `.jsonl`, `.mp3`, `.jpg`, `.zip` etc.) are loaded automatically with [`~datasets.load_dataset`], and it'll have a dataset viewer on its dataset page on the Hub. ## Main use-case The simplest dataset structure has two files: `train.csv` and `test.csv` (this works with any supported file format). Your repository will also contain a `README.md` file, the [dataset card](dataset_card) displayed on your dataset page. ``` my_dataset_repository/ ├── README.md ├── train.csv └── test.csv ``` In this simple case, you'll get a dataset with two splits: `train` (containing examples from `train.csv`) and `test` (containing examples from `test.csv`). ## Define your splits and subsets in YAML ## Splits If you have multiple files and want to define which file goes into which split, you can use the YAML `configs` field at the top of your README.md. For example, given a repository like this one: ``` my_dataset_repository/ ├── README.md ├── data.csv └── holdout.csv ``` You can define your splits by adding the `configs` field in the YAML block at the top of your README.md: ```yaml --- configs: - config_name: default data_files: - split: train path: "data.csv" - split: test path: "holdout.csv" --- ``` You can select multiple files per split using a list of paths: ``` my_dataset_repository/ ├── README.md ├── data/ │ ├── abc.csv │ └── def.csv └── holdout/ └── ghi.csv ``` ```yaml --- configs: - config_name: default data_files: - split: train path: - "data/abc.csv" - "data/def.csv" - split: test path: "holdout/ghi.csv" --- ``` Or you can use glob patterns to automatically list all the files you need: ```yaml --- configs: - config_name: default data_files: - split: train path: "data/*.csv" - split: test path: "holdout/*.csv" --- ``` <Tip warning={true}> Note that `config_name` field is required even if you have a single configuration. </Tip> ## Configurations Your dataset might have several subsets of data that you want to be able to load separately. In that case you can define a list of configurations inside the `configs` field in YAML: ``` my_dataset_repository/ ├── README.md ├── main_data.csv └── additional_data.csv ``` ```yaml --- configs: - config_name: main_data data_files: "main_data.csv" - config_name: additional_data data_files: "additional_data.csv" --- ``` Each configuration is shown separately on the Hugging Face Hub, and can be loaded by passing its name as a second parameter: ```python from datasets import load_dataset main_data = load_dataset("my_dataset_repository", "main_data") additional_data = load_dataset("my_dataset_repository", "additional_data") ``` ## Builder parameters Not only `data_files`, but other builder-specific parameters can be passed via YAML, allowing for more flexibility on how to load the data while not requiring any custom code. For example, define which separator to use in which configuration to load your `csv` files: ```yaml --- configs: - config_name: tab data_files: "main_data.csv" sep: "\t" - config_name: comma data_files: "additional_data.csv" sep: "," --- ``` Refer to [specific builders' documentation](./package_reference/builder_classes) to see what configuration parameters they have. <Tip> You can set a default configuration using `default: true`, e.g. 
you can run `main_data = load_dataset("my_dataset_repository")` if you set ```yaml - config_name: main_data data_files: "main_data.csv" default: true ``` </Tip> ## Automatic splits detection If no YAML is provided, 🤗 Datasets searches for certain patterns in the dataset repository to automatically infer the dataset splits. There is an order to the patterns, beginning with the custom filename split format to treating all files as a single split if no pattern is found. ### Directory name Your data files may also be placed into different directories named `train`, `test`, and `validation` where each directory contains the data files for that split: ``` my_dataset_repository/ ├── README.md └── data/ ├── train/ │ └── bees.csv ├── test/ │ └── more_bees.csv └── validation/ └── even_more_bees.csv ``` ### Filename splits If you don't have any non-traditional splits, then you can place the split name anywhere in the data file and it is automatically inferred. The only rule is that the split name must be delimited by non-word characters, like `test-file.csv` for example instead of `testfile.csv`. Supported delimiters include underscores, dashes, spaces, dots, and numbers. For example, the following file names are all acceptable: - train split: `train.csv`, `my_train_file.csv`, `train1.csv` - validation split: `validation.csv`, `my_validation_file.csv`, `validation1.csv` - test split: `test.csv`, `my_test_file.csv`, `test1.csv` Here is an example where all the files are placed into a directory named `data`: ``` my_dataset_repository/ ├── README.md └── data/ ├── train.csv ├── test.csv └── validation.csv ``` ### Custom filename split If your dataset splits have custom names that aren't `train`, `test`, or `validation`, then you can name your data files like `data/<split_name>-xxxxx-of-xxxxx.csv`. Here is an example with three splits, `train`, `test`, and `random`: ``` my_dataset_repository/ ├── README.md └── data/ ├── train-00000-of-00003.csv ├── train-00001-of-00003.csv ├── train-00002-of-00003.csv ├── test-00000-of-00001.csv ├── random-00000-of-00003.csv ├── random-00001-of-00003.csv └── random-00002-of-00003.csv ``` ### Single split When 🤗 Datasets can't find any of the above patterns, then it'll treat all the files as a single train split. If your dataset splits aren't loading as expected, it may be due to an incorrect pattern. ### Split name keywords There are several ways to name splits. Validation splits are sometimes called "dev", and test splits may be referred to as "eval". These other split names are also supported, and the following keywords are equivalent: - train, training - validation, valid, val, dev - test, testing, eval, evaluation The structure below is a valid repository: ``` my_dataset_repository/ ├── README.md └── data/ ├── training.csv ├── eval.csv └── valid.csv ``` ### Multiple files per split If one of your splits comprises several files, 🤗 Datasets can still infer whether it is the train, validation, and test split from the file name. For example, if your train and test splits span several files: ``` my_dataset_repository/ ├── README.md ├── train_0.csv ├── train_1.csv ├── train_2.csv ├── train_3.csv ├── test_0.csv └── test_1.csv ``` Make sure all the files of your `train` set have *train* in their names (same for test and validation). Even if you add a prefix or suffix to `train` in the file name (like `my_train_file_00001.csv` for example), 🤗 Datasets can still infer the appropriate split. For convenience, you can also place your data files into different directories. 
In this case, the split name is inferred from the directory name. ``` my_dataset_repository/ ├── README.md └── data/ ├── train/ │ ├── shard_0.csv │ ├── shard_1.csv │ ├── shard_2.csv │ └── shard_3.csv └── test/ ├── shard_0.csv └── shard_1.csv ``` For more flexibility over how to load and generate a dataset, you can also write a [dataset loading script](./dataset_script).
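As a quick check of the split detection described in this section, a repository laid out like the examples above (using the hypothetical `my_dataset_repository` name from this guide) can typically be loaded split by split:

```python
from datasets import load_dataset

# Load every automatically detected split as a DatasetDict
dataset = load_dataset("my_dataset_repository")

# Or load a single split directly
train_ds = load_dataset("my_dataset_repository", split="train")
test_ds = load_dataset("my_dataset_repository", split="test")
```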
datasets/docs/source/repository_structure.mdx/0
{ "file_path": "datasets/docs/source/repository_structure.mdx", "repo_id": "datasets", "token_count": 2588 }
68
# Metric Card for BERT Score ## Metric description BERTScore is an automatic evaluation metric for text generation that computes a similarity score for each token in the candidate sentence with each token in the reference sentence. It leverages the pre-trained contextual embeddings from [BERT](https://huggingface.co/bert-base-uncased) models and matches words in candidate and reference sentences by cosine similarity. Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language generation tasks. ## How to use BERTScore takes 3 mandatory arguments : `predictions` (a list of string of candidate sentences), `references` (a list of strings or list of list of strings of reference sentences) and either `lang` (a string of two letters indicating the language of the sentences, in [ISO 639-1 format](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes)) or `model_type` (a string specififying which model to use, according to the BERT specification). The default behavior of the metric is to use the suggested model for the target language when one is specified, otherwise to use the `model_type` indicated. ```python from datasets import load_metric bertscore = load_metric("bertscore") predictions = ["hello there", "general kenobi"] references = ["hello there", "general kenobi"] results = bertscore.compute(predictions=predictions, references=references, lang="en") ``` BERTScore also accepts multiple optional arguments: `num_layers` (int): The layer of representation to use. The default is the number of layers tuned on WMT16 correlation data, which depends on the `model_type` used. `verbose` (bool): Turn on intermediate status update. The default value is `False`. `idf` (bool or dict): Use idf weighting; can also be a precomputed idf_dict. `device` (str): On which the contextual embedding model will be allocated on. If this argument is `None`, the model lives on `cuda:0` if cuda is available. `nthreads` (int): Number of threads used for computation. The default value is `4`. `rescale_with_baseline` (bool): Rescale BERTScore with the pre-computed baseline. The default value is `False`. `batch_size` (int): BERTScore processing batch size, at least one of `model_type` or `lang`. `lang` needs to be specified when `rescale_with_baseline` is `True`. `baseline_path` (str): Customized baseline file. `use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer. The default value is `False`. ## Output values BERTScore outputs a dictionary with the following values: `precision`: The [precision](https://huggingface.co/metrics/precision) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0. `recall`: The [recall](https://huggingface.co/metrics/recall) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0. `f1`: The [F1 score](https://huggingface.co/metrics/f1) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0. `hashcode:` The hashcode of the library. ### Values from popular papers The [original BERTScore paper](https://openreview.net/pdf?id=SkeHuCVFDr) reported average model selection accuracies (Hits@1) on WMT18 hybrid systems for different language pairs, which ranged from 0.004 for `en<->tr` to 0.824 for `en<->de`. For more recent model performance, see the [metric leaderboard](https://paperswithcode.com/paper/bertscore-evaluating-text-generation-with). 
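Before turning to full examples, here is a sketch of how some of the optional arguments described above can be combined. Note that `rescale_with_baseline=True` requires `lang` to be set, and the exact scores depend on the model and baseline used, so no output is shown here:

```python
from datasets import load_metric

bertscore = load_metric("bertscore")
predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]

# idf weighting and baseline rescaling are both optional
results = bertscore.compute(
    predictions=predictions,
    references=references,
    lang="en",
    idf=True,
    rescale_with_baseline=True,
)
```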
## Examples

Maximal values with the `distilbert-base-uncased` model:

```python
from datasets import load_metric
bertscore = load_metric("bertscore")
predictions = ["hello world", "general kenobi"]
references = ["hello world", "general kenobi"]
results = bertscore.compute(predictions=predictions, references=references, model_type="distilbert-base-uncased")
print(results)
{'precision': [1.0, 1.0], 'recall': [1.0, 1.0], 'f1': [1.0, 1.0], 'hashcode': 'distilbert-base-uncased_L5_no-idf_version=0.3.10(hug_trans=4.10.3)'}
```

Partial match with the `bert-base-uncased` model:

```python
from datasets import load_metric
bertscore = load_metric("bertscore")
predictions = ["hello world", "general kenobi"]
references = ["goodnight moon", "the sun is shining"]
results = bertscore.compute(predictions=predictions, references=references, model_type="bert-base-uncased")
print(results)
{'precision': [0.7380737066268921, 0.5584042072296143], 'recall': [0.7380737066268921, 0.5889028906822205], 'f1': [0.7380737066268921, 0.5732481479644775], 'hashcode': 'bert-base-uncased_L5_no-idf_version=0.3.10(hug_trans=4.10.3)'}
```

## Limitations and bias

The [original BERTScore paper](https://openreview.net/pdf?id=SkeHuCVFDr) showed that BERTScore correlates well with human judgment on sentence-level and system-level evaluation, but this depends on the model and language pair selected.

Furthermore, not all languages are supported by the metric -- see the [BERTScore supported language list](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages) for more information.

Finally, calculating the BERTScore metric involves downloading the BERT model that is used to compute the score-- the default model for `en`, `roberta-large`, takes over 1.4GB of storage space and downloading it can take a significant amount of time depending on the speed of your internet connection. If this is an issue, choose a smaller model; for instance `distilbert-base-uncased` is 268MB. A full list of compatible models can be found [here](https://docs.google.com/spreadsheets/d/1RKOVpselB98Nnh_EOC4A2BYn8_201tmPODpNWu4w7xI/edit#gid=0).

## Citation

```bibtex
@inproceedings{bert-score,
  title={BERTScore: Evaluating Text Generation with BERT},
  author={Tianyi Zhang* and Varsha Kishore* and Felix Wu* and Kilian Q. Weinberger and Yoav Artzi},
  booktitle={International Conference on Learning Representations},
  year={2020},
  url={https://openreview.net/forum?id=SkeHuCVFDr}
}
```

## Further References

- [BERTScore Project README](https://github.com/Tiiiger/bert_score#readme)
- [BERTScore ICLR 2020 Poster Presentation](https://iclr.cc/virtual_2020/poster_SkeHuCVFDr.html)
datasets/metrics/bertscore/README.md/0
{ "file_path": "datasets/metrics/bertscore/README.md", "repo_id": "datasets", "token_count": 1908 }
69
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accuracy metric for the Mathematics Aptitude Test of Heuristics (MATH) dataset.""" import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets _CITATION = """\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } """ _DESCRIPTION = """\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. """ _KWARGS_DESCRIPTION = r""" Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {'accuracy': 1.0} """ @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class CompetitionMathMetric(datasets.Metric): """Accuracy metric for the MATH dataset.""" def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string"), "references": datasets.Value("string"), } ), # Homepage of the metric for documentation homepage="https://github.com/hendrycks/math", # Additional links to the codebase or references codebase_urls=["https://github.com/hendrycks/math"], ) def _compute(self, predictions, references): """Returns the scores""" n_correct = 0.0 for i, j in zip(predictions, references): n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0 accuracy = n_correct / len(predictions) return { "accuracy": accuracy, }
datasets/metrics/competition_math/competition_math.py/0
{ "file_path": "datasets/metrics/competition_math/competition_math.py", "repo_id": "datasets", "token_count": 1181 }
70
# Metric Card for IndicGLUE ## Metric description This metric is used to compute the evaluation metric for the [IndicGLUE dataset](https://huggingface.co/datasets/indic_glue). IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - Assamese (`as`), Bengali (`bn`), Gujarati (`gu`), Hindi (`hi`), Kannada (`kn`), Malayalam (`ml`), Marathi(`mr`), Oriya(`or`), Panjabi (`pa`), Tamil(`ta`) and Telugu (`te`). ## How to use There are two steps: (1) loading the IndicGLUE metric relevant to the subset of the dataset being used for evaluation; and (2) calculating the metric. 1. **Loading the relevant IndicGLUE metric** : the subsets of IndicGLUE are the following: `wnli`, `copa`, `sna`, `csqa`, `wstp`, `inltkh`, `bbca`, `cvit-mkb-clsr`, `iitp-mr`, `iitp-pr`, `actsa-sc`, `md`, and`wiki-ner`. More information about the different subsets of the Indic GLUE dataset can be found on the [IndicGLUE dataset page](https://indicnlp.ai4bharat.org/indic-glue/). 2. **Calculating the metric**: the metric takes two inputs : one list with the predictions of the model to score and one lists of references for each translation for all subsets of the dataset except for `cvit-mkb-clsr`, where each prediction and reference is a vector of floats. ```python from datasets import load_metric indic_glue_metric = load_metric('indic_glue', 'wnli') references = [0, 1] predictions = [0, 1] results = indic_glue_metric.compute(predictions=predictions, references=references) ``` ## Output values The output of the metric depends on the IndicGLUE subset chosen, consisting of a dictionary that contains one or several of the following metrics: `accuracy`: the proportion of correct predictions among the total number of cases processed, with a range between 0 and 1 (see [accuracy](https://huggingface.co/metrics/accuracy) for more information). `f1`: the harmonic mean of the precision and recall (see [F1 score](https://huggingface.co/metrics/f1) for more information). Its range is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall. `precision@10`: the fraction of the true examples among the top 10 predicted examples, with a range between 0 and 1 (see [precision](https://huggingface.co/metrics/precision) for more information). The `cvit-mkb-clsr` subset returns `precision@10`, the `wiki-ner` subset returns `accuracy` and `f1`, and all other subsets of Indic GLUE return only accuracy. ### Values from popular papers The [original IndicGlue paper](https://aclanthology.org/2020.findings-emnlp.445.pdf) reported an average accuracy of 0.766 on the dataset, which varies depending on the subset selected. 
## Examples Maximal values for the WNLI subset (which outputs `accuracy`): ```python from datasets import load_metric indic_glue_metric = load_metric('indic_glue', 'wnli') references = [0, 1] predictions = [0, 1] results = indic_glue_metric.compute(predictions=predictions, references=references) print(results) {'accuracy': 1.0} ``` Minimal values for the Wiki-NER subset (which outputs `accuracy` and `f1`): ```python >>> from datasets import load_metric >>> indic_glue_metric = load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [1,0] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} ``` Partial match for the CVIT-Mann Ki Baat subset (which outputs `precision@10`) ```python >>> from datasets import load_metric >>> indic_glue_metric = load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} ``` ## Limitations and bias This metric works only with datasets that have the same format as the [IndicGLUE dataset](https://huggingface.co/datasets/glue). ## Citation ```bibtex @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } ``` ## Further References - [IndicNLP website](https://indicnlp.ai4bharat.org/home/) -
datasets/metrics/indic_glue/README.md/0
{ "file_path": "datasets/metrics/indic_glue/README.md", "repo_id": "datasets", "token_count": 1527 }
71
# Metric Card for Pearson Correlation Coefficient (pearsonr)

## Metric Description

Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.

## How to Use

This metric takes a list of predictions and a list of references as input.

```python
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
```

### Inputs
- **predictions** (`list` of `float`): Predicted values, as returned by a model.
- **references** (`list` of `float`): Ground truth values.
- **return_pvalue** (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

### Output Values
- **pearsonr**(`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
- **p-value**(`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Output Example(s):
```python
{'pearsonr': -0.7}
```
```python
{'p-value': 0.15}
```

#### Values from Popular Papers

### Examples

Example 1 - A simple example using only predictions and references.
```python
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
```

Example 2 - The same as Example 1, but also returning the `p-value`.
```python
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
```

## Limitations and Bias

As stated above, the calculation of the p-value relies on the assumption that each data set is normally distributed. This is not always the case, so verifying the true distribution of datasets is recommended.

## Citation(s)
```bibtex
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E.
and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ``` ## Further References
datasets/metrics/pearsonr/README.md/0
{ "file_path": "datasets/metrics/pearsonr/README.md", "repo_id": "datasets", "token_count": 1387 }
72
# Metric Card for seqeval ## Metric description seqeval is a Python framework for sequence labeling evaluation. seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on. ## How to use Seqeval produces labelling scores along with its sufficient statistics from a source against one or more references. It takes two mandatory arguments: `predictions`: a list of lists of predicted labels, i.e. estimated targets as returned by a tagger. `references`: a list of lists of reference labels, i.e. the ground truth/target values. It can also take several optional arguments: `suffix` (boolean): `True` if the IOB tag is a suffix (after type) instead of a prefix (before type), `False` otherwise. The default value is `False`, i.e. the IOB tag is a prefix (before type). `scheme`: the target tagging scheme, which can be one of [`IOB1`, `IOB2`, `IOE1`, `IOE2`, `IOBES`, `BILOU`]. The default value is `None`. `mode`: whether to count correct entity labels with incorrect I/B tags as true positives or not. If you want to only count exact matches, pass `mode="strict"` and a specific `scheme` value. The default is `None`. `sample_weight`: An array-like of shape (n_samples,) that provides weights for individual samples. The default is `None`. `zero_division`: Which value to substitute as a metric value when encountering zero division. Should be one of [`0`,`1`,`"warn"`]. `"warn"` acts as `0`, but the warning is raised. ```python >>> from datasets import load_metric >>> seqeval = load_metric('seqeval') >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> results = seqeval.compute(predictions=predictions, references=references) ``` ## Output values This metric returns a dictionary with a summary of scores for overall and per type: Overall: `accuracy`: the average [accuracy](https://huggingface.co/metrics/accuracy), on a scale between 0.0 and 1.0. `precision`: the average [precision](https://huggingface.co/metrics/precision), on a scale between 0.0 and 1.0. `recall`: the average [recall](https://huggingface.co/metrics/recall), on a scale between 0.0 and 1.0. `f1`: the average [F1 score](https://huggingface.co/metrics/f1), which is the harmonic mean of the precision and recall. It also has a scale of 0.0 to 1.0. Per type (e.g. `MISC`, `PER`, `LOC`,...): `precision`: the average [precision](https://huggingface.co/metrics/precision), on a scale between 0.0 and 1.0. `recall`: the average [recall](https://huggingface.co/metrics/recall), on a scale between 0.0 and 1.0. `f1`: the average [F1 score](https://huggingface.co/metrics/f1), on a scale between 0.0 and 1.0. ### Values from popular papers The 1995 "Text Chunking using Transformation-Based Learning" [paper](https://aclanthology.org/W95-0107) reported a baseline recall of 81.9% and a precision of 78.2% using non Deep Learning-based methods. More recently, seqeval continues being used for reporting performance on tasks such as [named entity detection](https://www.mdpi.com/2306-5729/6/8/84/htm) and [information extraction](https://ieeexplore.ieee.org/abstract/document/9697942/). 
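The optional `mode` and `scheme` arguments described above can be combined so that only exact entity matches are counted. A minimal sketch, reusing the toy inputs from the examples below:

```python
>>> from datasets import load_metric
>>> seqeval = load_metric('seqeval')
>>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> results = seqeval.compute(predictions=predictions, references=references, mode="strict", scheme="IOB2")
```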
## Examples Maximal values (full match) : ```python >>> from datasets import load_metric >>> seqeval = load_metric('seqeval') >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> references = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> results = seqeval.compute(predictions=predictions, references=references) >>> print(results) {'MISC': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'PER': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'overall_precision': 1.0, 'overall_recall': 1.0, 'overall_f1': 1.0, 'overall_accuracy': 1.0} ``` Minimal values (no match): ```python >>> from datasets import load_metric >>> seqeval = load_metric('seqeval') >>> predictions = [['O', 'B-MISC', 'I-MISC'], ['B-PER', 'I-PER', 'O']] >>> references = [['B-MISC', 'O', 'O'], ['I-PER', '0', 'I-PER']] >>> results = seqeval.compute(predictions=predictions, references=references) >>> print(results) {'MISC': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'PER': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 2}, '_': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'overall_precision': 0.0, 'overall_recall': 0.0, 'overall_f1': 0.0, 'overall_accuracy': 0.0} ``` Partial match: ```python >>> from datasets import load_metric >>> seqeval = load_metric('seqeval') >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> results = seqeval.compute(predictions=predictions, references=references) >>> print(results) {'MISC': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'PER': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'overall_precision': 0.5, 'overall_recall': 0.5, 'overall_f1': 0.5, 'overall_accuracy': 0.8} ``` ## Limitations and bias seqeval supports following IOB formats (short for inside, outside, beginning) : `IOB1`, `IOB2`, `IOE1`, `IOE2`, `IOBES`, `IOBES` (only in strict mode) and `BILOU` (only in strict mode). For more information about IOB formats, refer to the [Wikipedia page](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)) and the description of the [CoNLL-2000 shared task](https://aclanthology.org/W02-2024). ## Citation ```bibtex @inproceedings{ramshaw-marcus-1995-text, title = "Text Chunking using Transformation-Based Learning", author = "Ramshaw, Lance and Marcus, Mitch", booktitle = "Third Workshop on Very Large Corpora", year = "1995", url = "https://www.aclweb.org/anthology/W95-0107", } ``` ```bibtex @misc{seqeval, title={{seqeval}: A Python framework for sequence labeling evaluation}, url={https://github.com/chakki-works/seqeval}, note={Software available from https://github.com/chakki-works/seqeval}, author={Hiroki Nakayama}, year={2018}, } ``` ## Further References - [README for seqeval at GitHub](https://github.com/chakki-works/seqeval) - [CoNLL-2000 shared task](https://www.clips.uantwerpen.be/conll2002/ner/bin/conlleval.txt)
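The `mode` and `scheme` arguments described under "How to use" can change the reported scores whenever entity boundaries or tags are inconsistent. The snippet below is a minimal illustrative sketch (the label sequences are invented for this example): with `mode="strict"` and an explicit `scheme`, entities whose boundaries or types do not match exactly under the scheme are no longer counted as correct.

```python
>>> from datasets import load_metric
>>> seqeval = load_metric('seqeval')
>>> predictions = [['B-PER', 'I-PER', 'O', 'B-LOC', 'I-LOC']]
>>> references = [['B-PER', 'I-PER', 'O', 'B-LOC', 'O']]
>>> default_results = seqeval.compute(predictions=predictions, references=references)
>>> strict_results = seqeval.compute(predictions=predictions, references=references, mode='strict', scheme='IOB2')
```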
datasets/metrics/seqeval/README.md/0
{ "file_path": "datasets/metrics/seqeval/README.md", "repo_id": "datasets", "token_count": 2355 }
73
# Copyright 2021 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Word Error Rate (WER) metric.""" from jiwer import compute_measures import datasets _CITATION = """\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } """ _DESCRIPTION = """\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system, with a WER of 0 being a perfect score. """ _KWARGS_DESCRIPTION = """ Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. 
Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class WER(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), } ), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ], ) def _compute(self, predictions=None, references=None, concatenate_texts=False): if concatenate_texts: return compute_measures(references, predictions)["wer"] else: incorrect = 0 total = 0 for prediction, reference in zip(predictions, references): measures = compute_measures(reference, prediction) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
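# Illustrative usage sketch (example only, not part of the metric implementation). It assumes the
# `datasets` and `jiwer` packages are installed and shows how the definition above,
# WER = (S + D + I) / N, plays out on a small batch of transcriptions.
def _wer_usage_example():  # pragma: no cover - example only
    wer_metric = datasets.load_metric("wer")
    predictions = ["the cat sat on mat", "hello world"]
    references = ["the cat sat on the mat", "hello duck"]
    # One deletion in the first pair and one substitution in the second pair over
    # 8 reference words in total, so the expected score is (1 + 1) / 8 = 0.25.
    print(wer_metric.compute(predictions=predictions, references=references))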
datasets/metrics/wer/wer.py/0
{ "file_path": "datasets/metrics/wer/wer.py", "repo_id": "datasets", "token_count": 1452 }
74
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal logger = logging.get_logger(__name__) DatasetType = TypeVar("DatasetType", Dataset, IterableDataset) def interleave_datasets( datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ) -> DatasetType: """ Interleave several datasets (sources) into a single dataset. The new dataset is constructed by alternating between the sources to get the examples. You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects. - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples. - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities. The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy` is `"all_exhausted"`, in which case the resulting dataset ends when all datasets have run out of examples at least one time. Note for iterable datasets: In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process. Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker). Args: datasets (`List[Dataset]` or `List[IterableDataset]`): List of datasets to interleave. probabilities (`List[float]`, *optional*, defaults to `None`): If specified, the new dataset is constructed by sampling examples from one source at a time according to these probabilities. seed (`int`, *optional*, defaults to `None`): The random seed used to choose a source for each example. info ([`DatasetInfo`], *optional*): Dataset information, like description, citation, etc. <Added version="2.4.0"/> split ([`NamedSplit`], *optional*): Name of the dataset split. <Added version="2.4.0"/> stopping_strategy (`str`, defaults to `first_exhausted`): Two strategies are proposed right now, `first_exhausted` and `all_exhausted`. By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples. If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once. Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous: - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples. - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited. Returns: [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets` parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of `IterableDataset`. 
Example: For regular datasets (map-style): ```python >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] For datasets in streaming mode (iterable): >>> from datasets import load_dataset, interleave_datasets >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True) >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True) >>> dataset = interleave_datasets([d1, d2]) >>> iterator = iter(dataset) >>> next(iterator) {'text': 'Mtendere Village was inspired by the vision...} >>> next(iterator) {'text': "Média de débat d'idées, de culture...} ``` """ from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets.") for i, dataset in enumerate(datasets): if not isinstance(dataset, (Dataset, IterableDataset)): if isinstance(dataset, (DatasetDict, IterableDatasetDict)): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " "is an empty dataset dictionary." ) raise ValueError( f"Dataset at position {i} has at least one split: {list(dataset)}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}." ) if i == 0: dataset_type, other_type = ( (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) ) elif not isinstance(dataset, dataset_type): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.") if dataset_type is Dataset: return _interleave_map_style_datasets( datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy ) else: return _interleave_iterable_datasets( datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy ) def concatenate_datasets( dsets: List[DatasetType], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0, ) -> DatasetType: """ Converts a list of [`Dataset`] with the same schema into a single [`Dataset`]. Args: dsets (`List[datasets.Dataset]`): List of Datasets to concatenate. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. axis (`{0, 1}`, defaults to `0`): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). <Added version="1.6.0"/> Example: ```py >>> ds3 = concatenate_datasets([ds1, ds2]) ``` """ if not dsets: raise ValueError("Unable to concatenate an empty list of datasets.") for i, dataset in enumerate(dsets): if not isinstance(dataset, (Dataset, IterableDataset)): if isinstance(dataset, (DatasetDict, IterableDatasetDict)): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " "is an empty dataset dictionary." ) raise ValueError( f"Dataset at position {i} has at least one split: {list(dataset)}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}." ) if i == 0: dataset_type, other_type = ( (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) ) elif not isinstance(dataset, dataset_type): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis) else: return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
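# Illustrative usage sketch (example only, not part of the public module API). It demonstrates
# the two stopping strategies and the column-wise concatenation documented above, using the
# functions defined in this module.
def _combine_usage_example():  # pragma: no cover - example only
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
    # "first_exhausted" (default) alternates between sources and stops once d1 runs out:
    # [0, 10, 1, 11, 2, 12], following the docstring above.
    print(interleave_datasets([d1, d2])["a"])
    # "all_exhausted" keeps cycling over the shorter dataset until every sample has been seen:
    # [0, 10, 1, 11, 2, 12, 0, 13].
    print(interleave_datasets([d1, d2], stopping_strategy="all_exhausted")["a"])
    # axis=1 concatenates column-wise; both datasets must have the same number of rows.
    d3 = Dataset.from_dict({"b": [20, 21, 22, 23]})
    print(concatenate_datasets([d2, d3], axis=1).column_names)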
datasets/src/datasets/combine.py/0
{ "file_path": "datasets/src/datasets/combine.py", "repo_id": "datasets", "token_count": 4607 }
75
import glob import io import os import posixpath import re import tarfile import time import xml.dom.minidom import zipfile from asyncio import TimeoutError from io import BytesIO from itertools import chain from pathlib import Path, PurePosixPath from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union from xml.etree import ElementTree as ET import fsspec from aiohttp.client_exceptions import ClientError from fsspec.core import url_to_fs from huggingface_hub.utils import EntryNotFoundError from packaging import version from .. import config from ..filesystems import COMPRESSION_FILESYSTEMS from ..utils.file_utils import ( get_authentication_headers_for_url, get_datasets_user_agent, http_head, is_local_path, is_relative_path, url_or_path_join, ) from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .download_config import DownloadConfig logger = get_logger(__name__) BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", ] COMPRESSION_EXTENSION_TO_PROTOCOL = { # single file compression **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}, # archive compression "zip": "zip", } SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS} SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/") MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class NonStreamableDatasetError(Exception): pass def xjoin(a, *p): """ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xjoin function allows you to apply the join on the first path of the chain. Example:: >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") zip://folder1/file.txt::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): return os.path.join(a, *p) else: a = posixpath.join(a, *p) return "::".join([a] + b) def xdirname(a): """ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. 
Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xdirname function allows you to apply the dirname on the first path of the chain. Example:: >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip") zip://folder1::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): a = os.path.dirname(Path(a).as_posix()) else: a = posixpath.dirname(a) # if we end up at the root of the protocol, we get for example a = 'http:' # so we have to fix it by adding the '//' that was removed: if a.endswith(":"): a += "//" return "::".join([a] + b) def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None): """Extend `os.path.exists` function to support both local and remote files. Args: urlpath (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return os.path.exists(main_hop) else: urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) return fs.exists(main_hop) def xbasename(a): """ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xbasename function allows you to apply the basename on the first path of the chain. Example:: >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip") file.txt """ a, *b = str(a).split("::") if is_local_path(a): return os.path.basename(Path(a).as_posix()) else: return posixpath.basename(a) def xsplit(a): """ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplit function allows you to apply the xsplit on the first path of the chain. Example:: >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1::https://host.com/archive.zip', 'file.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.split(Path(a).as_posix()) else: a, tail = posixpath.split(a) return "::".join([a + "//" if a.endswith(":") else a] + b), tail def xsplitext(a): """ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. 
Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplitext function allows you to apply the splitext on the first path of the chain. Example:: >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1/file::https://host.com/archive.zip', '.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.splitext(Path(a).as_posix()) else: a, ext = posixpath.splitext(a) return "::".join([a] + b), ext def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isfile` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isfile(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) return fs.isfile(main_hop) def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int: """Extend `os.path.getsize` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `int`: The file size in bytes. """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.getsize(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) try: size = fs.size(main_hop) except EntryNotFoundError: raise FileNotFoundError(f"No such file: {path}") if size is None: # use xopen instead of fs.open to make data fetching more robust with xopen(path, download_config=download_config) as f: size = len(f.read()) return size def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isdir(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) inner_path = main_hop.split("://")[-1] if not inner_path.strip("/"): return True return fs.isdir(inner_path) def xrelpath(path, start=None): """Extend `os.path.relpath` function to support remote files. Args: path (`str`): URL path. start (`str`): Start URL directory path. Returns: `str` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop) else: return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop) def _add_retries_to_file_obj_read_method(file_obj): read = file_obj.read max_retries = config.STREAMING_READ_MAX_RETRIES def read_with_retries(*args, **kwargs): disconnect_err = None for retry in range(1, max_retries + 1): try: out = read(*args, **kwargs) break except (ClientError, TimeoutError) as err: disconnect_err = err logger.warning( f"Got disconnected from remote data host. 
Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]" ) time.sleep(config.STREAMING_READ_RETRY_INTERVAL) else: raise ConnectionError("Server Disconnected") from disconnect_err return out file_obj.read = read_with_retries def _get_path_extension(path: str) -> str: # Get extension: https://foo.bar/train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]: # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz urlpath = str(urlpath) path = urlpath.split("::")[0] extension = _get_path_extension(path) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL: return COMPRESSION_EXTENSION_TO_PROTOCOL[extension] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) try: with fsspec.open(urlpath, **(storage_options or {})) as f: return _get_extraction_protocol_with_magic_number(f) except FileNotFoundError: if urlpath.startswith(config.HF_ENDPOINT): raise FileNotFoundError( urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise def _prepare_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: prepared_urlpath = [] prepared_storage_options = {} for hop in urlpath.split("::"): hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config) prepared_urlpath.append(hop) prepared_storage_options.update(storage_options) return "::".join(prepared_urlpath), storage_options def _prepare_single_hop_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: """ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head In particular it resolves google drive URLs It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths. 
Storage options are formatted in the form {protocol: storage_options_for_protocol} """ token = None if download_config is None else download_config.token if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath: urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) protocol = urlpath.split("://")[0] if "://" in urlpath else "file" if download_config is not None and protocol in download_config.storage_options: storage_options = download_config.storage_options[protocol] elif download_config is not None and protocol not in download_config.storage_options: storage_options = { option_name: option_value for option_name, option_value in download_config.storage_options.items() if option_name not in fsspec.available_protocols() } else: storage_options = {} if storage_options: storage_options = {protocol: storage_options} if protocol in ["http", "https"]: storage_options[protocol] = { "headers": { **get_authentication_headers_for_url(urlpath, token=token), "user-agent": get_datasets_user_agent(), }, "client_kwargs": {"trust_env": True}, # Enable reading proxy env variables. **(storage_options.get(protocol, {})), } if "drive.google.com" in urlpath: response = http_head(urlpath) cookies = None for k, v in response.cookies.items(): if k.startswith("download_warning"): urlpath += "&confirm=" + v cookies = response.cookies storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})} # Fix Google Drive URL to avoid Virus scan warning if "drive.google.com" in urlpath and "confirm=" not in urlpath: urlpath += "&confirm=t" if urlpath.startswith("https://raw.githubusercontent.com/"): # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389 storage_options[protocol]["headers"]["Accept-Encoding"] = "identity" elif protocol == "hf": storage_options[protocol] = { "token": token, "endpoint": config.HF_ENDPOINT, **storage_options.get(protocol, {}), } # streaming with block_size=0 is only implemented in 0.21 (see https://github.com/huggingface/huggingface_hub/pull/1967) if config.HF_HUB_VERSION < version.parse("0.21.0"): storage_options[protocol]["block_size"] = "default" return urlpath, storage_options def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `open` function to support remote files using `fsspec`. It also has a retry mechanism in case connection fails. The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co Args: file (`str`): Path name of the file to be opened. mode (`str`, *optional*, default "r"): Mode in which the file is opened. *args: Arguments to be passed to `fsspec.open`. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Keyword arguments to be passed to `fsspec.open`. 
Returns: file object """ # This works as well for `xopen(str(Path(...)))` file_str = _as_str(file) main_hop, *rest_hops = file_str.split("::") if is_local_path(main_hop): # ignore fsspec-specific kwargs kwargs.pop("block_size", None) return open(main_hop, mode, *args, **kwargs) # add headers and cookies for authentication on the HF Hub and for Google Drive file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config) kwargs = {**kwargs, **(storage_options or {})} try: file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open() except ValueError as e: if str(e) == "Cannot seek streaming HTTP file": raise NonStreamableDatasetError( "Streaming is not possible for this dataset because data host server doesn't support HTTP range " "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)" ) from e else: raise except FileNotFoundError: if file.startswith(config.HF_ENDPOINT): raise FileNotFoundError( file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise _add_retries_to_file_obj_read_method(file_obj) return file_obj def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]: """Extend `os.listdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(path).split("::") if is_local_path(main_hop): return os.listdir(path) else: # globbing inside a zip in a private repo requires authentication path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): raise FileNotFoundError(f"Directory doesn't exist: {path}") paths = fs.listdir(inner_path, detail=False) return [os.path.basename(path.rstrip("/")) for path in paths] def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None): """Extend `glob.glob` function to support remote files. Args: urlpath (`str`): URL path with shell-style wildcard patterns. recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more directories or subdirectories. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return glob.glob(main_hop, recursive=recursive) else: # globbing inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) inner_path = main_hop.split("://")[1] globbed_paths = fs.glob(inner_path) protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths] def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `os.walk` function to support remote files. Args: urlpath (`str`): URL root path. download_config : mainly use token or storage_options to support different platforms and auth types. 
**kwargs: Additional keyword arguments forwarded to the underlying filesystem. Yields: `tuple`: 3-tuple (dirpath, dirnames, filenames). """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): yield from os.walk(main_hop, **kwargs) else: # walking inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): return [] protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs): yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames class xPath(type(Path())): """Extension of `pathlib.Path` to support both local paths and remote URLs.""" def __str__(self): path_str = super().__str__() main_hop, *rest_hops = path_str.split("::") if is_local_path(main_hop): return main_hop path_as_posix = path_str.replace("\\", "/") path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix) path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol return path_as_posix def exists(self, download_config: Optional[DownloadConfig] = None): """Extend `pathlib.Path.exists` method to support both local and remote files. Args: download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ return xexists(str(self), download_config=download_config) def glob(self, pattern, download_config: Optional[DownloadConfig] = None): """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. download_config : mainly use token or storage_options to support different platforms and auth types. Yields: [`xPath`] """ posix_path = self.as_posix() main_hop, *rest_hops = posix_path.split("::") if is_local_path(main_hop): yield from Path(main_hop).glob(pattern) else: # globbing inside a zip in a private repo requires authentication if rest_hops: urlpath = rest_hops[0] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) storage_options = {urlpath.split("://")[0]: storage_options} posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]]) else: storage_options = None fs, *_ = url_to_fs(xjoin(posix_path, pattern), **(storage_options or {})) globbed_paths = fs.glob(xjoin(main_hop, pattern)) for globbed_path in globbed_paths: yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) def rglob(self, pattern, **kwargs): """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. Yields: [`xPath`] """ return self.glob("**/" + pattern, **kwargs) @property def parent(self) -> "xPath": """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: [`xPath`] """ return type(self)(xdirname(self.as_posix())) @property def name(self) -> str: """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. 
Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).name @property def stem(self) -> str: """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).stem @property def suffix(self) -> str: """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).suffix def open(self, *args, **kwargs): """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`. Args: **args: Arguments passed to :func:`fsspec.open`. **kwargs: Keyword arguments passed to :func:`fsspec.open`. Returns: `io.FileIO`: File-like object. """ return xopen(str(self), *args, **kwargs) def joinpath(self, *p: Tuple[str, ...]) -> "xPath": """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`. Args: *p (`tuple` of `str`): Other path components. Returns: [`xPath`] """ return type(self)(xjoin(self.as_posix(), *p)) def __truediv__(self, p: str) -> "xPath": return self.joinpath(p) def with_suffix(self, suffix): main_hop, *rest_hops = str(self).split("::") if is_local_path(main_hop): return type(self)(str(super().with_suffix(suffix))) return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) def _as_str(path: Union[str, Path, xPath]): return str(path) if isinstance(path, xPath) else str(xPath(str(path))) def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import gzip if hasattr(filepath_or_buffer, "read"): return gzip.open(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import numpy as np if hasattr(filepath_or_buffer, "read"): return np.load(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): return pd.read_csv(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) if kwargs.get("compression", "infer") == "infer": kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config) return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): try: return pd.read_excel(filepath_or_buffer, **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) try: return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel( BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs ) def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pyarrow.parquet as pq 
if hasattr(filepath_or_buffer, "read"): return pq.read_table(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs) def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import scipy.io as sio if hasattr(filepath_or_buffer, "read"): return sio.loadmat(filepath_or_buffer, **kwargs) else: return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None): """Extend `xml.etree.ElementTree.parse` function to support remote files. Args: source: File path or file object. parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `xml.etree.ElementTree.Element`: Root element of the given source document. """ if hasattr(source, "read"): return ET.parse(source, parser=parser) else: with xopen(source, "rb", download_config=download_config) as f: return ET.parse(f, parser=parser) def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `xml.dom.minidom.parse` function to support remote files. Args: filename_or_file (`str` or file): File path or file object. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`. Returns: :obj:`xml.dom.minidom.Document`: Parsed document. """ if hasattr(filename_or_file, "read"): return xml.dom.minidom.parse(filename_or_file, **kwargs) else: with xopen(filename_or_file, "rb", download_config=download_config) as f: return xml.dom.minidom.parse(f, **kwargs) class _IterableFromGenerator(Iterable): """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" def __init__(self, generator: Callable, *args, **kwargs): self.generator = generator self.args = args self.kwargs = kwargs def __iter__(self): yield from self.generator(*self.args, **self.kwargs) class ArchiveIterable(_IterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_urlpath( cls, urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol(urlpath, download_config=download_config) # Set block_size=0 to get faster streaming 
# (e.g. for hf:// and https:// it uses streaming Requests file-like instances) with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> "ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable": return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config) class FilesIterable(_IterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_urlpaths( cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None ) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if xisfile(urlpath, download_config=download_config): yield urlpath elif xisdir(urlpath, download_config=download_config): for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config): # in-place modification to prune the search dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if xbasename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield xjoin(dirpath, filename) else: raise FileNotFoundError(urlpath) @classmethod def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable": return cls(cls._iter_from_urlpaths, urlpaths, download_config) class StreamingDownloadManager: """ Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives. Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract data, but they rather return the path or url that could be opened using the `xopen` function which extends the built-in `open` function to stream data from remote files. """ is_streaming = True def __init__( self, dataset_name: Optional[str] = None, data_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, base_path: Optional[str] = None, ): self._dataset_name = dataset_name self._data_dir = data_dir self._base_path = base_path or os.path.abspath(".") self.download_config = download_config or DownloadConfig() @property def manual_dir(self): return self._data_dir def download(self, url_or_urls): """Normalize URL(s) of files to stream data from. This is the lazy version of `DownloadManager.download` for streaming. Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True) return url_or_urls def _download(self, urlpath: str) -> str: urlpath = str(urlpath) if is_relative_path(urlpath): # append the relative path to the base_path urlpath = url_or_path_join(self._base_path, urlpath) return urlpath def extract(self, url_or_urls): """Add extraction protocol for given url(s) for streaming. This is the lazy version of `DownloadManager.extract` for streaming. 
Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True) return urlpaths def _extract(self, urlpath: str) -> str: urlpath = str(urlpath) protocol = _get_extraction_protocol(urlpath, download_config=self.download_config) # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz path = urlpath.split("::")[0] extension = _get_path_extension(path) if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")): raise NotImplementedError( f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. " f"Please use `dl_manager.iter_archive` instead.\n\n" f"Example usage:\n\n" f"\turl = dl_manager.download(url)\n" f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n" f"\tfor filename, file in tar_archive_iterator:\n" f"\t\t..." ) if protocol is None: # no extraction return urlpath elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: # there is one single file which is the uncompressed file inner_file = os.path.basename(urlpath.split("::")[0]) inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file return f"{protocol}://{inner_file}::{urlpath}" else: return f"{protocol}://::{urlpath}" def download_and_extract(self, url_or_urls): """Prepare given `url_or_urls` for streaming (add extraction protocol). This is the lazy version of `DownloadManager.download_and_extract` for streaming. Is equivalent to: ``` urls = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls (`str` or `list` or `dict`): URL(s) to stream from data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. """ return self.extract(self.download(url_or_urls)) def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]: """Iterate over files within an archive. Args: urlpath_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """ if hasattr(urlpath_or_buf, "read"): return ArchiveIterable.from_buf(urlpath_or_buf) else: return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config) def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]: """Iterate over files. Args: urlpaths (`str` or `list` of `str`): Root paths. Yields: str: File URL path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """ return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
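# Illustrative usage sketch (example only, not part of the module); the archive URL is a
# placeholder. It shows how the "::" chaining used throughout this module lets callers stream
# individual files from inside a remote ZIP archive without downloading it entirely.
def _streaming_usage_example():  # pragma: no cover - example only
    dl_manager = StreamingDownloadManager()
    # download_and_extract() fetches nothing: it only rewrites the URL into a chained form
    # such as "zip://::https://host.example/data.zip".
    archive_root = dl_manager.download_and_extract("https://host.example/data.zip")
    # Files inside the archive are then listed lazily and opened with xopen on demand.
    for file_path in dl_manager.iter_files(archive_root):
        with xopen(file_path, "rb", download_config=dl_manager.download_config) as f:
            print(file_path, f.read(16))
        break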
datasets/src/datasets/download/streaming_download_manager.py/0
{ "file_path": "datasets/src/datasets/download/streaming_download_manager.py", "repo_id": "datasets", "token_count": 18436 }
76
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import tensorflow as tf class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]): def __init__(self, features=None, **tf_tensor_kwargs): super().__init__(features=features) self.tf_tensor_kwargs = tf_tensor_kwargs import tensorflow as tf # noqa: F401 - import tf at initialization def _consolidate(self, column): import tensorflow as tf if isinstance(column, list) and column: if all( isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return tf.stack(column) elif all( isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype for x in column ): # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated return tf.ragged.stack(column) return column def _tensorize(self, value): import tensorflow as tf if value is None: return value default_dtype = {} if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): default_dtype = {"dtype": tf.int64} elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": tf.float32} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): value = np.asarray(value) return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) def _recursive_tensorize(self, data_struct): import tensorflow as tf # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(data_struct, torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor): data_struct = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(data_struct, np.ndarray): if data_struct.dtype == object: # tf tensors cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) elif isinstance(data_struct, (list, tuple)): return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) return self._tensorize(data_struct) def recursive_tensorize(self, data_struct: dict): return map_nested(self._recursive_tensorize, data_struct, map_list=False) def format_row(self, pa_table: pa.Table) -> Mapping: row = self.numpy_arrow_extractor().extract_row(pa_table) row = self.python_features_decoder.decode_row(row) return self.recursive_tensorize(row) def format_column(self, pa_table: pa.Table) -> "tf.Tensor": column = self.numpy_arrow_extractor().extract_column(pa_table) column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) column = self.recursive_tensorize(column) column = self._consolidate(column) return column def format_batch(self, pa_table: pa.Table) -> Mapping: batch = self.numpy_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) batch = self.recursive_tensorize(batch) for column_name in batch: batch[column_name] = self._consolidate(batch[column_name]) return batch
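# Illustrative usage sketch (example only, not part of the module). This formatter backs
# `Dataset.with_format("tf")` / `set_format("tf")`: equal-length rows are consolidated into a
# stacked `tf.Tensor`, while rows of different lengths come back as a `tf.RaggedTensor`.
def _tf_formatter_usage_example():  # pragma: no cover - example only
    from datasets import Dataset

    ds = Dataset.from_dict({"ids": [[1, 2], [3, 4]], "ragged": [[1], [2, 3, 4]]}).with_format("tf")
    batch = ds[:2]
    print(type(batch["ids"]))     # -> tf.Tensor of shape (2, 2)
    print(type(batch["ragged"]))  # -> tf.RaggedTensor, since the row lengths differ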
datasets/src/datasets/formatting/tf_formatter.py/0
{ "file_path": "datasets/src/datasets/formatting/tf_formatter.py", "repo_id": "datasets", "token_count": 1885 }
77
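A brief usage sketch for the `TFFormatter` shown above (not part of the original file). It assumes `datasets` and `tensorflow` are installed, and the toy column names are made up; the formatter is normally reached indirectly through `Dataset.with_format("tf")` / `set_format("tf")`:

```python
# Minimal sketch: the TFFormatter backs the "tf" output format of a Dataset.
from datasets import Dataset

ds = Dataset.from_dict({"tokens": [[1, 2, 3], [4, 5]], "label": [0, 1]})
tf_ds = ds.with_format("tf")  # rows and batches now come back as TensorFlow tensors

row = tf_ds[0]     # dict of tf.Tensor objects (format_row above)
batch = tf_ds[:2]  # unequal-length "tokens" rows consolidate into a tf.RaggedTensor (format_batch above)
print(type(batch["tokens"]))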
# Copyright 2020 The HuggingFace Datasets Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Metrics base class.""" import os import types import uuid from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import pyarrow as pa from filelock import BaseFileLock, Timeout from . import config from .arrow_dataset import Dataset from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter from .download.download_config import DownloadConfig from .download.download_manager import DownloadManager from .features import Features from .info import DatasetInfo, MetricInfo from .naming import camelcase_to_snakecase from .utils._filelock import FileLock from .utils.deprecation_utils import deprecated from .utils.logging import get_logger from .utils.py_utils import copyfunc, temp_seed logger = get_logger(__name__) class FileFreeLock(BaseFileLock): """Thread lock until a file **cannot** be locked""" def __init__(self, lock_file, *args, **kwargs): self.filelock = FileLock(lock_file) super().__init__(self.filelock.lock_file, *args, **kwargs) def _acquire(self): try: self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once except Timeout: # We couldn't acquire the lock, the file is locked! self._context.lock_file_fd = self.filelock.lock_file else: # We were able to acquire the lock, the file is not yet locked! self.filelock.release() self._context.lock_file_fd = None def _release(self): self._context.lock_file_fd = None # lists - summarize long lists similarly to NumPy # arrays/tensors - let the frameworks control formatting def summarize_if_long_list(obj): if not type(obj) == list or len(obj) <= 6: # noqa: E721 return f"{obj}" def format_chunk(chunk): return ", ".join(repr(x) for x in chunk) return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]" class MetricInfoMixin: """This base class exposes some attributes of MetricInfo at the base level of the Metric for easy access. 
<Deprecated version="2.5.0"> Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate </Deprecated> """ def __init__(self, info: MetricInfo): self._metric_info = info @property def info(self): """:class:`datasets.MetricInfo` object containing all the metadata in the metric.""" return self._metric_info @property def name(self) -> str: return self._metric_info.metric_name @property def experiment_id(self) -> Optional[str]: return self._metric_info.experiment_id @property def description(self) -> str: return self._metric_info.description @property def citation(self) -> str: return self._metric_info.citation @property def features(self) -> Features: return self._metric_info.features @property def inputs_description(self) -> str: return self._metric_info.inputs_description @property def homepage(self) -> Optional[str]: return self._metric_info.homepage @property def license(self) -> str: return self._metric_info.license @property def codebase_urls(self) -> Optional[List[str]]: return self._metric_info.codebase_urls @property def reference_urls(self) -> Optional[List[str]]: return self._metric_info.reference_urls @property def streamable(self) -> bool: return self._metric_info.streamable @property def format(self) -> Optional[str]: return self._metric_info.format class Metric(MetricInfoMixin): """A Metric is the base class and common API for all metrics. <Deprecated version="2.5.0"> Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate </Deprecated> Args: config_name (``str``): This is used to define a hash specific to a metrics computation script and prevents the metric's data to be overridden when the metric loading script is modified. keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings. cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored. The data directory should be located on a shared file-system in distributed setups. num_process (``int``): specify the total number of nodes in a distributed settings. This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1) This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run. experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system. This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000). timeout (``Union[int, float]``): Timeout in second for distributed setting synchronization. 
""" @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate") def __init__( self, config_name: Optional[str] = None, keep_in_memory: bool = False, cache_dir: Optional[str] = None, num_process: int = 1, process_id: int = 0, seed: Optional[int] = None, experiment_id: Optional[str] = None, max_concurrent_cache_files: int = 10000, timeout: Union[int, float] = 100, **kwargs, ): # prepare info self.config_name = config_name or "default" info = self._info() info.metric_name = camelcase_to_snakecase(self.__class__.__name__) info.config_name = self.config_name info.experiment_id = experiment_id or "default_experiment" MetricInfoMixin.__init__(self, info) # For easy access on low level # Safety checks on num_process and process_id if not isinstance(process_id, int) or process_id < 0: raise ValueError("'process_id' should be a number greater than 0") if not isinstance(num_process, int) or num_process <= process_id: raise ValueError("'num_process' should be a number greater than process_id") if keep_in_memory and num_process != 1: raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).") self.num_process = num_process self.process_id = process_id self.max_concurrent_cache_files = max_concurrent_cache_files self.keep_in_memory = keep_in_memory self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE) self.data_dir = self._build_data_dir() if seed is None: _, seed, pos, *_ = np.random.get_state() self.seed: int = seed[pos] if pos < 624 else seed[0] else: self.seed: int = seed self.timeout: Union[int, float] = timeout # Update 'compute' and 'add' docstring # methods need to be copied otherwise it changes the docstrings of every instance self.compute = types.MethodType(copyfunc(self.compute), self) self.add_batch = types.MethodType(copyfunc(self.add_batch), self) self.add = types.MethodType(copyfunc(self.add), self) self.compute.__func__.__doc__ += self.info.inputs_description self.add_batch.__func__.__doc__ += self.info.inputs_description self.add.__func__.__doc__ += self.info.inputs_description # self.arrow_schema = pa.schema(field for field in self.info.features.type) self.buf_writer = None self.writer = None self.writer_batch_size = None self.data = None # This is the cache file we store our predictions/references in # Keep it None for now so we can (cloud)pickle the object self.cache_file_name = None self.filelock = None self.rendez_vous_lock = None # This is all the cache files on which we have a lock when we are in a distributed setting self.file_paths = None self.filelocks = None def __len__(self): """Return the number of examples (predictions or predictions/references pair) currently stored in the metric's cache. """ return 0 if self.writer is None else len(self.writer) def __repr__(self): return ( f'Metric(name: "{self.name}", features: {self.features}, ' f'usage: """{self.inputs_description}""", ' f"stored examples: {len(self)})" ) def _build_data_dir(self): """Path of this metric in cache_dir: Will be: self._data_dir_root/self.name/self.config_name/self.hash (if not none)/ If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped. """ builder_data_dir = self._data_dir_root builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name) os.makedirs(builder_data_dir, exist_ok=True) return builder_data_dir def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]: """Create a new cache file. 
If the default cache file is used, we generated a new hash.""" file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow") filelock = None for i in range(self.max_concurrent_cache_files): filelock = FileLock(file_path + ".lock") try: filelock.acquire(timeout=timeout) except Timeout: # If we have reached the max number of attempts or we are not allow to find a free name (distributed setup) # We raise an error if self.num_process != 1: raise ValueError( f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. " f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision " f"between distributed metric instances." ) from None if i == self.max_concurrent_cache_files - 1: raise ValueError( f"Cannot acquire lock, too many metric instance are operating concurrently on this file system." f"You should set a larger value of max_concurrent_cache_files when creating the metric " f"(current value is {self.max_concurrent_cache_files})." ) from None # In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name. file_uuid = str(uuid.uuid4()) file_path = os.path.join( self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow" ) else: break return file_path, filelock def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]: """Get a lock on all the cache files in a distributed setup. We wait for timeout second to let all the distributed node finish their tasks (default is 100 seconds). """ if self.num_process == 1: if self.cache_file_name is None: raise ValueError( "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` " "at least once before calling `compute`." ) file_paths = [self.cache_file_name] else: file_paths = [ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow") for process_id in range(self.num_process) ] # Let's acquire a lock on each process files to be sure they are finished writing filelocks = [] for process_id, file_path in enumerate(file_paths): if process_id == 0: # process 0 already has its lock file filelocks.append(self.filelock) else: filelock = FileLock(file_path + ".lock") try: filelock.acquire(timeout=self.timeout) except Timeout: raise ValueError( f"Cannot acquire lock on cached file {file_path} for process {process_id}." ) from None else: filelocks.append(filelock) return file_paths, filelocks def _check_all_processes_locks(self): expected_lock_file_names = [ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock") for process_id in range(self.num_process) ] for expected_lock_file_name in expected_lock_file_names: nofilelock = FileFreeLock(expected_lock_file_name) try: nofilelock.acquire(timeout=self.timeout) except Timeout: raise ValueError( f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." ) from None else: nofilelock.release() def _check_rendez_vous(self): expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock") nofilelock = FileFreeLock(expected_lock_file_name) try: nofilelock.acquire(timeout=self.timeout) except Timeout: raise ValueError( f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." 
) from None else: nofilelock.release() lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock") rendez_vous_lock = FileLock(lock_file_name) try: rendez_vous_lock.acquire(timeout=self.timeout) except Timeout: raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None else: rendez_vous_lock.release() def _finalize(self): """Close all the writing process and load/gather the data from all the nodes if main node or all_process is True. """ if self.writer is not None: self.writer.finalize() self.writer = None # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data if self.filelock is not None and self.process_id > 0: self.filelock.release() if self.keep_in_memory: # Read the predictions and references reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features)) self.data = Dataset.from_buffer(self.buf_writer.getvalue()) elif self.process_id == 0: # Let's acquire a lock on each node files to be sure they are finished writing file_paths, filelocks = self._get_all_cache_files() # Read the predictions and references try: reader = ArrowReader(path="", info=DatasetInfo(features=self.features)) self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths])) except FileNotFoundError: raise ValueError( "Error in finalize: another metric instance is already using the local cache file. " "Please specify an experiment_id to avoid collision between distributed metric instances." ) from None # Store file paths and locks and we will release/delete them after the computation. self.file_paths = file_paths self.filelocks = filelocks def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]: """Compute the metrics. Usage of positional arguments is not allowed to prevent mistakes. Args: predictions (list/array/tensor, optional): Predictions. references (list/array/tensor, optional): References. **kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute` method (see details in the docstring). Return: dict or None - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``). - None if the metric is not run on the main process (``process_id != 0``). Example: ```py >>> from datasets import load_metric >>> metric = load_metric("accuracy") >>> accuracy = metric.compute(predictions=model_prediction, references=labels) ``` """ all_kwargs = {"predictions": predictions, "references": references, **kwargs} if predictions is None and references is None: missing_kwargs = {k: None for k in self.features if k not in all_kwargs} all_kwargs.update(missing_kwargs) else: missing_inputs = [k for k in self.features if k not in all_kwargs] if missing_inputs: raise ValueError( f"Metric inputs are missing: {missing_inputs}. 
All required inputs are {list(self.features)}" ) inputs = {input_name: all_kwargs[input_name] for input_name in self.features} compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features} if any(v is not None for v in inputs.values()): self.add_batch(**inputs) self._finalize() self.cache_file_name = None self.filelock = None if self.process_id == 0: self.data.set_format(type=self.info.format) inputs = {input_name: self.data[input_name] for input_name in self.features} with temp_seed(self.seed): output = self._compute(**inputs, **compute_kwargs) if self.buf_writer is not None: self.buf_writer = None del self.data self.data = None else: # Release locks and delete all the cache files. Process 0 is released last. for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))): logger.info(f"Removing {file_path}") del self.data self.data = None del self.writer self.writer = None os.remove(file_path) filelock.release() return output else: return None def add_batch(self, *, predictions=None, references=None, **kwargs): """Add a batch of predictions and references for the metric's stack. Args: predictions (list/array/tensor, optional): Predictions. references (list/array/tensor, optional): References. Example: ```py >>> from datasets import load_metric >>> metric = load_metric("accuracy") >>> metric.add_batch(predictions=model_prediction, references=labels) ``` """ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features] if bad_inputs: raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}") batch = {"predictions": predictions, "references": references, **kwargs} batch = {intput_name: batch[intput_name] for intput_name in self.features} batch = self.info.features.encode_batch(batch) if self.writer is None: self._init_writer() try: self.writer.write_batch(batch) except pa.ArrowInvalid: if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch): col0 = next(iter(batch)) bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0] error_msg = ( f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})" ) elif sorted(self.features) != ["references", "predictions"]: error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n" error_msg_inputs = ",\n".join( f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features ) error_msg += error_msg_inputs else: error_msg = ( f"Predictions and/or references don't match the expected format.\n" f"Expected format: {self.features},\n" f"Input predictions: {summarize_if_long_list(predictions)},\n" f"Input references: {summarize_if_long_list(references)}" ) raise ValueError(error_msg) from None def add(self, *, prediction=None, reference=None, **kwargs): """Add one prediction and reference for the metric's stack. Args: prediction (list/array/tensor, optional): Predictions. reference (list/array/tensor, optional): References. Example: ```py >>> from datasets import load_metric >>> metric = load_metric("accuracy") >>> metric.add(predictions=model_predictions, references=labels) ``` """ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features] if bad_inputs: raise ValueError(f"Bad inputs for metric: {bad_inputs}. 
All required inputs are {list(self.features)}") example = {"predictions": prediction, "references": reference, **kwargs} example = {intput_name: example[intput_name] for intput_name in self.features} example = self.info.features.encode_example(example) if self.writer is None: self._init_writer() try: self.writer.write(example) except pa.ArrowInvalid: error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n" error_msg_inputs = ",\n".join( f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features ) error_msg += error_msg_inputs raise ValueError(error_msg) from None def _init_writer(self, timeout=1): if self.num_process > 1: if self.process_id == 0: file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock") self.rendez_vous_lock = FileLock(file_path) try: self.rendez_vous_lock.acquire(timeout=timeout) except TimeoutError: raise ValueError( f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. " f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision " f"between distributed metric instances." ) from None if self.keep_in_memory: self.buf_writer = pa.BufferOutputStream() self.writer = ArrowWriter( features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size ) else: self.buf_writer = None # Get cache file name and lock it if self.cache_file_name is None or self.filelock is None: cache_file_name, filelock = self._create_cache_file() # get ready self.cache_file_name = cache_file_name self.filelock = filelock self.writer = ArrowWriter( features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size ) # Setup rendez-vous here if if self.num_process > 1: if self.process_id == 0: self._check_all_processes_locks() # wait for everyone to be ready self.rendez_vous_lock.release() # let everyone go else: self._check_rendez_vous() # wait for master to be ready and to let everyone go def _info(self) -> MetricInfo: """Construct the MetricInfo object. See `MetricInfo` for details. Warning: This function is only called once and the result is cached for all following .info() calls. Returns: info: (MetricInfo) The metrics information """ raise NotImplementedError def download_and_prepare( self, download_config: Optional[DownloadConfig] = None, dl_manager: Optional[DownloadManager] = None, ): """Downloads and prepares dataset for reading. Args: download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters. dl_manager (:class:`DownloadManager`, optional): Specific download manager to use. """ if dl_manager is None: if download_config is None: download_config = DownloadConfig() download_config.cache_dir = os.path.join(self.data_dir, "downloads") download_config.force_download = False dl_manager = DownloadManager( dataset_name=self.name, download_config=download_config, data_dir=self.data_dir ) self._download_and_prepare(dl_manager) def _download_and_prepare(self, dl_manager): """Downloads and prepares resources for the metric. This is the internal implementation to overwrite called when user calls `download_and_prepare`. It should download all required resources for the metric. Args: dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data. 
""" return None def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]: """This method defines the common API for all the metrics in the library""" raise NotImplementedError def __del__(self): if hasattr(self, "filelock") and self.filelock is not None: self.filelock.release() if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None: self.rendez_vous_lock.release() if hasattr(self, "writer"): # in case it was already deleted del self.writer if hasattr(self, "data"): # in case it was already deleted del self.data
datasets/src/datasets/metric.py/0
{ "file_path": "datasets/src/datasets/metric.py", "repo_id": "datasets", "token_count": 11908 }
78
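As a rough illustration of the `Metric` API above (deprecated in favor of the 🤗 Evaluate library), a metric script subclasses `Metric` and implements `_info` and `_compute`. The class below is a hypothetical toy example, not one of the bundled metrics:

```python
import datasets


class ToyExactMatch(datasets.Metric):
    """Hypothetical metric: fraction of predictions equal to their reference."""

    def _info(self):
        return datasets.MetricInfo(
            description="Toy exact-match metric (illustration only).",
            citation="",
            inputs_description="predictions and references: lists of strings",
            features=datasets.Features(
                {"predictions": datasets.Value("string"), "references": datasets.Value("string")}
            ),
        )

    def _compute(self, predictions, references):
        matches = sum(p == r for p, r in zip(predictions, references))
        return {"exact_match": matches / len(predictions)}


metric = ToyExactMatch()
metric.add_batch(predictions=["a", "b"], references=["a", "c"])
print(metric.compute())  # {'exact_match': 0.5}
```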
from typing import List import datasets from datasets.tasks import ImageClassification from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for ImageFolder.""" drop_labels: bool = None drop_metadata: bool = None class ImageFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Image BASE_COLUMN_NAME = "image" BUILDER_CONFIG_CLASS = ImageFolderConfig EXTENSIONS: List[str] # definition at the bottom of the script CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label") # Obtained with: # ``` # import PIL.Image # IMAGE_EXTENSIONS = [] # PIL.Image.init() # for ext, format in PIL.Image.EXTENSION.items(): # if format in PIL.Image.OPEN: # IMAGE_EXTENSIONS.append(ext[1:]) # ``` # We intentionally do not run this code on launch because: # (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed # (2) To ensure the list of supported extensions is deterministic IMAGE_EXTENSIONS = [ ".blp", ".bmp", ".dib", ".bufr", ".cur", ".pcx", ".dcx", ".dds", ".ps", ".eps", ".fit", ".fits", ".fli", ".flc", ".ftc", ".ftu", ".gbr", ".gif", ".grib", ".h5", ".hdf", ".png", ".apng", ".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c", ".icns", ".ico", ".im", ".iim", ".tif", ".tiff", ".jfif", ".jpe", ".jpg", ".jpeg", ".mpg", ".mpeg", ".msp", ".pcd", ".pxr", ".pbm", ".pgm", ".ppm", ".pnm", ".psd", ".bw", ".rgb", ".rgba", ".sgi", ".ras", ".tga", ".icb", ".vda", ".vst", ".webp", ".wmf", ".emf", ".xbm", ".xpm", ] ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py", "repo_id": "datasets", "token_count": 883 }
79
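For context, the `imagefolder` builder above is typically used through `load_dataset` with a directory of images grouped into one sub-folder per class; the paths below are placeholders:

```python
from datasets import load_dataset

# Hypothetical layout: path/to/images/{cat,dog}/*.jpg — labels are inferred from folder names.
ds = load_dataset("imagefolder", data_dir="path/to/images", split="train")
print(ds.features)  # {'image': Image(...), 'label': ClassLabel(names=['cat', 'dog'])}
```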
from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401
datasets/src/datasets/parallel/__init__.py/0
{ "file_path": "datasets/src/datasets/parallel/__init__.py", "repo_id": "datasets", "token_count": 25 }
80
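The one-line module above re-exports the joblib-based parallel helpers. A hedged sketch of how `parallel_backend` can be used to dispatch `num_proc` work to a joblib backend (assumes the optional `pyspark` and `joblibspark` dependencies are installed; the dataset path is a placeholder):

```python
from joblibspark import register_spark
from datasets import load_dataset
from datasets.parallel import parallel_backend

register_spark()  # registers the "spark" joblib backend (optional dependency)
with parallel_backend("spark"):
    ds = load_dataset("imagefolder", data_dir="path/to/images", num_proc=2)
```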
from typing import Any, Dict, List, Optional, Union from .. import config from ..exceptions import DatasetsError from .file_utils import ( get_authentication_headers_for_url, http_get, ) from .logging import get_logger logger = get_logger(__name__) class DatasetsServerError(DatasetsError): """Dataset-server error. Raised when trying to use the Datasets-server HTTP API and when trying to access: - a missing dataset, or - a private/gated dataset and the user is not authenticated. - unavailable /parquet or /info responses """ def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]: """ Get the dataset exported parquet files Docs: https://huggingface.co/docs/datasets-server/parquet """ datasets_server_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset=" try: parquet_data_files_response = http_get( url=datasets_server_parquet_url + dataset, temp_file=None, headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token), timeout=100.0, max_retries=3, ) parquet_data_files_response.raise_for_status() if "X-Revision" in parquet_data_files_response.headers: if parquet_data_files_response.headers["X-Revision"] == revision or revision is None: parquet_data_files_response_json = parquet_data_files_response.json() if ( parquet_data_files_response_json.get("partial") is False and not parquet_data_files_response_json.get("pending", True) and not parquet_data_files_response_json.get("failed", True) and "parquet_files" in parquet_data_files_response_json ): return parquet_data_files_response_json["parquet_files"] else: logger.debug(f"Parquet export for {dataset} is not completely ready yet.") else: logger.debug( f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')" ) except Exception as e: # noqa catch any exception of the datasets-server and consider the parquet export doesn't exist logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})") raise DatasetsServerError("No exported Parquet files available.") def get_exported_dataset_infos( dataset: str, revision: str, token: Optional[Union[str, bool]] ) -> Dict[str, Dict[str, Any]]: """ Get the dataset information, can be useful to get e.g. the dataset features. 
Docs: https://huggingface.co/docs/datasets-server/info """ datasets_server_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset=" try: info_response = http_get( url=datasets_server_info_url + dataset, temp_file=None, headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token), timeout=100.0, max_retries=3, ) info_response.raise_for_status() if "X-Revision" in info_response.headers: if info_response.headers["X-Revision"] == revision or revision is None: info_response = info_response.json() if ( info_response.get("partial") is False and not info_response.get("pending", True) and not info_response.get("failed", True) and "dataset_info" in info_response ): return info_response["dataset_info"] else: logger.debug(f"Dataset info for {dataset} is not completely ready yet.") else: logger.debug( f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')" ) except Exception as e: # noqa catch any exception of the datasets-server and consider the dataset info doesn't exist logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})") raise DatasetsServerError("No exported dataset infos available.")
datasets/src/datasets/utils/_datasets_server.py/0
{ "file_path": "datasets/src/datasets/utils/_datasets_server.py", "repo_id": "datasets", "token_count": 1946 }
81
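The two helpers above wrap plain HTTP calls to the Datasets Server. A rough equivalent with `requests` (network access assumed; "squad" is only an example dataset name):

```python
import requests

resp = requests.get("https://datasets-server.huggingface.co/parquet", params={"dataset": "squad"})
resp.raise_for_status()
data = resp.json()
parquet_files = data["parquet_files"]  # the same field the helper above checks for
print(len(parquet_files), "exported parquet shards")
```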
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Some python utils function and classes.""" import copy import functools import itertools import multiprocessing.pool import os import queue import re import types import warnings from contextlib import contextmanager from dataclasses import fields, is_dataclass from multiprocessing import Manager from pathlib import Path from queue import Empty from shutil import disk_usage from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union from urllib.parse import urlparse import multiprocess import multiprocess.pool import numpy as np from tqdm.auto import tqdm from .. import config from ..parallel import parallel_map from . import logging from . import tqdm as hf_tqdm from ._dill import ( # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0 Pickler, dump, dumps, pklregister, ) from ._filelock import FileLock try: # pragma: no branch import typing_extensions as _typing_extensions from typing_extensions import Final, Literal except ImportError: _typing_extensions = Literal = Final = None logger = logging.get_logger(__name__) # NOTE: When used on an instance method, the cache is shared across all # instances and IS NOT per-instance. # See # https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance # For @property methods, use @memoized_property below. memoize = functools.lru_cache def size_str(size_in_bytes): """Returns a human readable size string. If size_in_bytes is None, then returns "Unknown size". For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`. Args: size_in_bytes: `int` or `None`, the size, in bytes, that we want to format as a human-readable size string. """ if not size_in_bytes: return "Unknown size" _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)] size_in_bytes = float(size_in_bytes) for name, size_bytes in _NAME_LIST: value = size_in_bytes / size_bytes if value >= 1.0: return f"{value:.2f} {name}" return f"{int(size_in_bytes)} bytes" def convert_file_size_to_int(size: Union[int, str]) -> int: """ Converts a size expressed as a string with digits an unit (like `"50MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. 
Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ if isinstance(size, int): return size if size.upper().endswith("PIB"): return int(size[:-3]) * (2**50) if size.upper().endswith("TIB"): return int(size[:-3]) * (2**40) if size.upper().endswith("GIB"): return int(size[:-3]) * (2**30) if size.upper().endswith("MIB"): return int(size[:-3]) * (2**20) if size.upper().endswith("KIB"): return int(size[:-3]) * (2**10) if size.upper().endswith("PB"): int_size = int(size[:-2]) * (10**15) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("TB"): int_size = int(size[:-2]) * (10**12) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("GB"): int_size = int(size[:-2]) * (10**9) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("MB"): int_size = int(size[:-2]) * (10**6) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("KB"): int_size = int(size[:-2]) * (10**3) return int_size // 8 if size.endswith("b") else int_size raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") def glob_pattern_to_regex(pattern): # partially taken from fsspec: # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735 return ( pattern.replace("\\", r"\\") .replace(".", r"\.") .replace("*", ".*") .replace("+", r"\+") .replace("//", "/") .replace("(", r"\(") .replace(")", r"\)") .replace("|", r"\|") .replace("^", r"\^") .replace("$", r"\$") .rstrip("/") .replace("?", ".") ) def string_to_dict(string: str, pattern: str) -> Dict[str, str]: """Un-format a string using a python f-string pattern. From https://stackoverflow.com/a/36838374 Example:: >>> p = 'hello, my name is {name} and I am a {age} year old {what}' >>> s = p.format(name='cody', age=18, what='quarterback') >>> s 'hello, my name is cody and I am a 18 year old quarterback' >>> string_to_dict(s, p) {'age': '18', 'name': 'cody', 'what': 'quarterback'} Args: string (str): input string pattern (str): pattern formatted like a python f-string Returns: Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern Raises: ValueError: if the string doesn't match the pattern """ regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern) result = re.search(regex, string) if result is None: raise ValueError(f"String {string} doesn't match the pattern {pattern}") values = list(result.groups()) keys = re.findall(r"{(.+?)}", pattern) _dict = dict(zip(keys, values)) return _dict def asdict(obj): """Convert an object to its dictionary representation recursively. <Added version="2.4.0"/> """ # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict def _is_dataclass_instance(obj): # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass return is_dataclass(obj) and not isinstance(obj, type) def _asdict_inner(obj): if _is_dataclass_instance(obj): result = {} for f in fields(obj): value = _asdict_inner(getattr(obj, f.name)) if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False): result[f.name] = value return result elif isinstance(obj, tuple) and hasattr(obj, "_fields"): # obj is a namedtuple return type(obj)(*[_asdict_inner(v) for v in obj]) elif isinstance(obj, (list, tuple)): # Assume we can create an object of this type by passing in a # generator (which is not true for namedtuples, handled # above). 
return type(obj)(_asdict_inner(v) for v in obj) elif isinstance(obj, dict): return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()} else: return copy.deepcopy(obj) if not isinstance(obj, dict) and not _is_dataclass_instance(obj): raise TypeError(f"{obj} is not a dict or a dataclass") return _asdict_inner(obj) @contextmanager def temporary_assignment(obj, attr, value): """Temporarily assign obj.attr to value.""" original = getattr(obj, attr, None) setattr(obj, attr, value) try: yield finally: setattr(obj, attr, original) @contextmanager def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False): """Temporarily set the random seed. This works for python numpy, pytorch and tensorflow.""" np_state = np.random.get_state() np.random.seed(seed) if set_pytorch and config.TORCH_AVAILABLE: import torch torch_state = torch.random.get_rng_state() torch.random.manual_seed(seed) if torch.cuda.is_available(): torch_cuda_states = torch.cuda.get_rng_state_all() torch.cuda.manual_seed_all(seed) if set_tensorflow and config.TF_AVAILABLE: import tensorflow as tf from tensorflow.python.eager import context as tfpycontext tf_state = tf.random.get_global_generator() temp_gen = tf.random.Generator.from_seed(seed) tf.random.set_global_generator(temp_gen) if not tf.executing_eagerly(): raise ValueError("Setting random seed for TensorFlow is only available in eager mode") tf_context = tfpycontext.context() # eager mode context tf_seed = tf_context._seed tf_rng_initialized = hasattr(tf_context, "_rng") if tf_rng_initialized: tf_rng = tf_context._rng tf_context._set_global_seed(seed) try: yield finally: np.random.set_state(np_state) if set_pytorch and config.TORCH_AVAILABLE: torch.random.set_rng_state(torch_state) if torch.cuda.is_available(): torch.cuda.set_rng_state_all(torch_cuda_states) if set_tensorflow and config.TF_AVAILABLE: tf.random.set_global_generator(tf_state) tf_context._seed = tf_seed if tf_rng_initialized: tf_context._rng = tf_rng else: delattr(tf_context, "_rng") def unique_values(values): """Iterate over iterable and return only unique values in order.""" seen = set() for value in values: if value not in seen: seen.add(value) yield value def no_op_if_value_is_null(func): """If the value is None, return None, else call `func`.""" def wrapper(value): return func(value) if value is not None else None return wrapper def first_non_null_value(iterable): """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index.""" for i, value in enumerate(iterable): if value is not None: return i, value return -1, None def zip_dict(*dicts): """Iterate over items of dictionaries grouped by their keys.""" for key in unique_values(itertools.chain(*dicts)): # set merge all keys # Will raise KeyError if the dict don't have the same keys yield key, tuple(d[key] for d in dicts) class NonMutableDict(dict): """Dict where keys can only be added but not modified. Will raise an error if the user try to overwrite one key. The error message can be customized during construction. It will be formatted using {key} for the overwritten key. 
""" def __init__(self, *args, **kwargs): self._error_msg = kwargs.pop( "error_msg", "Try to overwrite existing key: {key}", ) if kwargs: raise ValueError("NonMutableDict cannot be initialized with kwargs.") super().__init__(*args, **kwargs) def __setitem__(self, key, value): if key in self: raise ValueError(self._error_msg.format(key=key)) return super().__setitem__(key, value) def update(self, other): if any(k in self for k in other): raise ValueError(self._error_msg.format(key=set(self) & set(other))) return super().update(other) class classproperty(property): # pylint: disable=invalid-name """Descriptor to be used as decorator for @classmethods.""" def __get__(self, obj, objtype=None): return self.fget.__get__(None, objtype)() def _single_map_nested(args): """Apply a function recursively to each element of a nested data struct.""" function, data_struct, types, rank, disable_tqdm, desc = args # Singleton first to spare some computation if not isinstance(data_struct, dict) and not isinstance(data_struct, types): return function(data_struct) # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): print(" ", end="", flush=True) # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar: if isinstance(data_struct, dict): return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} else: mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped) def map_nested( function: Callable[[Any], Any], data_struct: Any, dict_only: bool = False, map_list: bool = True, map_tuple: bool = False, map_numpy: bool = False, num_proc: Optional[int] = None, parallel_min_length: int = 2, types: Optional[tuple] = None, disable_tqdm: bool = True, desc: Optional[str] = None, ) -> Any: """Apply a function recursively to each element of a nested data struct. Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to `parallel_min_length`. <Changed version="2.5.0"> Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``. Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and multiprocessing is used. </Changed> Args: function (`Callable`): Function to be applied to `data_struct`. data_struct (`Any`): Data structure to apply `function` to. dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in `data_struct`. map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict` values). map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides `dict` values). 
map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides `dict` values). num_proc (`int`, *optional*): Number of processes. parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel processing. <Added version="2.5.0"/> types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar. desc (`str`, *optional*): Prefix for the tqdm progressbar. Returns: `Any` """ if types is None: types = [] if not dict_only: if map_list: types.append(list) if map_tuple: types.append(tuple) if map_numpy: types.append(np.ndarray) types = tuple(types) # Singleton if not isinstance(data_struct, dict) and not isinstance(data_struct, types): return function(data_struct) iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct if num_proc is None: num_proc = 1 if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable): mapped = [ map_nested( function=function, data_struct=obj, num_proc=num_proc, parallel_min_length=parallel_min_length, types=types, ) for obj in iterable ] elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: mapped = [ _single_map_nested((function, obj, types, None, True, None)) for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) ] else: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".* is experimental and might be subject to breaking changes in the future\\.$", category=UserWarning, ) mapped = parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, _single_map_nested) if isinstance(data_struct, dict): return dict(zip(data_struct.keys(), mapped)) else: if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped) class NestedDataStructure: def __init__(self, data=None): self.data = data if data is not None else [] def flatten(self, data=None): data = data if data is not None else self.data if isinstance(data, dict): return self.flatten(list(data.values())) elif isinstance(data, (list, tuple)): return [flattened for item in data for flattened in self.flatten(item)] else: return [data] def has_sufficient_disk_space(needed_bytes, directory="."): try: free_bytes = disk_usage(os.path.abspath(directory)).free except OSError: return True return needed_bytes < free_bytes def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]: """Convert a link to a file on a github repo in a link to the raw github object.""" parsed = urlparse(url_path) sub_directory = None if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com": if "blob" in url_path: if not url_path.endswith(".py"): raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'") url_path = url_path.replace("blob", "raw") # Point to the raw file else: # Parse github url to point to zip github_path = parsed.path[1:] repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master") repo_owner, repo_name = repo_info.split("/") url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip" sub_directory = f"{repo_name}-{branch}" return url_path, sub_directory def lock_importable_file(importable_local_file: str) -> FileLock: # Check the directory with a unique name in our dataset folder # path is: 
./datasets/dataset_name/hash_from_code/script.py # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together importable_directory_path = str(Path(importable_local_file).resolve().parent.parent) lock_path = importable_directory_path + ".lock" return FileLock(lock_path) def get_imports(file_path: str) -> Tuple[str, str, str, str]: """Find whether we should import or clone additional files for a given processing script. And list the import. We allow: - library dependencies, - local dependencies and - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository. external dependencies will be downloaded (and extracted if needed in the dataset folder). We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script. Note that only direct import in the dataset processing script will be handled We don't recursively explore the additional import to download further files. Example:: import tensorflow import .c4_utils import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset """ lines = [] with open(file_path, encoding="utf-8") as f: lines.extend(f.readlines()) logger.debug(f"Checking {file_path} for additional imports.") imports: List[Tuple[str, str, str, Optional[str]]] = [] is_in_docstring = False for line in lines: docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line) if len(docstr_start_match) == 1: # flip True <=> False only if doctstring # starts at line without finishing is_in_docstring = not is_in_docstring if is_in_docstring: # import statements in doctstrings should # not be added as required dependencies continue match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE) if match is None: match = re.match( r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE, ) if match is None: continue if match.group(1): # The import starts with a '.', we will download the relevant file if any(imp[1] == match.group(2) for imp in imports): # We already have this import continue if match.group(3): # The import has a comment with 'From:', we'll retrieve it from the given url url_path = match.group(3) url_path, sub_directory = _convert_github_url(url_path) imports.append(("external", match.group(2), url_path, sub_directory)) elif match.group(2): # The import should be at the same place as the file imports.append(("internal", match.group(2), match.group(2), None)) else: if match.group(3): # The import has a comment with `From: git+https:...`, asks user to pip install from git. 
url_path = match.group(3) imports.append(("library", match.group(2), url_path, None)) else: imports.append(("library", match.group(2), match.group(2), None)) return imports def copyfunc(func): result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__) result.__kwdefaults__ = func.__kwdefaults__ return result Y = TypeVar("Y") def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int: for i, result in enumerate(func(**kwargs)): queue.put(result) return i def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]: return {f.pid for f in pool._pool} def iflatmap_unordered( pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool], func: Callable[..., Iterable[Y]], *, kwargs_iterable: Iterable[dict], ) -> Iterable[Y]: initial_pool_pid = _get_pool_pid(pool) pool_changed = False manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager with manager_cls() as manager: queue = manager.Queue() async_results = [ pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable ] try: while True: try: yield queue.get(timeout=0.05) except Empty: if all(async_result.ready() for async_result in async_results) and queue.empty(): break if _get_pool_pid(pool) != initial_pool_pid: pool_changed = True # One of the subprocesses has died. We should not wait forever. raise RuntimeError( "One of the subprocesses has abruptly died during map operation." "To debug the error, disable multiprocessing." ) finally: if not pool_changed: # we get the result in case there's an error to raise [async_result.get(timeout=0.05) for async_result in async_results]
datasets/src/datasets/utils/py_utils.py/0
{ "file_path": "datasets/src/datasets/utils/py_utils.py", "repo_id": "datasets", "token_count": 10570 }
82
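A small illustration of `map_nested` from the module above (single-process case, toy data): the function is applied to every leaf of the nested structure, while dict keys and container shapes are preserved.

```python
from datasets.utils.py_utils import map_nested

nested = {"a": [1, 2, 3], "b": {"c": 4}}
result = map_nested(lambda x: x * 10, nested)
print(result)  # {'a': [10, 20, 30], 'b': {'c': 40}}
```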
---
YAML tags (full spec here: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1):
- copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging
---

# Dataset Card Creation Guide

## Table of Contents

- [Dataset Card Creation Guide](#dataset-card-creation-guide)
  - [Table of Contents](#table-of-contents)
  - [Dataset Description](#dataset-description)
    - [Dataset Summary](#dataset-summary)
    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
    - [Languages](#languages)
  - [Dataset Structure](#dataset-structure)
    - [Data Instances](#data-instances)
    - [Data Fields](#data-fields)
    - [Data Splits](#data-splits)
  - [Dataset Creation](#dataset-creation)
    - [Curation Rationale](#curation-rationale)
    - [Source Data](#source-data)
      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
      - [Who are the source language producers?](#who-are-the-source-language-producers)
    - [Annotations](#annotations)
      - [Annotation process](#annotation-process)
      - [Who are the annotators?](#who-are-the-annotators)
    - [Personal and Sensitive Information](#personal-and-sensitive-information)
  - [Considerations for Using the Data](#considerations-for-using-the-data)
    - [Social Impact of Dataset](#social-impact-of-dataset)
    - [Discussion of Biases](#discussion-of-biases)
    - [Other Known Limitations](#other-known-limitations)
  - [Additional Information](#additional-information)
    - [Dataset Curators](#dataset-curators)
    - [Licensing Information](#licensing-information)
    - [Citation Information](#citation-information)
    - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [Add homepage URL here if available (unless it's a GitHub repository)]()
- **Repository:** [If the dataset is hosted on github or has a github homepage, add URL here]()
- **Paper:** [If the dataset was introduced by a paper or there was a paper written describing the dataset, add URL here (landing page for Arxiv paper preferred)]()
- **Leaderboard:** [If the dataset supports an active leaderboard, add link here]()
- **Point of Contact:** [If known, name and email of at least one person the reader can contact for questions about the dataset.]()

### Dataset Summary

Briefly summarize the dataset, its intended use and the supported tasks. Give an overview of how and why the dataset was created. The summary should explicitly mention the languages present in the dataset (possibly in broad terms, e.g. *translations between several pairs of European languages*), and describe the domain, topic, or genre covered.

### Supported Tasks and Leaderboards

For each of the tasks tagged for this dataset, give a brief description of the tag, metrics, and suggested models (with a link to their HuggingFace implementation if available). Give a similar description of tasks that were not covered by the structured tag set (replace the `task-category-tag` with an appropriate `other:other-task-name`).

- `task-category-tag`: The dataset can be used to train a model for [TASK NAME], which consists in [TASK DESCRIPTION]. Success on this task is typically measured by achieving a *high/low* [metric name](https://huggingface.co/metrics/metric_name). The ([model name](https://huggingface.co/model_name) or [model class](https://huggingface.co/transformers/model_doc/model_class.html)) model currently achieves the following score.
*[IF A LEADERBOARD IS AVAILABLE]:* This task has an active leaderboard which can be found at [leaderboard url]() and ranks models based on [metric name](https://huggingface.co/metrics/metric_name) while also reporting [other metric name](https://huggingface.co/metrics/other_metric_name).

### Languages

Provide a brief overview of the languages represented in the dataset. Describe relevant details about specifics of the language such as whether it is social media text, African American English,...

When relevant, please provide [BCP-47 codes](https://tools.ietf.org/html/bcp47), which consist of a [primary language subtag](https://tools.ietf.org/html/bcp47#section-2.2.1), with a [script subtag](https://tools.ietf.org/html/bcp47#section-2.2.3) and/or [region subtag](https://tools.ietf.org/html/bcp47#section-2.2.4) if available.

## Dataset Structure

### Data Instances

Provide a JSON-formatted example and brief description of a typical instance in the dataset. If available, provide a link to further examples.

```
{
  'example_field': ...,
  ...
}
```

Provide any additional information that is not covered in the other sections about the data here. In particular describe any relationships between data points and if these relationships are made explicit.

### Data Fields

List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports. If the data has span indices, describe their attributes, such as whether they are at the character level or word level, whether they are contiguous or not, etc. If the dataset contains example IDs, state whether they have an inherent meaning, such as a mapping to other datasets or pointing to relationships between data points.

- `example_field`: description of `example_field`

Note that the descriptions can be initialized with the **Show Markdown Data Fields** output of the [Datasets Tagging app](https://huggingface.co/spaces/huggingface/datasets-tagging), you will then only need to refine the generated descriptions.

### Data Splits

Describe and name the splits in the dataset if there are more than one. Describe any criteria for splitting the data, if used. If there are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here. Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:

|                         | train | validation | test |
|-------------------------|------:|-----------:|-----:|
| Input Sentences         |       |            |      |
| Average Sentence Length |       |            |      |

## Dataset Creation

### Curation Rationale

What need motivated the creation of this dataset? What are some of the reasons underlying the major choices involved in putting it together?

### Source Data

This section describes the source data (e.g. news text and headlines, social media posts, translated sentences,...)

#### Initial Data Collection and Normalization

Describe the data collection process. Describe any criteria for data selection or filtering. List any key words or search terms used. If possible, include runtime information for the collection process.

If data was collected from other pre-existing datasets, link to source here and to their [Hugging Face version](https://huggingface.co/datasets/dataset_name).

If the data was modified or normalized after being collected (e.g.
if the data is word-tokenized), describe the process and the tools used. #### Who are the source language producers? State whether the data was produced by humans or machine generated. Describe the people or systems who originally created the data. If available, include self-reported demographic or identity information for the source data creators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender. Describe the conditions under which the data was created (for example, if the producers were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here. Describe other people represented or mentioned in the data. Where possible, link to references for the information. ### Annotations If the dataset contains annotations which are not part of the initial data collection, describe them in the following paragraphs. #### Annotation process If applicable, describe the annotation process and any tools used, or state otherwise. Describe the amount of data annotated, if not all. Describe or reference annotation guidelines provided to the annotators. If available, provide inter-annotator statistics. Describe any annotation validation processes. #### Who are the annotators? If annotations were collected for the source data (such as class labels or syntactic parses), state whether the annotations were produced by humans or machine generated. Describe the people or systems who originally created the annotations and their selection criteria if applicable. If available, include self-reported demographic or identity information for the annotators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender. Describe the conditions under which the data was annotated (for example, if the annotators were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here. ### Personal and Sensitive Information State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (i.e. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender. State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data). State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history). If efforts were made to anonymize the data, describe the anonymization process. ## Considerations for Using the Data ### Social Impact of Dataset Please discuss some of the ways you believe the use of this dataset will impact society.
The statement should include both positive outlooks, such as outlining how technologies developed through its use may improve people's lives, and discuss the accompanying risks. These risks may range from making important decisions more opaque to people who are affected by the technology, to reinforcing existing harmful biases (whose specifics should be discussed in the next section), among other considerations. Also describe in this section if the proposed dataset contains a low-resource or under-represented language. If this is the case or if this task has any impact on underserved communities, please elaborate here. ### Discussion of Biases Provide descriptions of specific biases that are likely to be reflected in the data, and state whether any steps were taken to reduce their impact. For Wikipedia text, see for example [Dinan et al 2020 on biases in Wikipedia (esp. Table 1)](https://arxiv.org/abs/2005.00614), or [Blodgett et al 2020](https://www.aclweb.org/anthology/2020.acl-main.485/) for a more general discussion of the topic. If analyses have been run quantifying these biases, please add brief summaries and links to the studies here. ### Other Known Limitations If studies of the datasets have outlined other limitations of the dataset, such as annotation artifacts, please outline and cite them here. ## Additional Information ### Dataset Curators List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here. ### Licensing Information Provide the license and link to the license webpage if available. ### Citation Information Provide the [BibTex](http://www.bibtex.org/)-formatted reference for the dataset. For example: ``` @article{article_id, author = {Author List}, title = {Dataset Paper Title}, journal = {Publication Venue}, year = {2525} } ``` If the dataset has a [DOI](https://www.doi.org/), please provide it here. ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
datasets/templates/README_guide.md/0
{ "file_path": "datasets/templates/README_guide.md", "repo_id": "datasets", "token_count": 3254 }
83
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _check_json_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_json_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read() _check_json_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ], ) def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() assert isinstance(dataset, Dataset) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} features = {"col_2": "int64", "col_3": "float64", "col_1": "string"} expected_features = features.copy() features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) cache_dir = tmp_path / "cache" dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() assert isinstance(dataset, Dataset) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == 
expected_dtype @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_json_split(split, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read() _check_json_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path): if issubclass(path_type, str): path = jsonl_path elif issubclass(path_type, list): path = [jsonl_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() _check_json_dataset(dataset, expected_features) def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read() _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): if split: path = {split: jsonl_path} else: split = "train" path = {"train": jsonl_path, "test": jsonl_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def load_json(buffer): return json.load(buffer) def load_json_lines(buffer): return [json.loads(line) for line in buffer] class TestJsonDatasetWriter: @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) def test_dataset_to_json_lines(self, lines, load_json_function, dataset): with 
io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=lines).write() buffer.seek(0) exported_content = load_json_function(buffer) assert isinstance(exported_content, list) assert isinstance(exported_content[0], dict) assert len(exported_content) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at", [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789"), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ], ) def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write() buffer.seek(0) exported_content = load_json(buffer) assert isinstance(exported_content, container) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") if len_at: assert len(exported_content[len_at]) == 10 else: assert len(exported_content) == 10 @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write() buffer.seek(0) exported_content = load_json_function(buffer) assert isinstance(exported_content, list) assert isinstance(exported_content[0], dict) assert len(exported_content) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at", [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789"), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ], ) def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write() buffer.seek(0) exported_content = load_json(buffer) assert isinstance(exported_content, container) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") if len_at: assert len(exported_content[len_at]) == 10 else: assert len(exported_content) == 10 def test_dataset_to_json_orient_invalidproc(self, dataset): with pytest.raises(ValueError): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, num_proc=0) @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")]) def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset): path = tmp_path_factory.mktemp("data") / f"test.json.{extension}" original_path = str(shared_datadir / f"test_file.json.{extension}") JsonDatasetWriter(dataset, path, compression=compression).write() with fsspec.open(path, "rb", compression="infer") as f: exported_content = f.read() with fsspec.open(original_path, "rb", compression="infer") as f: original_content = f.read() assert exported_content == original_content def test_dataset_to_json_fsspec(self, dataset, mockfs): dataset_path = "mock://my_dataset.json" 
writer = JsonDatasetWriter(dataset, dataset_path, storage_options=mockfs.storage_options) assert writer.write() > 0 assert mockfs.isfile(dataset_path) with fsspec.open(dataset_path, "rb", **mockfs.storage_options) as f: assert f.read()
datasets/tests/io/test_json.py/0
{ "file_path": "datasets/tests/io/test_json.py", "repo_id": "datasets", "token_count": 5153 }
84
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import Array2D, ClassLabel, Features, Image, Value from datasets.features.features import Array2DExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class TypedSequenceTest(TestCase): def test_no_type(self): arr = pa.array(TypedSequence([1, 2, 3])) self.assertEqual(arr.type, pa.int64()) def test_array_type_forbidden(self): with self.assertRaises(ValueError): _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64()) def test_try_type_and_type_forbidden(self): with self.assertRaises(ValueError): _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64"))) def test_compatible_type(self): arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32"))) self.assertEqual(arr.type, pa.int32()) def test_incompatible_type(self): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64"))) def test_try_compatible_type(self): arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32"))) self.assertEqual(arr.type, pa.int32()) def test_try_incompatible_type(self): arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64"))) self.assertEqual(arr.type, pa.string()) def test_compatible_extension_type(self): arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64")) def test_incompatible_extension_type(self): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64"))) def test_try_compatible_extension_type(self): arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64")) def test_try_incompatible_extension_type(self): arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64"))) self.assertEqual(arr.type, pa.string()) @require_pil def test_exhaustive_cast(self): import PIL.Image pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5)) with patch( "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects ) as mock_cast_to_python_objects: _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image())) args, kwargs = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting", kwargs) self.assertFalse(kwargs["optimize_list_casting"]) def _check_output(output, expected_num_chunks: int): stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output) f = pa.ipc.open_stream(stream) pa_table: pa.Table = f.read_all() assert len(pa_table.to_batches()) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [ None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}, {"col_2": pa.int64(), "col_1": pa.string()}, ], ) def test_write(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, 
schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) def test_write_with_features(): output = pa.BufferOutputStream() features = Features({"labels": ClassLabel(names=["neg", "pos"])}) with ArrowWriter(stream=output, features=features) as writer: writer.write({"labels": 0}) writer.write({"labels": 1}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata stream = pa.BufferReader(output.getvalue()) f = pa.ipc.open_stream(stream) pa_table: pa.Table = f.read_all() schema = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(schema) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) def test_key_datatype(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: with pytest.raises(InvalidKeyError): writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2]) num_examples, num_bytes = writer.finalize() @pytest.mark.parametrize("writer_batch_size", [None, 2, 10]) def test_duplicate_keys(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: with pytest.raises(DuplicatedKeysError): writer.write({"col_1": "foo", "col_2": 1}, key=10) writer.write({"col_1": "bar", "col_2": 2}, key=10) num_examples, num_bytes = writer.finalize() @pytest.mark.parametrize("writer_batch_size", [None, 2, 10]) def test_write_with_keys(writer_batch_size): output = pa.BufferOutputStream() with ArrowWriter( stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True, ) as writer: writer.write({"col_1": "foo", "col_2": 1}, key=1) writer.write({"col_1": "bar", "col_2": 2}, key=2) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_batch(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]}) writer.write_batch({"col_1": [], "col_2": []}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) 
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_table(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]})) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) @pytest.mark.parametrize("writer_batch_size", [None, 1, 10]) @pytest.mark.parametrize( "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] ) def test_write_row(fields, writer_batch_size): output = pa.BufferOutputStream() schema = pa.schema(fields) if fields else None with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer: writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]})) writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]})) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: fields = {"col_1": pa.string(), "col_2": pa.int64()} assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1) def test_write_file(): with tempfile.TemporaryDirectory() as tmp_dir: fields = {"col_1": pa.string(), "col_2": pa.int64()} output = os.path.join(tmp_dir, "test.arrow") with ArrowWriter(path=output, schema=pa.schema(fields)) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata) _check_output(output, 1) def get_base_dtype(arr_type): if pa.types.is_list(arr_type): return get_base_dtype(arr_type.value_type) else: return arr_type def change_first_primitive_element_in_list(lst, value): if isinstance(lst[0], list): change_first_primitive_element_in_list(lst[0], value) else: lst[0] = value @pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())]) @pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]]) def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype): arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type)) assert get_base_dtype(arr.type) == expected_dtype @pytest.mark.parametrize( "col, expected_dtype", [ ("attention_mask", pa.int8()), ("special_tokens_mask", pa.int8()), ("token_type_ids", pa.int8()), ("input_ids", pa.int32()), ("other", pa.int64()), ], ) @pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]]) def test_optimized_typed_sequence(sequence, col, expected_dtype): # in range arr = pa.array(OptimizedTypedSequence(sequence, col=col)) assert get_base_dtype(arr.type) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications sequence = copy.deepcopy(sequence) value = 
np.iinfo(expected_dtype.to_pandas_dtype()).max + 1 change_first_primitive_element_in_list(sequence, value) arr = pa.array(OptimizedTypedSequence(sequence, col=col)) assert get_base_dtype(arr.type) == pa.int64() @pytest.mark.parametrize("raise_exception", [False, True]) def test_arrow_writer_closes_stream(raise_exception, tmp_path): path = str(tmp_path / "dataset-train.arrow") try: with ArrowWriter(path=path) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def test_arrow_writer_with_filesystem(mockfs): path = "mock://dataset-train.arrow" with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer: assert isinstance(writer._fs, type(mockfs)) assert writer._fs.storage_options == mockfs.storage_options writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(path) def test_parquet_writer_write(): output = pa.BufferOutputStream() with ParquetWriter(stream=output) as writer: writer.write({"col_1": "foo", "col_2": 1}) writer.write({"col_1": "bar", "col_2": 2}) num_examples, num_bytes = writer.finalize() assert num_examples == 2 assert num_bytes > 0 stream = pa.BufferReader(output.getvalue()) pa_table: pa.Table = pq.read_table(stream) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("embed_local_files", [False, True]) def test_writer_embed_local_files(tmp_path, embed_local_files): import PIL.Image image_path = str(tmp_path / "test_image_rgb.jpg") PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png") output = pa.BufferOutputStream() with ParquetWriter( stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files ) as writer: writer.write({"image": image_path}) writer.finalize() stream = pa.BufferReader(output.getvalue()) pa_table: pa.Table = pq.read_table(stream) out = pa_table.to_pydict() if embed_local_files: assert isinstance(out["image"][0]["path"], str) with open(image_path, "rb") as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def test_always_nullable(): non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)]) output = pa.BufferOutputStream() with ArrowWriter(stream=output) as writer: writer._build_writer(inferred_schema=non_nullable_schema) assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
datasets/tests/test_arrow_writer.py/0
{ "file_path": "datasets/tests/test_arrow_writer.py", "repo_id": "datasets", "token_count": 6236 }
85
from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"]) @pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"]) @pytest.mark.parametrize("revision", [None, "v2"]) def test_hf_hub_url(repo_id, filename, revision): url = hf_hub_url(repo_id=repo_id, filename=filename, revision=revision) assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
datasets/tests/test_hub.py/0
{ "file_path": "datasets/tests/test_hub.py", "repo_id": "datasets", "token_count": 219 }
86
import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( "split_dict", [ SplitDict(), SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}), SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}), SplitDict({"train": SplitInfo()}), ], ) def test_split_dict_to_yaml_list(split_dict: SplitDict): split_dict_yaml_list = split_dict._to_yaml_list() assert len(split_dict_yaml_list) == len(split_dict) reloaded = SplitDict._from_yaml_list(split_dict_yaml_list) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump split_info.dataset_name = None # the split name of split_dict takes over the name of the split info object split_info.name = split_name assert split_dict == reloaded @pytest.mark.parametrize( "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")] ) def test_split_dict_asdict_has_dataset_name(split_info): # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name" # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files split_dict_asdict = asdict(SplitDict({"train": split_info})) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
datasets/tests/test_splits.py/0
{ "file_path": "datasets/tests/test_splits.py", "repo_id": "datasets", "token_count": 622 }
87
# Setup [[setup]] After all this information, it's time to get started. We're going to do two things: 1. **Create your Hugging Face account** if it's not already done 2. **Sign up to Discord and introduce yourself** (don't be shy 🤗) ### Let's create your Hugging Face account (If it's not already done) create an account on HF <a href="https://huggingface.co/join">here</a> ### Let's join our Discord server You can now sign up for our Discord server. This is the place where you **can chat with the community and with us, create and join study groups to grow with each other and more** 👉🏻 Join our Discord server <a href="https://discord.gg/ydHrjt3WP5">here.</a> When you join, remember to introduce yourself in #introduce-yourself and sign up for the reinforcement learning channels in #channels-and-roles. We have multiple RL-related channels: - `rl-announcements`: where we give the latest information about the course. - `rl-discussions`: where you can chat about RL and share information. - `rl-study-group`: where you can create and join study groups. - `rl-i-made-this`: where you can share your projects and models. If this is your first time using Discord, we wrote a Discord 101 covering the best practices. Check the next section. Congratulations! **You've just finished the on-boarding**. You're now ready to start learning Deep Reinforcement Learning. Have fun! ### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit0/setup.mdx/0
{ "file_path": "deep-rl-class/units/en/unit0/setup.mdx", "repo_id": "deep-rl-class", "token_count": 389 }
88
# Conclusion [[conclusion]] Congrats on finishing this chapter! There was a lot of information. And congrats on finishing the tutorials. You’ve just implemented your first RL agent from scratch and shared it on the Hub 🥳. Implementing from scratch when you study a new architecture **is important to understand how it works.** It's **normal if you still feel confused** by all these elements. **This was the same for me and for everyone who studies RL.** Take time to really grasp the material before continuing. In the next chapter, we’re going to dive deeper by studying our first Deep Reinforcement Learning algorithm based on Q-Learning: Deep Q-Learning. And you'll train a **DQN agent with <a href="https://github.com/DLR-RM/rl-baselines3-zoo">RL-Baselines3 Zoo</a> to play Atari Games**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Atari environments"/> Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit2/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 337 }
89
# The Deep Q-Network (DQN) [[deep-q-network]] This is the architecture of our Deep Q-Learning network: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/deep-q-network.jpg" alt="Deep Q Network"/> As input, we take a **stack of 4 frames** passed through the network as a state and output a **vector of Q-values for each possible action at that state**. Then, like with Q-Learning, we just need to use our epsilon-greedy policy to select which action to take. When the Neural Network is initialized, **the Q-value estimation is terrible**. But during training, our Deep Q-Network agent will associate a situation with the appropriate action and **learn to play the game well**. ## Preprocessing the input and temporal limitation [[preprocessing]] We need to **preprocess the input**. It’s an essential step since we want to **reduce the complexity of our state to reduce the computation time needed for training**. To achieve this, we **reduce the state space to 84x84 and grayscale it**. We can do this since the colors in Atari environments don't add important information. This is a big improvement since we **reduce our three color channels (RGB) to 1**. We can also **crop a part of the screen in some games** if it does not contain important information. Then we stack four frames together. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/preprocessing.jpg" alt="Preprocessing"/> **Why do we stack four frames together?** We stack frames together because it helps us **handle the problem of temporal limitation**. Let’s take an example with the game of Pong. When you see this frame: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation.jpg" alt="Temporal Limitation"/> Can you tell me where the ball is going? No, because one frame is not enough to have a sense of motion! But what if I add three more frames? **Here you can see that the ball is going to the right**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation-2.jpg" alt="Temporal Limitation"/> That’s why, to capture temporal information, we stack four frames together. Then the stacked frames are processed by three convolutional layers. These layers **allow us to capture and exploit spatial relationships in images**. But also, because the frames are stacked together, **we can exploit some temporal properties across those frames**. If you don't know what convolutional layers are, don't worry. You can check out [Lesson 4 of this free Deep Learning Course by Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188) Finally, we have a couple of fully connected layers that output a Q-value for each possible action at that state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/deep-q-network.jpg" alt="Deep Q Network"/> So, we see that Deep Q-Learning uses a neural network to approximate, given a state, the different Q-values for each possible action at that state. Now let's study the Deep Q-Learning algorithm.
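Before moving on, here is a minimal Python sketch to make the preprocessing step concrete. It is an illustration rather than the course's exact implementation: it only assumes NumPy and Pillow are installed, and real Atari pipelines usually rely on ready-made environment wrappers that perform the same grayscaling, resizing, and frame stacking.

```python
from collections import deque

import numpy as np
from PIL import Image


def preprocess_frame(frame: np.ndarray) -> np.ndarray:
    """Grayscale an RGB frame and shrink it to 84x84."""
    image = Image.fromarray(frame).convert("L")  # drop the three color channels
    image = image.resize((84, 84))               # reduce the state space
    return np.asarray(image, dtype=np.uint8)


class FrameStacker:
    """Keeps the last four preprocessed frames so the network can perceive motion."""

    def __init__(self, num_frames: int = 4):
        self.frames = deque(maxlen=num_frames)

    def reset(self, first_frame: np.ndarray) -> np.ndarray:
        processed = preprocess_frame(first_frame)
        for _ in range(self.frames.maxlen):
            self.frames.append(processed)
        return np.stack(list(self.frames))  # shape: (4, 84, 84)

    def step(self, new_frame: np.ndarray) -> np.ndarray:
        self.frames.append(preprocess_frame(new_frame))
        return np.stack(list(self.frames))


# A random array stands in for a raw 210x160 RGB Atari observation:
fake_frame = np.random.randint(0, 255, size=(210, 160, 3), dtype=np.uint8)
stacker = FrameStacker()
state = stacker.reset(fake_frame)
print(state.shape)  # (4, 84, 84): the stacked input the Deep Q-Network receives
```

The resulting (4, 84, 84) stack is exactly what the convolutional layers described above take as input.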
deep-rl-class/units/en/unit3/deep-q-network.mdx/0
{ "file_path": "deep-rl-class/units/en/unit3/deep-q-network.mdx", "repo_id": "deep-rl-class", "token_count": 888 }
90
# Bonus: Learn to create your own environments with Unity and MLAgents **You can create your own reinforcement learning environments with Unity and MLAgents**. Using a game engine such as Unity can be intimidating at first, but here are the steps you can take to learn smoothly. ## Step 1: Know how to use Unity - The best way to learn Unity is to take the ["Create with Code" course](https://learn.unity.com/course/create-with-code): it's a series of videos for beginners where **you will create 5 small games with Unity**. ## Step 2: Create the simplest environment with this tutorial - Then, when you know how to use Unity, you can create your [first basic RL environment using this tutorial](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Learning-Environment-Create-New.md). ## Step 3: Iterate and create nice environments - Now that you've created your first simple environment, you can iterate to more complex ones using the [MLAgents documentation (especially Designing Agents and Agent part)](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/) - In addition, you can take this free course ["Create a hummingbird environment"](https://learn.unity.com/course/ml-agents-hummingbirds) by [Adam Kelly](https://twitter.com/aktwelve) Have fun! And if you create custom environments, don't hesitate to share them in the `#rl-i-made-this` Discord channel.
deep-rl-class/units/en/unit5/bonus.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/bonus.mdx", "repo_id": "deep-rl-class", "token_count": 360 }
91
# Additional Readings [[additional-readings]] ## An introduction to multi-agents - [Multi-agent reinforcement learning: An overview](https://www.dcsc.tudelft.nl/~bdeschutter/pub/rep/10_003.pdf) - [Multiagent Reinforcement Learning, Marc Lanctot](https://rlss.inria.fr/files/2019/07/RLSS_Multiagent.pdf) - [Example of a multi-agent environment](https://www.mathworks.com/help/reinforcement-learning/ug/train-3-agents-for-area-coverage.html?s_eid=PSM_15028) - [A list of different multi-agent environments](https://agents.inf.ed.ac.uk/blog/multiagent-learning-environments/) - [Multi-Agent Reinforcement Learning: Independent vs. Cooperative Agents](https://bit.ly/3nVK7My) - [Dealing with Non-Stationarity in Multi-Agent Deep Reinforcement Learning](https://bit.ly/3v7LxaT) ## Self-Play and MA-POCA - [Self Play Theory and with MLAgents](https://blog.unity.com/technology/training-intelligent-adversaries-using-self-play-with-ml-agents) - [Training complex behavior with MLAgents](https://blog.unity.com/technology/ml-agents-v20-release-now-supports-training-complex-cooperative-behaviors) - [MLAgents plays dodgeball](https://blog.unity.com/technology/ml-agents-plays-dodgeball) - [On the Use and Misuse of Absorbing States in Multi-agent Reinforcement Learning (MA-POCA)](https://arxiv.org/pdf/2111.05992.pdf)
deep-rl-class/units/en/unit7/additional-readings.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/additional-readings.mdx", "repo_id": "deep-rl-class", "token_count": 432 }
92
# The intuition behind PPO [[the-intuition-behind-ppo]] The idea with Proximal Policy Optimization (PPO) is that we want to improve the training stability of the policy by limiting the change you make to the policy at each training epoch: **we want to avoid having too large of a policy update.** For two reasons: - We know empirically that smaller policy updates during training are **more likely to converge to an optimal solution.** - A too-big step in a policy update can result in falling “off the cliff” (getting a bad policy) **and taking a long time or even having no possibility to recover.** <figure class="image table text-center m-0 w-full"> <img class="center" src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/cliff.jpg" alt="Policy Update cliff"/> <figcaption>Taking smaller policy updates to improve the training stability</figcaption> <figcaption>Modified version from RL — Proximal Policy Optimization (PPO) <a href="https://jonathan-hui.medium.com/rl-proximal-policy-optimization-ppo-explained-77f014ec3f12">Explained by Jonathan Hui</a></figcaption> </figure> **So with PPO, we update the policy conservatively**. To do so, we need to measure how much the current policy changed compared to the former one using a ratio calculation between the current and former policy. And we clip this ratio in a range \\( [1 - \epsilon, 1 + \epsilon] \\), meaning that we **remove the incentive for the current policy to go too far from the old one (hence the proximal policy term).**
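To make the clipping idea concrete, here is a minimal PyTorch sketch of the clipped surrogate objective. It only illustrates the clipping mechanism and is not a full PPO implementation: the log-probabilities and advantages are assumed to come from your own rollout code, and the 0.2 clip range is simply a common default.

```python
import torch


def ppo_clipped_objective(
    new_log_probs: torch.Tensor,  # log-probabilities of the taken actions under the current policy
    old_log_probs: torch.Tensor,  # log-probabilities under the policy that collected the rollout
    advantages: torch.Tensor,     # advantage estimates for those actions
    clip_epsilon: float = 0.2,
) -> torch.Tensor:
    # Ratio between the current policy and the former one
    ratio = torch.exp(new_log_probs - old_log_probs)
    # Unclipped surrogate vs. the ratio clipped to [1 - eps, 1 + eps]
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon) * advantages
    # Taking the minimum removes the incentive to move too far from the old policy
    return torch.min(unclipped, clipped).mean()


# Toy batch of rollout data (random numbers just to show the call):
batch_size = 8
objective = ppo_clipped_objective(
    new_log_probs=torch.randn(batch_size),
    old_log_probs=torch.randn(batch_size),
    advantages=torch.randn(batch_size),
)
loss = -objective  # we maximize the objective, so we minimize its negative
print(loss)
```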
deep-rl-class/units/en/unit8/intuition-behind-ppo.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/intuition-behind-ppo.mdx", "repo_id": "deep-rl-class", "token_count": 426 }
93
# Language models in RL ## LMs encode useful knowledge for agents **Language models** (LMs) can exhibit impressive abilities when manipulating text such as question-answering or even step-by-step reasoning. Additionally, their training on massive text corpora allowed them to **encode various types of knowledge including abstract ones about the physical rules of our world** (for instance what is possible to do with an object, what happens when one rotates an object…). A natural question recently studied was whether such knowledge could benefit agents such as robots when trying to solve everyday tasks. And while these works showed interesting results, the proposed agents lacked any learning method. **This limitation prevents these agents from adapting to the environment (e.g. fixing wrong knowledge) or learning new skills.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/language.png" alt="Language"> <figcaption>Source: <a href="https://ai.googleblog.com/2022/08/towards-helpful-robots-grounding.html">Towards Helpful Robots: Grounding Language in Robotic Affordances</a></figcaption> </figure> ## LMs and RL There is therefore a potential synergy between LMs which can bring knowledge about the world, and RL which can align and correct this knowledge by interacting with an environment. It is especially interesting from an RL point of view as the RL field mostly relies on the **Tabula-rasa** setup where everything is learned from scratch by the agent leading to: 1) Sample inefficiency 2) Unexpected behaviors from humans’ eyes As a first attempt, the paper [“Grounding Large Language Models with Online Reinforcement Learning”](https://arxiv.org/abs/2302.02662v1) tackled the problem of **adapting or aligning an LM to a textual environment using PPO**. They showed that the knowledge encoded in the LM led to a fast adaptation to the environment (opening avenues for sample-efficient RL agents) but also that such knowledge allowed the LM to better generalize to new tasks once aligned. <video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/papier_v4.mp4" type="video/mp4" controls /> Another direction studied in [“Guiding Pretraining in Reinforcement Learning with Large Language Models”](https://arxiv.org/abs/2302.06692) was to keep the LM frozen but leverage its knowledge to **guide an RL agent’s exploration**. Such a method allows the RL agent to be guided towards human-meaningful and plausibly useful behaviors without requiring a human in the loop during training. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/language2.png" alt="Language"> <figcaption> Source: <a href="https://ai.googleblog.com/2022/08/towards-helpful-robots-grounding.html"> Towards Helpful Robots: Grounding Language in Robotic Affordances</a> </figcaption> </figure> Several limitations make these works still very preliminary, such as the need to convert the agent's observation to text before giving it to an LM as well as the compute cost of interacting with very large LMs.
## Further reading For more information we recommend you check out the following resources: - [Google Research, 2022 & beyond: Robotics](https://ai.googleblog.com/2023/02/google-research-2022-beyond-robotics.html) - [Pre-Trained Language Models for Interactive Decision-Making](https://arxiv.org/abs/2202.01771) - [Grounding Large Language Models with Online Reinforcement Learning](https://arxiv.org/abs/2302.02662v1) - [Guiding Pretraining in Reinforcement Learning with Large Language Models](https://arxiv.org/abs/2302.06692) ## Author This section was written by <a href="https://twitter.com/ClementRomac"> Clément Romac </a>
deep-rl-class/units/en/unitbonus3/language-models.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/language-models.mdx", "repo_id": "deep-rl-class", "token_count": 1011 }
94
import argparse import csv import gc import os from dataclasses import dataclass from typing import Dict, List, Union import torch import torch.utils.benchmark as benchmark GITHUB_SHA = os.getenv("GITHUB_SHA", None) BENCHMARK_FIELDS = [ "pipeline_cls", "ckpt_id", "batch_size", "num_inference_steps", "model_cpu_offload", "run_compile", "time (secs)", "memory (gbs)", "actual_gpu_memory (gbs)", "github_sha", ] PROMPT = "ghibli style, a fantasy landscape with castles" BASE_PATH = os.getenv("BASE_PATH", ".") TOTAL_GPU_MEMORY = float(os.getenv("TOTAL_GPU_MEMORY", torch.cuda.get_device_properties(0).total_memory / (1024**3))) REPO_ID = "diffusers/benchmarks" FINAL_CSV_FILE = "collated_results.csv" @dataclass class BenchmarkInfo: time: float memory: float def flush(): """Wipes off memory.""" gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() def bytes_to_giga_bytes(bytes): return f"{(bytes / 1024 / 1024 / 1024):.3f}" def benchmark_fn(f, *args, **kwargs): t0 = benchmark.Timer( stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f}, num_threads=torch.get_num_threads(), ) return f"{(t0.blocked_autorange().mean):.3f}" def generate_csv_dict( pipeline_cls: str, ckpt: str, args: argparse.Namespace, benchmark_info: BenchmarkInfo ) -> Dict[str, Union[str, bool, float]]: """Packs benchmarking data into a dictionary for later serialization.""" data_dict = { "pipeline_cls": pipeline_cls, "ckpt_id": ckpt, "batch_size": args.batch_size, "num_inference_steps": args.num_inference_steps, "model_cpu_offload": args.model_cpu_offload, "run_compile": args.run_compile, "time (secs)": benchmark_info.time, "memory (gbs)": benchmark_info.memory, "actual_gpu_memory (gbs)": f"{(TOTAL_GPU_MEMORY):.3f}", "github_sha": GITHUB_SHA, } return data_dict def write_to_csv(file_name: str, data_dict: Dict[str, Union[str, bool, float]]): """Serializes a dictionary into a CSV file.""" with open(file_name, mode="w", newline="") as csvfile: writer = csv.DictWriter(csvfile, fieldnames=BENCHMARK_FIELDS) writer.writeheader() writer.writerow(data_dict) def collate_csv(input_files: List[str], output_file: str): """Collates multiple identically structured CSVs into a single CSV file.""" with open(output_file, mode="w", newline="") as outfile: writer = csv.DictWriter(outfile, fieldnames=BENCHMARK_FIELDS) writer.writeheader() for file in input_files: with open(file, mode="r") as infile: reader = csv.DictReader(infile) for row in reader: writer.writerow(row)
diffusers/benchmarks/utils.py/0
{ "file_path": "diffusers/benchmarks/utils.py", "repo_id": "diffusers", "token_count": 1254 }
95