Dataset preview schema, one value per column in each row below, in this order: index (int64, always 0), repo_id (string, 179 distinct values), file_path (string, 26 to 186 characters), content (string, 1 character up to 2.1M characters), __index_level_0__ (int64, 0 to 9). The bare numbers that separate the file contents below are the index and __index_level_0__ values of adjacent rows.
0
hf_public_repos/autotrain-advanced/src/autotrain/trainers
hf_public_repos/autotrain-advanced/src/autotrain/trainers/text_classification/params.py
from typing import Optional

from pydantic import Field

from autotrain.trainers.common import AutoTrainParams


class TextClassificationParams(AutoTrainParams):
    """
    [`TextClassificationParams`] is a configuration class for text classification training parameters.

    Attributes:
        data_path (str): Path to the dataset.
        model (str): Name of the model to use. Default is "bert-base-uncased".
        lr (float): Learning rate. Default is 5e-5.
        epochs (int): Number of training epochs. Default is 3.
        max_seq_length (int): Maximum sequence length. Default is 128.
        batch_size (int): Training batch size. Default is 8.
        warmup_ratio (float): Warmup proportion. Default is 0.1.
        gradient_accumulation (int): Number of gradient accumulation steps. Default is 1.
        optimizer (str): Optimizer to use. Default is "adamw_torch".
        scheduler (str): Scheduler to use. Default is "linear".
        weight_decay (float): Weight decay. Default is 0.0.
        max_grad_norm (float): Maximum gradient norm. Default is 1.0.
        seed (int): Random seed. Default is 42.
        train_split (str): Name of the training split. Default is "train".
        valid_split (Optional[str]): Name of the validation split. Default is None.
        text_column (str): Name of the text column in the dataset. Default is "text".
        target_column (str): Name of the target column in the dataset. Default is "target".
        logging_steps (int): Number of steps between logging. Default is -1.
        project_name (str): Name of the project. Default is "project-name".
        auto_find_batch_size (bool): Whether to automatically find the batch size. Default is False.
        mixed_precision (Optional[str]): Mixed precision setting (fp16, bf16, or None). Default is None.
        save_total_limit (int): Total number of checkpoints to save. Default is 1.
        token (Optional[str]): Hub token for authentication. Default is None.
        push_to_hub (bool): Whether to push the model to the hub. Default is False.
        eval_strategy (str): Evaluation strategy. Default is "epoch".
        username (Optional[str]): Hugging Face username. Default is None.
        log (str): Logging method for experiment tracking. Default is "none".
        early_stopping_patience (int): Number of epochs with no improvement after which training will be stopped. Default is 5.
        early_stopping_threshold (float): Threshold for measuring the new optimum to continue training. Default is 0.01.
    """

    data_path: str = Field(None, title="Data path")
    model: str = Field("bert-base-uncased", title="Model name")
    lr: float = Field(5e-5, title="Learning rate")
    epochs: int = Field(3, title="Number of training epochs")
    max_seq_length: int = Field(128, title="Max sequence length")
    batch_size: int = Field(8, title="Training batch size")
    warmup_ratio: float = Field(0.1, title="Warmup proportion")
    gradient_accumulation: int = Field(1, title="Gradient accumulation steps")
    optimizer: str = Field("adamw_torch", title="Optimizer")
    scheduler: str = Field("linear", title="Scheduler")
    weight_decay: float = Field(0.0, title="Weight decay")
    max_grad_norm: float = Field(1.0, title="Max gradient norm")
    seed: int = Field(42, title="Seed")
    train_split: str = Field("train", title="Train split")
    valid_split: Optional[str] = Field(None, title="Validation split")
    text_column: str = Field("text", title="Text column")
    target_column: str = Field("target", title="Target column")
    logging_steps: int = Field(-1, title="Logging steps")
    project_name: str = Field("project-name", title="Output directory")
    auto_find_batch_size: bool = Field(False, title="Auto find batch size")
    mixed_precision: Optional[str] = Field(None, title="fp16, bf16, or None")
    save_total_limit: int = Field(1, title="Save total limit")
    token: Optional[str] = Field(None, title="Hub Token")
    push_to_hub: bool = Field(False, title="Push to hub")
    eval_strategy: str = Field("epoch", title="Evaluation strategy")
    username: Optional[str] = Field(None, title="Hugging Face Username")
    log: str = Field("none", title="Logging using experiment tracking")
    early_stopping_patience: int = Field(5, title="Early stopping patience")
    early_stopping_threshold: float = Field(0.01, title="Early stopping threshold")
0
0
hf_public_repos/autotrain-advanced/src/autotrain
hf_public_repos/autotrain-advanced/src/autotrain/preprocessor/text.py
import ast from dataclasses import dataclass from typing import Optional import pandas as pd from datasets import ClassLabel, Dataset, DatasetDict, Sequence from sklearn.model_selection import train_test_split from autotrain import logger RESERVED_COLUMNS = ["autotrain_text", "autotrain_label", "autotrain_question", "autotrain_answer"] LLM_RESERVED_COLUMNS = [ "autotrain_prompt", "autotrain_context", "autotrain_rejected_text", "autotrain_prompt_start", ] @dataclass class TextBinaryClassificationPreprocessor: """ A preprocessor class for binary text classification tasks. Attributes: train_data (pd.DataFrame): The training data. text_column (str): The name of the column containing text data. label_column (str): The name of the column containing label data. username (str): The username for the Hugging Face Hub. project_name (str): The project name for saving datasets. token (str): The authentication token for the Hugging Face Hub. valid_data (Optional[pd.DataFrame]): The validation data. Defaults to None. test_size (Optional[float]): The proportion of the dataset to include in the validation split. Defaults to 0.2. seed (Optional[int]): The random seed for splitting the data. Defaults to 42. convert_to_class_label (Optional[bool]): Whether to convert labels to class labels. Defaults to False. local (Optional[bool]): Whether to save the dataset locally. Defaults to False. Methods: __post_init__(): Validates the presence of required columns in the dataframes and checks for reserved column names. split(): Splits the training data into training and validation sets if validation data is not provided. prepare_columns(train_df, valid_df): Prepares the columns for training and validation dataframes. prepare(): Prepares the datasets for training and validation, converts labels if required, and saves or uploads the datasets. 
""" train_data: pd.DataFrame text_column: str label_column: str username: str project_name: str token: str valid_data: Optional[pd.DataFrame] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 convert_to_class_label: Optional[bool] = False local: Optional[bool] = False def __post_init__(self): # check if text_column and label_column are in train_data if self.text_column not in self.train_data.columns: raise ValueError(f"{self.text_column} not in train data") if self.label_column not in self.train_data.columns: raise ValueError(f"{self.label_column} not in train data") # check if text_column and label_column are in valid_data if self.valid_data is not None: if self.text_column not in self.valid_data.columns: raise ValueError(f"{self.text_column} not in valid data") if self.label_column not in self.valid_data.columns: raise ValueError(f"{self.label_column} not in valid data") # make sure no reserved columns are in train_data or valid_data for column in RESERVED_COLUMNS: if column in self.train_data.columns: raise ValueError(f"{column} is a reserved column name") if self.valid_data is not None: if column in self.valid_data.columns: raise ValueError(f"{column} is a reserved column name") def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, stratify=self.train_data[self.label_column], ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare_columns(self, train_df, valid_df): train_df.loc[:, "autotrain_text"] = train_df[self.text_column] train_df.loc[:, "autotrain_label"] = train_df[self.label_column] valid_df.loc[:, "autotrain_text"] = valid_df[self.text_column] valid_df.loc[:, "autotrain_label"] = valid_df[self.label_column] # drop text_column and label_column train_df = train_df.drop(columns=[self.text_column, self.label_column]) valid_df = valid_df.drop(columns=[self.text_column, self.label_column]) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) train_df.loc[:, "autotrain_label"] = train_df["autotrain_label"].astype(str) valid_df.loc[:, "autotrain_label"] = valid_df["autotrain_label"].astype(str) label_names = sorted(set(train_df["autotrain_label"].unique().tolist())) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.convert_to_class_label: train_df = train_df.cast_column("autotrain_label", ClassLabel(names=label_names)) valid_df = valid_df.cast_column("autotrain_label", ClassLabel(names=label_names)) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" class TextMultiClassClassificationPreprocessor(TextBinaryClassificationPreprocessor): """ TextMultiClassClassificationPreprocessor is a class for preprocessing text data for multi-class classification tasks. 
This class inherits from TextBinaryClassificationPreprocessor and is designed to handle scenarios where the text data needs to be classified into more than two categories. Methods: Inherits all methods from TextBinaryClassificationPreprocessor. Attributes: Inherits all attributes from TextBinaryClassificationPreprocessor. """ pass class TextSingleColumnRegressionPreprocessor(TextBinaryClassificationPreprocessor): """ A preprocessor class for single-column regression tasks, inheriting from TextBinaryClassificationPreprocessor. Methods ------- split(): Splits the training data into training and validation sets. If validation data is already provided, it returns the training and validation data as is. Otherwise, it performs a train-test split on the training data. prepare(): Prepares the training and validation datasets by splitting the data, preparing the columns, and converting them to Hugging Face Datasets. The datasets are then either saved locally or pushed to the Hugging Face Hub, depending on the `local` attribute. """ def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" class TextTokenClassificationPreprocessor(TextBinaryClassificationPreprocessor): """ A preprocessor class for text token classification tasks, inheriting from TextBinaryClassificationPreprocessor. Methods ------- split(): Splits the training data into training and validation sets. If validation data is already provided, it returns the training and validation data as is. Otherwise, it splits the training data based on the test size and seed. prepare(): Prepares the training and validation data for token classification. This includes splitting the data, preparing columns, evaluating text and label columns, and converting them to datasets. The datasets are then either saved locally or pushed to the Hugging Face Hub based on the configuration. 
""" def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) try: train_df.loc[:, "autotrain_text"] = train_df["autotrain_text"].apply(lambda x: ast.literal_eval(x)) valid_df.loc[:, "autotrain_text"] = valid_df["autotrain_text"].apply(lambda x: ast.literal_eval(x)) except ValueError: logger.warning("Unable to do ast.literal_eval on train_df['autotrain_text']") logger.warning("assuming autotrain_text is already a list") try: train_df.loc[:, "autotrain_label"] = train_df["autotrain_label"].apply(lambda x: ast.literal_eval(x)) valid_df.loc[:, "autotrain_label"] = valid_df["autotrain_label"].apply(lambda x: ast.literal_eval(x)) except ValueError: logger.warning("Unable to do ast.literal_eval on train_df['autotrain_label']") logger.warning("assuming autotrain_label is already a list") label_names_train = sorted(set(train_df["autotrain_label"].explode().unique().tolist())) label_names_valid = sorted(set(valid_df["autotrain_label"].explode().unique().tolist())) label_names = sorted(set(label_names_train + label_names_valid)) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.convert_to_class_label: train_df = train_df.cast_column("autotrain_label", Sequence(ClassLabel(names=label_names))) valid_df = valid_df.cast_column("autotrain_label", Sequence(ClassLabel(names=label_names))) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" @dataclass class LLMPreprocessor: """ A class used to preprocess data for large language model (LLM) training. Attributes ---------- train_data : pd.DataFrame The training data. username : str The username for the Hugging Face Hub. project_name : str The name of the project. token : str The token for authentication. valid_data : Optional[pd.DataFrame], optional The validation data, by default None. test_size : Optional[float], optional The size of the test split, by default 0.2. seed : Optional[int], optional The random seed, by default 42. text_column : Optional[str], optional The name of the text column, by default None. prompt_column : Optional[str], optional The name of the prompt column, by default None. rejected_text_column : Optional[str], optional The name of the rejected text column, by default None. local : Optional[bool], optional Whether to save the dataset locally, by default False. Methods ------- __post_init__() Validates the provided columns and checks for reserved column names. split() Splits the data into training and validation sets. prepare_columns(train_df, valid_df) Prepares the columns for training and validation datasets. prepare() Prepares the datasets and pushes them to the Hugging Face Hub or saves them locally. 
""" train_data: pd.DataFrame username: str project_name: str token: str valid_data: Optional[pd.DataFrame] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 text_column: Optional[str] = None prompt_column: Optional[str] = None rejected_text_column: Optional[str] = None local: Optional[bool] = False def __post_init__(self): if self.text_column is None: raise ValueError("text_column must be provided") # check if text_column and rejected_text_column are in train_data if self.prompt_column is not None and self.prompt_column not in self.train_data.columns: self.prompt_column = None if self.rejected_text_column is not None and self.rejected_text_column not in self.train_data.columns: self.rejected_text_column = None # make sure no reserved columns are in train_data or valid_data for column in RESERVED_COLUMNS + LLM_RESERVED_COLUMNS: if column in self.train_data.columns: raise ValueError(f"{column} is a reserved column name") if self.valid_data is not None: if column in self.valid_data.columns: raise ValueError(f"{column} is a reserved column name") def split(self): if self.valid_data is not None: return self.train_data, self.valid_data # no validation is done in llm training if validation data is not provided return self.train_data, self.train_data # else: # train_df, valid_df = train_test_split( # self.train_data, # test_size=self.test_size, # random_state=self.seed, # ) # train_df = train_df.reset_index(drop=True) # valid_df = valid_df.reset_index(drop=True) # return train_df, valid_df def prepare_columns(self, train_df, valid_df): drop_cols = [self.text_column] train_df.loc[:, "autotrain_text"] = train_df[self.text_column] valid_df.loc[:, "autotrain_text"] = valid_df[self.text_column] if self.prompt_column is not None: drop_cols.append(self.prompt_column) train_df.loc[:, "autotrain_prompt"] = train_df[self.prompt_column] valid_df.loc[:, "autotrain_prompt"] = valid_df[self.prompt_column] if self.rejected_text_column is not None: drop_cols.append(self.rejected_text_column) train_df.loc[:, "autotrain_rejected_text"] = train_df[self.rejected_text_column] valid_df.loc[:, "autotrain_rejected_text"] = valid_df[self.rejected_text_column] # drop drop_cols train_df = train_df.drop(columns=drop_cols) valid_df = valid_df.drop(columns=drop_cols) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" @dataclass class Seq2SeqPreprocessor: """ Seq2SeqPreprocessor is a class for preprocessing sequence-to-sequence training data. Attributes: train_data (pd.DataFrame): The training data. text_column (str): The name of the column containing the input text. label_column (str): The name of the column containing the labels. username (str): The username for pushing data to the hub. project_name (str): The name of the project. token (str): The token for authentication. 
valid_data (Optional[pd.DataFrame]): The validation data. Default is None. test_size (Optional[float]): The proportion of the dataset to include in the validation split. Default is 0.2. seed (Optional[int]): The random seed for splitting the data. Default is 42. local (Optional[bool]): Whether to save the dataset locally or push to the hub. Default is False. Methods: __post_init__(): Validates the presence of required columns in the training and validation data. split(): Splits the training data into training and validation sets if validation data is not provided. prepare_columns(train_df, valid_df): Prepares the columns for training and validation data. prepare(): Prepares the dataset for training by splitting, preparing columns, and converting to Dataset objects. """ train_data: pd.DataFrame text_column: str label_column: str username: str project_name: str token: str valid_data: Optional[pd.DataFrame] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 local: Optional[bool] = False def __post_init__(self): # check if text_column and label_column are in train_data if self.text_column not in self.train_data.columns: raise ValueError(f"{self.text_column} not in train data") if self.label_column not in self.train_data.columns: raise ValueError(f"{self.label_column} not in train data") # check if text_column and label_column are in valid_data if self.valid_data is not None: if self.text_column not in self.valid_data.columns: raise ValueError(f"{self.text_column} not in valid data") if self.label_column not in self.valid_data.columns: raise ValueError(f"{self.label_column} not in valid data") # make sure no reserved columns are in train_data or valid_data for column in RESERVED_COLUMNS: if column in self.train_data.columns: raise ValueError(f"{column} is a reserved column name") if self.valid_data is not None: if column in self.valid_data.columns: raise ValueError(f"{column} is a reserved column name") def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare_columns(self, train_df, valid_df): train_df.loc[:, "autotrain_text"] = train_df[self.text_column] train_df.loc[:, "autotrain_label"] = train_df[self.label_column] valid_df.loc[:, "autotrain_text"] = valid_df[self.text_column] valid_df.loc[:, "autotrain_label"] = valid_df[self.label_column] # drop text_column and label_column train_df = train_df.drop(columns=[self.text_column, self.label_column]) valid_df = valid_df.drop(columns=[self.text_column, self.label_column]) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" @dataclass class SentenceTransformersPreprocessor: """ A 
preprocessor class for preparing datasets for sentence transformers. Attributes: train_data (pd.DataFrame): The training data. username (str): The username for the Hugging Face Hub. project_name (str): The project name for the Hugging Face Hub. token (str): The token for authentication with the Hugging Face Hub. valid_data (Optional[pd.DataFrame]): The validation data. Default is None. test_size (Optional[float]): The proportion of the dataset to include in the validation split. Default is 0.2. seed (Optional[int]): The random seed for splitting the data. Default is 42. local (Optional[bool]): Whether to save the dataset locally or push to the Hugging Face Hub. Default is False. sentence1_column (Optional[str]): The name of the first sentence column. Default is "sentence1". sentence2_column (Optional[str]): The name of the second sentence column. Default is "sentence2". sentence3_column (Optional[str]): The name of the third sentence column. Default is "sentence3". target_column (Optional[str]): The name of the target column. Default is "target". convert_to_class_label (Optional[bool]): Whether to convert the target column to class labels. Default is False. Methods: __post_init__(): Ensures no reserved columns are in train_data or valid_data. split(): Splits the train_data into training and validation sets if valid_data is not provided. prepare_columns(train_df, valid_df): Prepares the columns for training and validation datasets. prepare(): Prepares the datasets and either saves them locally or pushes them to the Hugging Face Hub. """ train_data: pd.DataFrame username: str project_name: str token: str valid_data: Optional[pd.DataFrame] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 local: Optional[bool] = False sentence1_column: Optional[str] = "sentence1" sentence2_column: Optional[str] = "sentence2" sentence3_column: Optional[str] = "sentence3" target_column: Optional[str] = "target" convert_to_class_label: Optional[bool] = False def __post_init__(self): # make sure no reserved columns are in train_data or valid_data for column in RESERVED_COLUMNS + LLM_RESERVED_COLUMNS: if column in self.train_data.columns: raise ValueError(f"{column} is a reserved column name") if self.valid_data is not None: if column in self.valid_data.columns: raise ValueError(f"{column} is a reserved column name") def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare_columns(self, train_df, valid_df): train_df.loc[:, "autotrain_sentence1"] = train_df[self.sentence1_column] train_df.loc[:, "autotrain_sentence2"] = train_df[self.sentence2_column] valid_df.loc[:, "autotrain_sentence1"] = valid_df[self.sentence1_column] valid_df.loc[:, "autotrain_sentence2"] = valid_df[self.sentence2_column] keep_cols = ["autotrain_sentence1", "autotrain_sentence2"] if self.sentence3_column is not None: train_df.loc[:, "autotrain_sentence3"] = train_df[self.sentence3_column] valid_df.loc[:, "autotrain_sentence3"] = valid_df[self.sentence3_column] keep_cols.append("autotrain_sentence3") if self.target_column is not None: train_df.loc[:, "autotrain_target"] = train_df[self.target_column] valid_df.loc[:, "autotrain_target"] = valid_df[self.target_column] keep_cols.append("autotrain_target") train_df = train_df[keep_cols] valid_df = valid_df[keep_cols] 
return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) if self.convert_to_class_label: label_names = sorted(set(train_df["autotrain_target"].unique().tolist())) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.convert_to_class_label: train_df = train_df.cast_column("autotrain_target", ClassLabel(names=label_names)) valid_df = valid_df.cast_column("autotrain_target", ClassLabel(names=label_names)) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" @dataclass class TextExtractiveQuestionAnsweringPreprocessor: """ Preprocessor for text extractive question answering tasks. Attributes: train_data (pd.DataFrame): The training data. text_column (str): The name of the text column in the data. question_column (str): The name of the question column in the data. answer_column (str): The name of the answer column in the data. username (str): The username for the Hugging Face Hub. project_name (str): The project name for the Hugging Face Hub. token (str): The token for authentication with the Hugging Face Hub. valid_data (Optional[pd.DataFrame]): The validation data. Default is None. test_size (Optional[float]): The proportion of the dataset to include in the validation split. Default is 0.2. seed (Optional[int]): The random seed for splitting the data. Default is 42. local (Optional[bool]): Whether to save the dataset locally or push to the Hugging Face Hub. Default is False. Methods: __post_init__(): Validates the columns in the training and validation data and converts the answer column to a dictionary. split(): Splits the training data into training and validation sets if validation data is not provided. prepare_columns(train_df, valid_df): Prepares the columns for training and validation data. prepare(): Prepares the dataset for training by splitting, preparing columns, and converting to Hugging Face Dataset format. 
""" train_data: pd.DataFrame text_column: str question_column: str answer_column: str username: str project_name: str token: str valid_data: Optional[pd.DataFrame] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 local: Optional[bool] = False def __post_init__(self): # check if text_column, question_column, and answer_column are in train_data if self.text_column not in self.train_data.columns: raise ValueError(f"{self.text_column} not in train data") if self.question_column not in self.train_data.columns: raise ValueError(f"{self.question_column} not in train data") if self.answer_column not in self.train_data.columns: raise ValueError(f"{self.answer_column} not in train data") # check if text_column, question_column, and answer_column are in valid_data if self.valid_data is not None: if self.text_column not in self.valid_data.columns: raise ValueError(f"{self.text_column} not in valid data") if self.question_column not in self.valid_data.columns: raise ValueError(f"{self.question_column} not in valid data") if self.answer_column not in self.valid_data.columns: raise ValueError(f"{self.answer_column} not in valid data") # make sure no reserved columns are in train_data or valid_data for column in RESERVED_COLUMNS: if column in self.train_data.columns: raise ValueError(f"{column} is a reserved column name") if self.valid_data is not None: if column in self.valid_data.columns: raise ValueError(f"{column} is a reserved column name") # convert answer_column to dict try: self.train_data.loc[:, self.answer_column] = self.train_data[self.answer_column].apply( lambda x: ast.literal_eval(x) ) except ValueError: logger.warning("Unable to do ast.literal_eval on train_data[answer_column]") logger.warning("assuming answer_column is already a dict") if self.valid_data is not None: try: self.valid_data.loc[:, self.answer_column] = self.valid_data[self.answer_column].apply( lambda x: ast.literal_eval(x) ) except ValueError: logger.warning("Unable to do ast.literal_eval on valid_data[answer_column]") logger.warning("assuming answer_column is already a dict") def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare_columns(self, train_df, valid_df): train_df.loc[:, "autotrain_text"] = train_df[self.text_column] train_df.loc[:, "autotrain_question"] = train_df[self.question_column] train_df.loc[:, "autotrain_answer"] = train_df[self.answer_column] valid_df.loc[:, "autotrain_text"] = valid_df[self.text_column] valid_df.loc[:, "autotrain_question"] = valid_df[self.question_column] valid_df.loc[:, "autotrain_answer"] = valid_df[self.answer_column] # drop all other columns train_df = train_df.drop( columns=[ x for x in train_df.columns if x not in ["autotrain_text", "autotrain_question", "autotrain_answer"] ] ) valid_df = valid_df.drop( columns=[ x for x in valid_df.columns if x not in ["autotrain_text", "autotrain_question", "autotrain_answer"] ] ) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: 
train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}"
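As a quick usage illustration of the preprocessor defined above, the sketch below runs TextBinaryClassificationPreprocessor on a toy DataFrame with local=True, so the prepared DatasetDict is saved to disk instead of being pushed to the Hub. The column names, username, and token are placeholders, and the package is assumed to be installed.

import pandas as pd

from autotrain.preprocessor.text import TextBinaryClassificationPreprocessor

# Toy data: six rows and two balanced classes, so the stratified 80/20 split works.
df = pd.DataFrame(
    {
        "review": [
            "great film", "terrible plot", "loved it",
            "not my thing", "would watch again", "fell asleep",
        ],
        "sentiment": ["pos", "neg", "pos", "neg", "pos", "neg"],
    }
)

prep = TextBinaryClassificationPreprocessor(
    train_data=df,
    text_column="review",
    label_column="sentiment",
    username="my-username",        # only used when pushing to the Hub
    project_name="demo-sentiment",
    token="hf_xxx",                # placeholder; unused when local=True
    convert_to_class_label=True,   # cast autotrain_label to a ClassLabel feature
    local=True,
)
print(prep.prepare())              # -> "demo-sentiment/autotrain-data" saved on disk

The other text preprocessors in this file (regression, token classification, LLM, seq2seq, sentence transformers, extractive QA) follow the same constructor-then-prepare() pattern and differ mainly in which columns they expect.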
1
0
hf_public_repos/autotrain-advanced/src/autotrain
hf_public_repos/autotrain-advanced/src/autotrain/preprocessor/tabular.py
from dataclasses import dataclass from typing import List, Optional import pandas as pd from datasets import Dataset, DatasetDict from sklearn.model_selection import train_test_split RESERVED_COLUMNS = ["autotrain_id", "autotrain_label"] @dataclass class TabularBinaryClassificationPreprocessor: """ A preprocessor class for tabular binary classification tasks. Attributes: train_data (pd.DataFrame): The training data. label_column (str): The name of the label column in the training data. username (str): The username for the Hugging Face Hub. project_name (str): The name of the project. token (str): The authentication token for the Hugging Face Hub. id_column (Optional[str]): The name of the ID column in the training data. Default is None. valid_data (Optional[pd.DataFrame]): The validation data. Default is None. test_size (Optional[float]): The proportion of the dataset to include in the validation split. Default is 0.2. seed (Optional[int]): The random seed for splitting the data. Default is 42. local (Optional[bool]): Whether to save the dataset locally or push to the Hugging Face Hub. Default is False. Methods: __post_init__(): Validates the presence of required columns in the training and validation data. split(): Splits the training data into training and validation sets if validation data is not provided. prepare_columns(train_df, valid_df): Prepares the columns by adding 'autotrain_id' and 'autotrain_label', and drops the original ID and label columns. prepare(): Prepares the dataset by splitting, processing columns, and saving or pushing the dataset to the Hugging Face Hub. """ train_data: pd.DataFrame label_column: str username: str project_name: str token: str id_column: Optional[str] = None valid_data: Optional[pd.DataFrame] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 local: Optional[bool] = False def __post_init__(self): # check if id_column and label_column are in train_data if self.id_column is not None: if self.id_column not in self.train_data.columns: raise ValueError(f"{self.id_column} not in train data") if self.label_column not in self.train_data.columns: raise ValueError(f"{self.label_column} not in train data") # check if id_column and label_column are in valid_data if self.valid_data is not None: if self.id_column is not None: if self.id_column not in self.valid_data.columns: raise ValueError(f"{self.id_column} not in valid data") if self.label_column not in self.valid_data.columns: raise ValueError(f"{self.label_column} not in valid data") # make sure no reserved columns are in train_data or valid_data for column in RESERVED_COLUMNS: if column in self.train_data.columns: raise ValueError(f"{column} is a reserved column name") if self.valid_data is not None: if column in self.valid_data.columns: raise ValueError(f"{column} is a reserved column name") def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, stratify=self.train_data[self.label_column], ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare_columns(self, train_df, valid_df): train_df.loc[:, "autotrain_id"] = train_df[self.id_column] if self.id_column else list(range(len(train_df))) train_df.loc[:, "autotrain_label"] = train_df[self.label_column] valid_df.loc[:, "autotrain_id"] = valid_df[self.id_column] if self.id_column else list(range(len(valid_df))) valid_df.loc[:, 
"autotrain_label"] = valid_df[self.label_column] # drop id_column and label_column drop_cols = [self.id_column, self.label_column] if self.id_column else [self.label_column] train_df = train_df.drop(columns=drop_cols) valid_df = valid_df.drop(columns=drop_cols) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" class TabularMultiClassClassificationPreprocessor(TabularBinaryClassificationPreprocessor): pass class TabularSingleColumnRegressionPreprocessor(TabularBinaryClassificationPreprocessor): def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df @dataclass class TabularMultiLabelClassificationPreprocessor: """ TabularMultiLabelClassificationPreprocessor is a class for preprocessing tabular data for multi-label classification tasks. Attributes: train_data (pd.DataFrame): The training data. label_column (List[str]): List of columns to be used as labels. username (str): The username for the Hugging Face Hub. project_name (str): The project name for the Hugging Face Hub. id_column (Optional[str]): The column to be used as an identifier. Defaults to None. valid_data (Optional[pd.DataFrame]): The validation data. Defaults to None. test_size (Optional[float]): The proportion of the dataset to include in the validation split. Defaults to 0.2. seed (Optional[int]): The random seed for splitting the data. Defaults to 42. token (Optional[str]): The token for authentication with the Hugging Face Hub. Defaults to None. local (Optional[bool]): Whether to save the dataset locally or push to the Hugging Face Hub. Defaults to False. Methods: __post_init__(): Validates the presence of id_column and label_column in train_data and valid_data, and checks for reserved column names. split(): Splits the train_data into training and validation sets if valid_data is not provided. prepare_columns(train_df, valid_df): Prepares the columns by adding autotrain_id and autotrain_label columns, and drops the original id_column and label_column. prepare(): Prepares the dataset by splitting the data, preparing the columns, and converting to Hugging Face Dataset format. Saves the dataset locally or pushes to the Hugging Face Hub. 
""" train_data: pd.DataFrame label_column: List[str] username: str project_name: str id_column: Optional[str] = None valid_data: Optional[pd.DataFrame] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 token: Optional[str] = None local: Optional[bool] = False def __post_init__(self): # check if id_column and label_column are in train_data if self.id_column is not None: if self.id_column not in self.train_data.columns: raise ValueError(f"{self.id_column} not in train data") for label in self.label_column: if label not in self.train_data.columns: raise ValueError(f"{label} not in train data") # check if id_column and label_column are in valid_data if self.valid_data is not None: if self.id_column is not None: if self.id_column not in self.valid_data.columns: raise ValueError(f"{self.id_column} not in valid data") for label in self.label_column: if label not in self.valid_data.columns: raise ValueError(f"{label} not in valid data") # make sure no reserved columns are in train_data or valid_data for column in RESERVED_COLUMNS: if column in self.train_data.columns: raise ValueError(f"{column} is a reserved column name") if self.valid_data is not None: if column in self.valid_data.columns: raise ValueError(f"{column} is a reserved column name") def split(self): if self.valid_data is not None: return self.train_data, self.valid_data else: train_df, valid_df = train_test_split( self.train_data, test_size=self.test_size, random_state=self.seed, stratify=self.train_data[self.label_column], ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare_columns(self, train_df, valid_df): train_df.loc[:, "autotrain_id"] = train_df[self.id_column] if self.id_column else list(range(len(train_df))) for label in range(len(self.label_column)): train_df.loc[:, f"autotrain_label_{label}"] = train_df[self.label_column[label]] valid_df.loc[:, "autotrain_id"] = valid_df[self.id_column] if self.id_column else list(range(len(valid_df))) for label in range(len(self.label_column)): valid_df.loc[:, f"autotrain_label_{label}"] = valid_df[self.label_column[label]] # drop id_column and label_column drop_cols = [self.id_column] + self.label_column if self.id_column else self.label_column train_df = train_df.drop(columns=drop_cols) valid_df = valid_df.drop(columns=drop_cols) return train_df, valid_df def prepare(self): train_df, valid_df = self.split() train_df, valid_df = self.prepare_columns(train_df, valid_df) train_df = Dataset.from_pandas(train_df) valid_df = Dataset.from_pandas(valid_df) if self.local: dataset = DatasetDict( { "train": train_df, "validation": valid_df, } ) dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: train_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="train", private=True, token=self.token, ) valid_df.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", split="validation", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" class TabularMultiColumnRegressionPreprocessor(TabularMultiLabelClassificationPreprocessor): pass
2
0
hf_public_repos/autotrain-advanced/src/autotrain
hf_public_repos/autotrain-advanced/src/autotrain/preprocessor/vlm.py
import os import shutil import uuid from dataclasses import dataclass from typing import Optional import pandas as pd from datasets import Features, Image, Value, load_dataset from sklearn.model_selection import train_test_split ALLOWED_EXTENSIONS = ("jpeg", "png", "jpg", "JPG", "JPEG", "PNG") @dataclass class VLMPreprocessor: """ VLMPreprocessor is a class for preprocessing visual language model (VLM) datasets. It handles tasks such as validating data paths, ensuring the presence of required files, splitting datasets, and preparing data for training and validation. Attributes: train_data (str): Path to the training data directory. username (str): Username for the Hugging Face Hub. project_name (str): Name of the project. token (str): Authentication token for the Hugging Face Hub. column_mapping (dict): Mapping of column names. valid_data (Optional[str]): Path to the validation data directory. Default is None. test_size (Optional[float]): Proportion of the dataset to include in the validation split. Default is 0.2. seed (Optional[int]): Random seed for dataset splitting. Default is 42. local (Optional[bool]): Flag indicating whether to save data locally or push to the Hugging Face Hub. Default is False. Methods: _process_metadata(data_path): Processes the metadata.jsonl file in the given data path and ensures it contains the required columns. __post_init__(): Validates the existence of training and validation data paths, checks for required files, and ensures the presence of a minimum number of image files. split(df): Splits the given DataFrame into training and validation sets based on the specified test size and seed. prepare(): Prepares the dataset for training and validation by copying data to a cache directory, processing metadata, and either saving the dataset locally or pushing it to the Hugging Face Hub. 
""" train_data: str username: str project_name: str token: str column_mapping: dict valid_data: Optional[str] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 local: Optional[bool] = False def _process_metadata(self, data_path): metadata = pd.read_json(os.path.join(data_path, "metadata.jsonl"), lines=True) # make sure that the metadata.jsonl file contains the required columns: file_name, objects if "file_name" not in metadata.columns: raise ValueError(f"{data_path}/metadata.jsonl should contain 'file_name' column.") col_names = list(self.column_mapping.values()) for col in col_names: if col not in metadata.columns: raise ValueError(f"{data_path}/metadata.jsonl should contain '{col}' column.") return metadata def __post_init__(self): # Check if train data path exists if not os.path.exists(self.train_data): raise ValueError(f"{self.train_data} does not exist.") # check if self.train_data contains at least 5 image files in jpeg, png or jpg format only train_image_files = [f for f in os.listdir(self.train_data) if f.endswith(ALLOWED_EXTENSIONS)] if len(train_image_files) < 5: raise ValueError(f"{self.train_data} should contain at least 5 jpeg, png or jpg files.") # check if self.train_data contains a metadata.jsonl file if "metadata.jsonl" not in os.listdir(self.train_data): raise ValueError(f"{self.train_data} should contain a metadata.jsonl file.") # Check if valid data path exists if self.valid_data: if not os.path.exists(self.valid_data): raise ValueError(f"{self.valid_data} does not exist.") # check if self.valid_data contains at least 5 image files in jpeg, png or jpg format only valid_image_files = [f for f in os.listdir(self.valid_data) if f.endswith(ALLOWED_EXTENSIONS)] if len(valid_image_files) < 5: raise ValueError(f"{self.valid_data} should contain at least 5 jpeg, png or jpg files.") # check if self.valid_data contains a metadata.jsonl file if "metadata.jsonl" not in os.listdir(self.valid_data): raise ValueError(f"{self.valid_data} should contain a metadata.jsonl file.") def split(self, df): train_df, valid_df = train_test_split( df, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare(self): random_uuid = uuid.uuid4() cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") data_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) if self.valid_data: shutil.copytree(self.train_data, os.path.join(data_dir, "train")) shutil.copytree(self.valid_data, os.path.join(data_dir, "validation")) train_metadata = self._process_metadata(os.path.join(data_dir, "train")) valid_metadata = self._process_metadata(os.path.join(data_dir, "validation")) train_metadata.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_metadata.to_json( os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True ) features = Features( { "image": Image(), } ) for _, col_map in self.column_mapping.items(): features[col_map] = Value(dtype="string") dataset = load_dataset("imagefolder", data_dir=data_dir, features=features) rename_dict = { "image": "autotrain_image", } for col, col_map in self.column_mapping.items(): if col == "text_column": rename_dict[col_map] = "autotrain_text" elif col == "prompt_text_column": rename_dict[col_map] = "autotrain_prompt" dataset = dataset.rename_columns(rename_dict) if self.local: 
dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) else: metadata = pd.read_json(os.path.join(self.train_data, "metadata.jsonl"), lines=True) train_df, valid_df = self.split(metadata) # create train and validation folders os.makedirs(os.path.join(data_dir, "train"), exist_ok=True) os.makedirs(os.path.join(data_dir, "validation"), exist_ok=True) # move images to train and validation folders for row in train_df.iterrows(): shutil.copy( os.path.join(self.train_data, row[1]["file_name"]), os.path.join(data_dir, "train", row[1]["file_name"]), ) for row in valid_df.iterrows(): shutil.copy( os.path.join(self.train_data, row[1]["file_name"]), os.path.join(data_dir, "validation", row[1]["file_name"]), ) # save metadata.jsonl file to train and validation folders train_df.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_df.to_json(os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True) train_metadata = self._process_metadata(os.path.join(data_dir, "train")) valid_metadata = self._process_metadata(os.path.join(data_dir, "validation")) train_metadata.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_metadata.to_json( os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True ) features = Features( { "image": Image(), } ) for _, col_map in self.column_mapping.items(): features[col_map] = Value(dtype="string") dataset = load_dataset("imagefolder", data_dir=data_dir, features=features) rename_dict = { "image": "autotrain_image", } for col, col_map in self.column_mapping.items(): if col == "text_column": rename_dict[col_map] = "autotrain_text" elif col == "prompt_text_column": rename_dict[col_map] = "autotrain_prompt" dataset = dataset.rename_columns(rename_dict) if self.local: dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}"
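The VLM preprocessor works on a folder of images rather than a DataFrame. The sketch below assumes a hypothetical captions/ directory that satisfies the checks in __post_init__ (at least five images plus a metadata.jsonl whose rows carry file_name and a caption column); the directory name, column name, username, and token are all illustrative.

from autotrain.preprocessor.vlm import VLMPreprocessor

# Assumed layout:
#   captions/
#     metadata.jsonl    # one JSON object per line: {"file_name": "img_001.jpg", "caption": "..."}
#     img_001.jpg
#     img_002.jpg
#     ...               # at least 5 image files in total
prep = VLMPreprocessor(
    train_data="captions",
    username="my-username",
    project_name="demo-vlm",
    token="hf_xxx",                             # placeholder; unused when local=True
    column_mapping={"text_column": "caption"},  # maps the task role onto the metadata column
    local=True,
)
print(prep.prepare())  # the "caption" column is renamed to autotrain_text in the prepared dataset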
3
0
hf_public_repos/autotrain-advanced/src/autotrain
hf_public_repos/autotrain-advanced/src/autotrain/preprocessor/vision.py
import os import shutil import uuid from dataclasses import dataclass from typing import Optional import pandas as pd from datasets import ClassLabel, Features, Image, Sequence, Value, load_dataset from sklearn.model_selection import train_test_split ALLOWED_EXTENSIONS = ("jpeg", "png", "jpg", "JPG", "JPEG", "PNG") @dataclass class ImageClassificationPreprocessor: """ A class used to preprocess image data for classification tasks. Attributes ---------- train_data : str Path to the training data directory. username : str Username for the Hugging Face Hub. project_name : str Name of the project. token : str Authentication token for the Hugging Face Hub. valid_data : Optional[str], optional Path to the validation data directory, by default None. test_size : Optional[float], optional Proportion of the dataset to include in the validation split, by default 0.2. seed : Optional[int], optional Random seed for reproducibility, by default 42. local : Optional[bool], optional Whether to save the dataset locally or push to the Hugging Face Hub, by default False. Methods ------- __post_init__(): Validates the structure and contents of the training and validation data directories. split(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]: Splits the dataframe into training and validation sets. prepare() -> str: Prepares the dataset for training and either saves it locally or pushes it to the Hugging Face Hub. """ train_data: str username: str project_name: str token: str valid_data: Optional[str] = None test_size: Optional[float] = 0.2 seed: Optional[int] = 42 local: Optional[bool] = False def __post_init__(self): # Check if train data path exists if not os.path.exists(self.train_data): raise ValueError(f"{self.train_data} does not exist.") # Check if train data path contains at least 2 folders subfolders = [f.path for f in os.scandir(self.train_data) if f.is_dir()] # list subfolders if len(subfolders) < 2: raise ValueError(f"{self.train_data} should contain at least 2 subfolders.") # Check if each subfolder contains at least 5 image files in jpeg, png or jpg format only for subfolder in subfolders: image_files = [f for f in os.listdir(subfolder) if f.endswith(ALLOWED_EXTENSIONS)] if len(image_files) < 5: raise ValueError(f"{subfolder} should contain at least 5 jpeg, png or jpg files.") # Check if there are no other files except image files in the subfolder if len(image_files) != len(os.listdir(subfolder)): raise ValueError(f"{subfolder} should not contain any other files except image files.") # Check if there are no subfolders inside subfolders subfolders_in_subfolder = [f.path for f in os.scandir(subfolder) if f.is_dir()] if len(subfolders_in_subfolder) > 0: raise ValueError(f"{subfolder} should not contain any subfolders.") if self.valid_data: # Check if valid data path exists if not os.path.exists(self.valid_data): raise ValueError(f"{self.valid_data} does not exist.") # Check if valid data path contains at least 2 folders subfolders = [f.path for f in os.scandir(self.valid_data) if f.is_dir()] # make sure that the subfolders in train and valid data are the same train_subfolders = set(os.path.basename(f.path) for f in os.scandir(self.train_data) if f.is_dir()) valid_subfolders = set(os.path.basename(f.path) for f in os.scandir(self.valid_data) if f.is_dir()) if train_subfolders != valid_subfolders: raise ValueError(f"{self.valid_data} should have the same subfolders as {self.train_data}.") if len(subfolders) < 2: raise ValueError(f"{self.valid_data} should contain at least 2 subfolders.") # 
Check if each subfolder contains at least 5 image files in jpeg, png or jpg format only for subfolder in subfolders: image_files = [f for f in os.listdir(subfolder) if f.endswith(ALLOWED_EXTENSIONS)] if len(image_files) < 5: raise ValueError(f"{subfolder} should contain at least 5 jpeg, png or jpg files.") # Check if there are no other files except image files in the subfolder if len(image_files) != len(os.listdir(subfolder)): raise ValueError(f"{subfolder} should not contain any other files except image files.") # Check if there are no subfolders inside subfolders subfolders_in_subfolder = [f.path for f in os.scandir(subfolder) if f.is_dir()] if len(subfolders_in_subfolder) > 0: raise ValueError(f"{subfolder} should not contain any subfolders.") def split(self, df): train_df, valid_df = train_test_split( df, test_size=self.test_size, random_state=self.seed, stratify=df["subfolder"], ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare(self): random_uuid = uuid.uuid4() cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") data_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) if self.valid_data: shutil.copytree(self.train_data, os.path.join(data_dir, "train")) shutil.copytree(self.valid_data, os.path.join(data_dir, "validation")) dataset = load_dataset("imagefolder", data_dir=data_dir) dataset = dataset.rename_columns({"image": "autotrain_image", "label": "autotrain_label"}) if self.local: dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) else: subfolders = [f.path for f in os.scandir(self.train_data) if f.is_dir()] image_filenames = [] subfolder_names = [] for subfolder in subfolders: for filename in os.listdir(subfolder): if filename.endswith(("jpeg", "png", "jpg")): image_filenames.append(filename) subfolder_names.append(os.path.basename(subfolder)) df = pd.DataFrame({"image_filename": image_filenames, "subfolder": subfolder_names}) train_df, valid_df = self.split(df) for row in train_df.itertuples(): os.makedirs(os.path.join(data_dir, "train", row.subfolder), exist_ok=True) shutil.copy( os.path.join(self.train_data, row.subfolder, row.image_filename), os.path.join(data_dir, "train", row.subfolder, row.image_filename), ) for row in valid_df.itertuples(): os.makedirs(os.path.join(data_dir, "validation", row.subfolder), exist_ok=True) shutil.copy( os.path.join(self.train_data, row.subfolder, row.image_filename), os.path.join(data_dir, "validation", row.subfolder, row.image_filename), ) dataset = load_dataset("imagefolder", data_dir=data_dir) dataset = dataset.rename_columns({"image": "autotrain_image", "label": "autotrain_label"}) if self.local: dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" @dataclass class ObjectDetectionPreprocessor: """ A class to preprocess data for object detection tasks. Attributes: ----------- train_data : str Path to the training data directory. username : str Username for the Hugging Face Hub. project_name : str Name of the project. token : str Authentication token for the Hugging Face Hub. 
    valid_data : Optional[str], default=None
        Path to the validation data directory.
    test_size : Optional[float], default=0.2
        Proportion of the dataset to include in the validation split.
    seed : Optional[int], default=42
        Random seed for reproducibility.
    local : Optional[bool], default=False
        Whether to save the dataset locally or push to the Hugging Face Hub.

    Methods:
    --------
    _process_metadata(data_path):
        Processes the metadata.jsonl file and extracts required columns and categories.
    __post_init__():
        Validates the existence and content of the training and validation data directories.
    split(df):
        Splits the dataframe into training and validation sets.
    prepare():
        Prepares the dataset for training by processing metadata, splitting data, and saving or pushing the dataset.
    """

    train_data: str
    username: str
    project_name: str
    token: str
    valid_data: Optional[str] = None
    test_size: Optional[float] = 0.2
    seed: Optional[int] = 42
    local: Optional[bool] = False

    @staticmethod
    def _process_metadata(data_path):
        metadata = pd.read_json(os.path.join(data_path, "metadata.jsonl"), lines=True)
        # make sure that the metadata.jsonl file contains the required columns: file_name, objects
        if "file_name" not in metadata.columns or "objects" not in metadata.columns:
            raise ValueError(f"{data_path}/metadata.jsonl should contain 'file_name' and 'objects' columns.")
        # keep only the file_name and objects columns
        metadata = metadata[["file_name", "objects"]]
        # every entry in the objects column must provide 'bbox' and 'category'
        categories = []
        for _, row in metadata.iterrows():
            obj = row["objects"]
            if "bbox" not in obj or "category" not in obj:
                raise ValueError(f"{data_path}/metadata.jsonl should contain 'bbox' and 'category' keys in 'objects'.")
            # keep only the bbox and category keys
            obj = {k: obj[k] for k in ["bbox", "category"]}
            categories.extend(obj["category"])
        categories = set(categories)
        return metadata, categories

    def __post_init__(self):
        # Check if train data path exists
        if not os.path.exists(self.train_data):
            raise ValueError(f"{self.train_data} does not exist.")

        # check if self.train_data contains at least 5 image files in jpeg, png or jpg format only
        train_image_files = [f for f in os.listdir(self.train_data) if f.endswith(ALLOWED_EXTENSIONS)]
        if len(train_image_files) < 5:
            raise ValueError(f"{self.train_data} should contain at least 5 jpeg, png or jpg files.")

        # check if self.train_data contains a metadata.jsonl file
        if "metadata.jsonl" not in os.listdir(self.train_data):
            raise ValueError(f"{self.train_data} should contain a metadata.jsonl file.")

        # Check if valid data path exists
        if self.valid_data:
            if not os.path.exists(self.valid_data):
                raise ValueError(f"{self.valid_data} does not exist.")

            # check if self.valid_data contains at least 5 image files in jpeg, png or jpg format only
            valid_image_files = [f for f in os.listdir(self.valid_data) if f.endswith(ALLOWED_EXTENSIONS)]
            if len(valid_image_files) < 5:
                raise ValueError(f"{self.valid_data} should contain at least 5 jpeg, png or jpg files.")

            # check if self.valid_data contains a metadata.jsonl file
            if "metadata.jsonl" not in os.listdir(self.valid_data):
                raise ValueError(f"{self.valid_data} should contain a metadata.jsonl file.")

    def split(self, df):
        train_df, valid_df = train_test_split(
            df,
            test_size=self.test_size,
            random_state=self.seed,
        )
        train_df = train_df.reset_index(drop=True)
        valid_df = valid_df.reset_index(drop=True)
        return train_df, valid_df

    def prepare(self):
random_uuid = uuid.uuid4() cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") data_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) if self.valid_data: shutil.copytree(self.train_data, os.path.join(data_dir, "train")) shutil.copytree(self.valid_data, os.path.join(data_dir, "validation")) train_metadata, train_categories = self._process_metadata(os.path.join(data_dir, "train")) valid_metadata, valid_categories = self._process_metadata(os.path.join(data_dir, "validation")) train_metadata.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_metadata.to_json( os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True ) all_categories = train_categories.union(valid_categories) features = Features( { "image": Image(), "objects": Sequence( { "bbox": Sequence(Value("float32"), length=4), "category": ClassLabel(names=list(all_categories)), } ), } ) dataset = load_dataset("imagefolder", data_dir=data_dir, features=features) dataset = dataset.rename_columns( { "image": "autotrain_image", "objects": "autotrain_objects", } ) if self.local: dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) else: metadata = pd.read_json(os.path.join(self.train_data, "metadata.jsonl"), lines=True) train_df, valid_df = self.split(metadata) # create train and validation folders os.makedirs(os.path.join(data_dir, "train"), exist_ok=True) os.makedirs(os.path.join(data_dir, "validation"), exist_ok=True) # move images to train and validation folders for row in train_df.iterrows(): shutil.copy( os.path.join(self.train_data, row[1]["file_name"]), os.path.join(data_dir, "train", row[1]["file_name"]), ) for row in valid_df.iterrows(): shutil.copy( os.path.join(self.train_data, row[1]["file_name"]), os.path.join(data_dir, "validation", row[1]["file_name"]), ) # save metadata.jsonl file to train and validation folders train_df.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_df.to_json(os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True) train_metadata, train_categories = self._process_metadata(os.path.join(data_dir, "train")) valid_metadata, valid_categories = self._process_metadata(os.path.join(data_dir, "validation")) train_metadata.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_metadata.to_json( os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True ) all_categories = train_categories.union(valid_categories) features = Features( { "image": Image(), "objects": Sequence( { "bbox": Sequence(Value("float32"), length=4), "category": ClassLabel(names=list(all_categories)), } ), } ) dataset = load_dataset("imagefolder", data_dir=data_dir, features=features) dataset = dataset.rename_columns( { "image": "autotrain_image", "objects": "autotrain_objects", } ) if self.local: dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}" @dataclass class ImageRegressionPreprocessor: train_data: str username: str project_name: str token: str valid_data: Optional[str] = None 
test_size: Optional[float] = 0.2 seed: Optional[int] = 42 local: Optional[bool] = False @staticmethod def _process_metadata(data_path): metadata = pd.read_json(os.path.join(data_path, "metadata.jsonl"), lines=True) # make sure that the metadata.jsonl file contains the required columns: file_name, target if "file_name" not in metadata.columns or "target" not in metadata.columns: raise ValueError(f"{data_path}/metadata.jsonl should contain 'file_name' and 'target' columns.") # keep only file_name and target columns metadata = metadata[["file_name", "target"]] return metadata def __post_init__(self): # Check if train data path exists if not os.path.exists(self.train_data): raise ValueError(f"{self.train_data} does not exist.") # check if self.train_data contains at least 5 image files in jpeg, png or jpg format only train_image_files = [f for f in os.listdir(self.train_data) if f.endswith(ALLOWED_EXTENSIONS)] if len(train_image_files) < 5: raise ValueError(f"{self.train_data} should contain at least 5 jpeg, png or jpg files.") # check if self.train_data contains a metadata.jsonl file if "metadata.jsonl" not in os.listdir(self.train_data): raise ValueError(f"{self.train_data} should contain a metadata.jsonl file.") # Check if valid data path exists if self.valid_data: if not os.path.exists(self.valid_data): raise ValueError(f"{self.valid_data} does not exist.") # check if self.valid_data contains at least 5 image files in jpeg, png or jpg format only valid_image_files = [f for f in os.listdir(self.valid_data) if f.endswith(ALLOWED_EXTENSIONS)] if len(valid_image_files) < 5: raise ValueError(f"{self.valid_data} should contain at least 5 jpeg, png or jpg files.") # check if self.valid_data contains a metadata.jsonl file if "metadata.jsonl" not in os.listdir(self.valid_data): raise ValueError(f"{self.valid_data} should contain a metadata.jsonl file.") def split(self, df): train_df, valid_df = train_test_split( df, test_size=self.test_size, random_state=self.seed, ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) return train_df, valid_df def prepare(self): random_uuid = uuid.uuid4() cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") data_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) if self.valid_data: shutil.copytree(self.train_data, os.path.join(data_dir, "train")) shutil.copytree(self.valid_data, os.path.join(data_dir, "validation")) train_metadata = self._process_metadata(os.path.join(data_dir, "train")) valid_metadata = self._process_metadata(os.path.join(data_dir, "validation")) train_metadata.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_metadata.to_json( os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True ) dataset = load_dataset("imagefolder", data_dir=data_dir) dataset = dataset.rename_columns( { "image": "autotrain_image", "target": "autotrain_label", } ) if self.local: dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) else: metadata = pd.read_json(os.path.join(self.train_data, "metadata.jsonl"), lines=True) train_df, valid_df = self.split(metadata) # create train and validation folders os.makedirs(os.path.join(data_dir, "train"), exist_ok=True) os.makedirs(os.path.join(data_dir, "validation"), exist_ok=True) # move images to train and validation 
folders for row in train_df.iterrows(): shutil.copy( os.path.join(self.train_data, row[1]["file_name"]), os.path.join(data_dir, "train", row[1]["file_name"]), ) for row in valid_df.iterrows(): shutil.copy( os.path.join(self.train_data, row[1]["file_name"]), os.path.join(data_dir, "validation", row[1]["file_name"]), ) # save metadata.jsonl file to train and validation folders train_df.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_df.to_json(os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True) train_metadata = self._process_metadata(os.path.join(data_dir, "train")) valid_metadata = self._process_metadata(os.path.join(data_dir, "validation")) train_metadata.to_json(os.path.join(data_dir, "train", "metadata.jsonl"), orient="records", lines=True) valid_metadata.to_json( os.path.join(data_dir, "validation", "metadata.jsonl"), orient="records", lines=True ) dataset = load_dataset("imagefolder", data_dir=data_dir) dataset = dataset.rename_columns( { "image": "autotrain_image", "target": "autotrain_label", } ) if self.local: dataset.save_to_disk(f"{self.project_name}/autotrain-data") else: dataset.push_to_hub( f"{self.username}/autotrain-data-{self.project_name}", private=True, token=self.token, ) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}"
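A minimal usage sketch for the image classification preprocessor defined above, assuming a local folder of class subdirectories. The import path, folder names, and token are placeholders/assumptions and not part of the original module; only the field names and `prepare()` return value come from the code above.

```python
# Hypothetical usage of ImageClassificationPreprocessor; paths, username and token are placeholders.
from autotrain.preprocessor.vision import ImageClassificationPreprocessor  # assumed module path

preprocessor = ImageClassificationPreprocessor(
    train_data="images/train",    # must contain >=2 class subfolders, each with >=5 images
    username="my-hf-username",
    project_name="my-image-project",
    token="hf_xxx",               # Hugging Face token (placeholder)
    valid_data=None,              # when None, a stratified split controlled by test_size is created
    test_size=0.2,
    seed=42,
    local=True,                   # save to <project_name>/autotrain-data instead of pushing to the Hub
)

data_path = preprocessor.prepare()
print(data_path)  # e.g. "my-image-project/autotrain-data"
```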
4
0
hf_public_repos/autotrain-advanced/src/autotrain
hf_public_repos/autotrain-advanced/src/autotrain/tests/test_dummy.py
def test_dummy():
    assert 1 + 1 == 2
5
0
hf_public_repos/autotrain-advanced/src/autotrain
hf_public_repos/autotrain-advanced/src/autotrain/tools/merge_adapter.py
import torch from peft import PeftModel from transformers import AutoModelForCausalLM, AutoTokenizer from autotrain import logger from autotrain.trainers.common import ALLOW_REMOTE_CODE def merge_llm_adapter( base_model_path, adapter_path, token, output_folder=None, pad_to_multiple_of=None, push_to_hub=False ): """ Merges a language model adapter into a base model and optionally saves or pushes the merged model. Args: base_model_path (str): Path to the base model. adapter_path (str): Path to the adapter model. token (str): Authentication token for accessing the models. output_folder (str, optional): Directory to save the merged model. Defaults to None. pad_to_multiple_of (int, optional): If specified, pad the token embeddings to a multiple of this value. Defaults to None. push_to_hub (bool, optional): If True, push the merged model to the Hugging Face Hub. Defaults to False. Raises: ValueError: If neither `output_folder` nor `push_to_hub` is specified. Returns: None """ if output_folder is None and push_to_hub is False: raise ValueError("You must specify either --output_folder or --push_to_hub") logger.info("Loading adapter...") base_model = AutoModelForCausalLM.from_pretrained( base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=ALLOW_REMOTE_CODE, token=token, ) tokenizer = AutoTokenizer.from_pretrained( adapter_path, trust_remote_code=ALLOW_REMOTE_CODE, token=token, ) if pad_to_multiple_of: base_model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=pad_to_multiple_of) else: base_model.resize_token_embeddings(len(tokenizer)) model = PeftModel.from_pretrained( base_model, adapter_path, token=token, ) model = model.merge_and_unload() if output_folder is not None: logger.info("Saving target model...") model.save_pretrained(output_folder) tokenizer.save_pretrained(output_folder) logger.info(f"Model saved to {output_folder}") if push_to_hub: logger.info("Pushing model to Hugging Face Hub...") model.push_to_hub(adapter_path) tokenizer.push_to_hub(adapter_path) logger.info(f"Model pushed to Hugging Face Hub as {adapter_path}")
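A brief usage sketch for `merge_llm_adapter` called directly from Python; the model IDs, token, and output folder below are placeholders, while the function signature matches the definition above.

```python
# Hypothetical invocation of merge_llm_adapter; model ids, token and output folder are placeholders.
from autotrain.tools.merge_adapter import merge_llm_adapter

merge_llm_adapter(
    base_model_path="meta-llama/Llama-2-7b-hf",  # base model the adapter was trained on (placeholder)
    adapter_path="my-username/my-lora-adapter",  # PEFT adapter repo or local folder (placeholder)
    token="hf_xxx",                              # needed for gated or private repos (placeholder)
    output_folder="merged-model",                # merged weights and tokenizer are saved here
    pad_to_multiple_of=None,
    push_to_hub=False,
)
```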
6
0
hf_public_repos/autotrain-advanced/src/autotrain
hf_public_repos/autotrain-advanced/src/autotrain/tools/convert_to_kohya.py
from diffusers.utils import convert_all_state_dict_to_peft, convert_state_dict_to_kohya
from safetensors.torch import load_file, save_file

from autotrain import logger


def convert_to_kohya(input_path, output_path):
    """
    Converts a Lora state dictionary to a Kohya state dictionary and saves it to the specified output path.

    Args:
        input_path (str): The file path to the input Lora state dictionary.
        output_path (str): The file path where the converted Kohya state dictionary will be saved.

    Returns:
        None
    """
    logger.info(f"Converting Lora state dict from {input_path} to Kohya state dict at {output_path}")
    lora_state_dict = load_file(input_path)
    peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict)
    kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)
    save_file(kohya_state_dict, output_path)
    logger.info(f"Kohya state dict saved at {output_path}")
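A short usage sketch for `convert_to_kohya`; both file names are illustrative placeholders, and the call matches the signature defined above.

```python
# Hypothetical conversion of a diffusers/PEFT LoRA safetensors file to the Kohya format.
from autotrain.tools.convert_to_kohya import convert_to_kohya

convert_to_kohya(
    input_path="pytorch_lora_weights.safetensors",  # LoRA weights from a diffusers training run (placeholder name)
    output_path="kohya_lora.safetensors",           # output usable by Kohya-based tools (placeholder name)
)
```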
7
0
hf_public_repos/autotrain-advanced/configs
hf_public_repos/autotrain-advanced/configs/object_detection/local.yml
task: object_detection
base_model: facebook/detr-resnet-50
project_name: autotrain-obj-det-local-dataset
log: tensorboard
backend: local

data:
  path: data/ # this contains the train and validation folders
  train_split: train # this is the folder name inside the data path, contains images and metadata.jsonl
  valid_split: validation # this is the folder name inside the data path, contains images and metadata.jsonl, optional
  column_mapping:
    image_column: image
    objects_column: objects

params:
  image_square_size: 600
  epochs: 100
  batch_size: 8
  lr: 5e-5
  weight_decay: 1e-4
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16
  early_stopping_patience: 50
  early_stopping_threshold: 0.001

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
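The local config above expects each split folder to ship a `metadata.jsonl` file. Based on the checks in `ObjectDetectionPreprocessor._process_metadata` earlier in this section (a `file_name` column and an `objects` column with `bbox` and `category`), here is a sketch of writing one such record; the file name, box coordinates, and label are illustrative only, and the exact bbox convention is not specified by the config.

```python
# Illustrative only: append a single metadata.jsonl record in the shape the preprocessor validates.
import json

record = {
    "file_name": "image_0001.jpg",            # placeholder image name
    "objects": {
        "bbox": [[10.0, 20.0, 110.0, 95.0]],  # one box per object (illustrative coordinates)
        "category": ["helmet"],               # one label per box (placeholder label)
    },
}

with open("data/train/metadata.jsonl", "a") as f:
    f.write(json.dumps(record) + "\n")
```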
8
0
hf_public_repos/autotrain-advanced/configs
hf_public_repos/autotrain-advanced/configs/object_detection/hub_dataset.yml
task: object_detection
base_model: facebook/detr-resnet-50
project_name: autotrain-obj-det-cppe5-2
log: tensorboard
backend: local

data:
  path: cppe-5
  train_split: train
  valid_split: test
  column_mapping:
    image_column: image
    objects_column: objects

params:
  image_square_size: 600
  epochs: 100
  batch_size: 8
  lr: 5e-5
  weight_decay: 1e-4
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16
  early_stopping_patience: 50
  early_stopping_threshold: 0.001

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
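The hub config above maps `image_column` and `objects_column` onto columns it assumes exist in the `cppe-5` dataset. A quick, optional way to confirm that locally before launching training (the dataset id is taken from the config; the printed structure may vary):

```python
# Sketch: inspect the cppe-5 dataset splits and columns referenced by the config above.
from datasets import load_dataset

ds = load_dataset("cppe-5")
print(ds)                         # shows the train/test splits used as train_split/valid_split
print(ds["train"].features)       # should include the "image" and "objects" columns
print(ds["train"][0]["objects"])  # objects are expected to carry bbox/category annotations
```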
9
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/metavoice/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use clap::Parser; use std::io::Write; use candle_transformers::generation::LogitsProcessor; use candle_transformers::models::encodec; use candle_transformers::models::metavoice::{adapters, gpt, tokenizers, transformer}; use candle_transformers::models::quantized_metavoice::transformer as qtransformer; use candle::{DType, IndexOp, Tensor}; use candle_nn::VarBuilder; use hf_hub::api::sync::Api; use rand::{distributions::Distribution, SeedableRng}; pub const ENCODEC_NTOKENS: u32 = 1024; #[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)] enum ArgDType { F32, F16, Bf16, } enum Transformer { Normal(transformer::Model), Quantized(qtransformer::Model), } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// Use the quantized version of the model. #[arg(long)] quantized: bool, /// The guidance scale. #[arg(long, default_value_t = 3.0)] guidance_scale: f64, /// The temperature used to generate samples. #[arg(long, default_value_t = 1.0)] temperature: f64, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The maximum number of tokens to generate for the first stage. #[arg(long, default_value_t = 2000)] max_tokens: u64, /// The output file using the wav format. #[arg(long, default_value = "out.wav")] out_file: String, #[arg(long)] first_stage_meta: Option<String>, #[arg(long)] first_stage_weights: Option<String>, #[arg(long)] second_stage_weights: Option<String>, #[arg(long)] encodec_weights: Option<String>, #[arg(long)] spk_emb: Option<String>, #[arg(long, default_value = "f32")] dtype: ArgDType, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); let device = candle_examples::device(args.cpu)?; let api = Api::new()?; let repo = api.model("lmz/candle-metavoice".to_string()); let first_stage_meta = match &args.first_stage_meta { Some(w) => std::path::PathBuf::from(w), None => repo.get("first_stage.meta.json")?, }; let first_stage_meta: serde_json::Value = serde_json::from_reader(&std::fs::File::open(first_stage_meta)?)?; let first_stage_tokenizer = match first_stage_meta.as_object() { None => anyhow::bail!("not a json object"), Some(j) => match j.get("tokenizer") { None => anyhow::bail!("no tokenizer key"), Some(j) => j, }, }; let fs_tokenizer = tokenizers::BPE::from_json(first_stage_tokenizer, 512)?; let second_stage_weights = match &args.second_stage_weights { Some(w) => std::path::PathBuf::from(w), None => repo.get("second_stage.safetensors")?, }; let encodec_weights = match args.encodec_weights { Some(w) => std::path::PathBuf::from(w), None => Api::new()? 
.model("facebook/encodec_24khz".to_string()) .get("model.safetensors")?, }; let dtype = match args.dtype { ArgDType::F32 => DType::F32, ArgDType::F16 => DType::F16, ArgDType::Bf16 => DType::BF16, }; let first_stage_config = transformer::Config::cfg1b_v0_1(); let mut first_stage_model = if args.quantized { let filename = match &args.first_stage_weights { Some(w) => std::path::PathBuf::from(w), None => repo.get("first_stage_q4k.gguf")?, }; let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(filename, &device)?; let first_stage_model = qtransformer::Model::new(&first_stage_config, vb)?; Transformer::Quantized(first_stage_model) } else { let first_stage_weights = match &args.first_stage_weights { Some(w) => std::path::PathBuf::from(w), None => repo.get("first_stage.safetensors")?, }; let first_stage_vb = unsafe { VarBuilder::from_mmaped_safetensors(&[first_stage_weights], dtype, &device)? }; let first_stage_model = transformer::Model::new(&first_stage_config, first_stage_vb)?; Transformer::Normal(first_stage_model) }; let second_stage_vb = unsafe { VarBuilder::from_mmaped_safetensors(&[second_stage_weights], dtype, &device)? }; let second_stage_config = gpt::Config::cfg1b_v0_1(); let second_stage_model = gpt::Model::new(second_stage_config.clone(), second_stage_vb)?; let encodec_device = if device.is_metal() { &candle::Device::Cpu } else { &device }; let encodec_vb = unsafe { VarBuilder::from_mmaped_safetensors(&[encodec_weights], dtype, encodec_device)? }; let encodec_config = encodec::Config::default(); let encodec_model = encodec::Model::new(&encodec_config, encodec_vb)?; println!("prompt: '{}'", args.prompt); let prompt_tokens = fs_tokenizer.encode(&args.prompt)?; let mut tokens = prompt_tokens.clone(); println!("{tokens:?}"); let spk_emb_file = match &args.spk_emb { Some(w) => std::path::PathBuf::from(w), None => repo.get("spk_emb.safetensors")?, }; let spk_emb = candle::safetensors::load(&spk_emb_file, &candle::Device::Cpu)?; let spk_emb = match spk_emb.get("spk_emb") { None => anyhow::bail!("missing spk_emb tensor in {spk_emb_file:?}"), Some(spk_emb) => spk_emb.to_dtype(dtype)?, }; let spk_emb = spk_emb.to_device(&device)?; let mut logits_processor = LogitsProcessor::new(args.seed, Some(args.temperature), Some(0.95)); // First stage generation. for index in 0..args.max_tokens { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &device)?; let input = Tensor::stack(&[&input, &input], 0)?; let logits = match &mut first_stage_model { Transformer::Normal(m) => m.forward(&input, &spk_emb, tokens.len() - context_size)?, Transformer::Quantized(m) => { m.forward(&input, &spk_emb, tokens.len() - context_size)? } }; let logits0 = logits.i((0, 0))?; let logits1 = logits.i((1, 0))?; let logits = ((logits0 * args.guidance_scale)? + logits1 * (1. - args.guidance_scale))?; let logits = logits.to_dtype(DType::F32)?; let next_token = logits_processor.sample(&logits)?; tokens.push(next_token); print!("."); std::io::stdout().flush()?; if next_token == 2048 { break; } } println!(); let fie2c = adapters::FlattenedInterleavedEncodec2Codebook::new(ENCODEC_NTOKENS); let (text_ids, ids1, ids2) = fie2c.decode(&tokens); println!("text ids len: {}", text_ids.len()); let mut rng = rand::rngs::StdRng::seed_from_u64(args.seed + 1337); // TODO: Use the config rather than hardcoding the offset here. 
let encoded_text: Vec<_> = prompt_tokens.iter().map(|v| v - 1024).collect(); let mut hierarchies_in1 = [encoded_text.as_slice(), ids1.as_slice(), &[ENCODEC_NTOKENS]].concat(); let mut hierarchies_in2 = [ vec![ENCODEC_NTOKENS; encoded_text.len()].as_slice(), ids2.as_slice(), &[ENCODEC_NTOKENS], ] .concat(); hierarchies_in1.resize(second_stage_config.block_size, ENCODEC_NTOKENS); hierarchies_in2.resize(second_stage_config.block_size, ENCODEC_NTOKENS); let in_x1 = Tensor::new(hierarchies_in1, &device)?; let in_x2 = Tensor::new(hierarchies_in2, &device)?; let in_x = Tensor::stack(&[in_x1, in_x2], 0)?.unsqueeze(0)?; let logits = second_stage_model.forward(&in_x)?; println!("sampling from logits..."); let mut codes = vec![]; for logits in logits.iter() { let logits = logits.squeeze(0)?; let (seq_len, _) = logits.dims2()?; let mut codes_ = Vec::with_capacity(seq_len); for step in 0..seq_len { let logits = logits.i(step)?.to_dtype(DType::F32)?; let logits = &(&logits / 1.0)?; let prs = candle_nn::ops::softmax_last_dim(logits)?.to_vec1::<f32>()?; let distr = rand::distributions::WeightedIndex::new(prs.as_slice())?; let sample = distr.sample(&mut rng) as u32; codes_.push(sample) } codes.push(codes_) } let codes = Tensor::new(codes, &device)?.unsqueeze(0)?; let codes = Tensor::cat(&[in_x, codes], 1)?; println!("codes: {codes}"); let tilted_encodec = adapters::TiltedEncodec::new(ENCODEC_NTOKENS); let codes = codes.i(0)?.to_vec2::<u32>()?; let (text_ids, audio_ids) = tilted_encodec.decode(&codes); println!("text_ids len: {:?}", text_ids.len()); let audio_ids = Tensor::new(audio_ids, encodec_device)?.unsqueeze(0)?; println!("audio_ids shape: {:?}", audio_ids.shape()); let pcm = encodec_model.decode(&audio_ids)?; println!("output pcm shape: {:?}", pcm.shape()); let pcm = pcm.i(0)?.i(0)?.to_dtype(DType::F32)?; let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?; let pcm = pcm.to_vec1::<f32>()?; let mut output = std::fs::File::create(&args.out_file)?; candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24_000)?; Ok(()) }
0
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/metavoice/README.md
# candle-metavoice

MetaVoice-1B is a text-to-speech model trained on 100K hours of speech; more details can be found on the [model card](https://huggingface.co/metavoiceio/metavoice-1B-v0.1).

Note that the current candle implementation suffers from some limitations as of 2024-03-02:

- The speaker embeddings are hardcoded.
- The generated audio quality is weaker than that of the Python implementation, probably because of some implementation discrepancies.

## Run an example

```bash
cargo run --example metavoice --release -- \
  --prompt "This is a demo of text to speech by MetaVoice-1B, an open-source foundational audio model."
```
1
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/bigcode/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::bigcode::{Config, GPTBigCode}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: GPTBigCode, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, } impl TextGeneration { fn new( model: GPTBigCode, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); print!("{prompt}"); std::io::stdout().flush()?; let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let (context_size, past_len) = if self.model.config().use_cache && index > 0 { (1, tokens.len().saturating_sub(1)) } else { (tokens.len(), 0) }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, past_len)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({:.3} token/s)", sample_len as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "bigcode/starcoderbase-1b")] model_id: String, #[arg(long, default_value = "main")] revision: String, #[arg(long)] weight_file: Option<String>, } fn main() -> Result<()> { let args = Args::parse(); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = match args.weight_file { Some(weight_file) => vec![std::path::PathBuf::from(weight_file)], None => ["model.safetensors"] .iter() .map(|f| repo.get(f)) .collect::<std::result::Result<Vec<_>, _>>()?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? 
}; let config = Config::starcoder_1b(); let model = GPTBigCode::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
2
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/bigcode/README.md
# candle-starcoder: code generation model

[StarCoder/BigCode](https://huggingface.co/bigcode/starcoderbase-1b) is an LLM specialized in code generation. The initial model was trained on 80 programming languages.

## Running an example

```bash
cargo run --example bigcode --release -- --prompt "fn fact(n: u64) -> u64 "

> fn fact(n: u64) -> u64 {
>     if n == 0 {
>         1
>     } else {
>         n * fact(n - 1)
>     }
> }
```
3
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/beit/main.rs
//! BEiT: BERT Pre-Training of Image Transformers //! https://github.com/microsoft/unilm/tree/master/beit #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::Parser; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::beit; /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 384, 384). Beit special normalization is applied. pub fn load_image384_beit_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = load_image384_beit_norm(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("vincent-espitalier/candle-beit".into()); api.get("beit_base_patch16_384.in22k_ft_in22k_in1k.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = beit::vit_base(vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
4
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/beit/README.md
# candle-beit

[Beit](https://arxiv.org/abs/2106.08254) is a computer vision model. In this example, it is used as an ImageNet classifier: the model returns the probability for the image to belong to each of the 1000 ImageNet categories.

## Running an example

```bash
cargo run --example beit --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg

> mountain bike, all-terrain bike, off-roader: 56.16%
> bicycle-built-for-two, tandem bicycle, tandem: 3.08%
> maillot : 2.23%
> alp : 0.88%
> crash helmet : 0.85%
```

![Leading group, Giro d'Italia 2021](../yolo-v8/assets/bike.jpg)
5
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/mobileclip/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::{Parser, ValueEnum}; use candle::{DType, Device, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use candle_transformers::models::mobileclip; use tokenizers::Tokenizer; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { S1, S2, } impl Which { fn model_name(&self) -> String { let name = match self { Self::S1 => "S1", Self::S2 => "S2", }; format!("apple/MobileCLIP-{}-OpenCLIP", name) } fn config(&self) -> mobileclip::MobileClipConfig { match self { Self::S1 => mobileclip::MobileClipConfig::s1(), Self::S2 => mobileclip::MobileClipConfig::s2(), } } } #[derive(Parser)] struct Args { #[arg(long, use_value_delimiter = true)] images: Option<Vec<String>>, #[arg(long)] cpu: bool, /// Use the pytorch weights rather than the safetensors ones #[arg(long)] use_pth: bool, #[arg(long, use_value_delimiter = true)] sequences: Option<Vec<String>>, #[arg(value_enum, long, default_value_t=Which::S1)] which: Which, } fn load_images<T: AsRef<std::path::Path>>( paths: &Vec<T>, image_size: usize, ) -> anyhow::Result<Tensor> { let mut images = vec![]; for path in paths { let tensor = candle_examples::imagenet::load_image_with_std_mean( path, image_size, &[0.0, 0.0, 0.0], &[1.0, 1.0, 1.0], )?; images.push(tensor); } let images = Tensor::stack(&images, 0)?; Ok(images) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_name = args.which.model_name(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); let model_file = if args.use_pth { api.get("open_clip_pytorch_model.bin")? } else { api.get("open_clip_model.safetensors")? }; let tokenizer = api.get("tokenizer.json")?; let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let config = &args.which.config(); let device = candle_examples::device(args.cpu)?; let vec_imgs = match args.images { Some(imgs) => imgs, None => vec![ "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(), "candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(), ], }; let images = load_images(&vec_imgs, config.image_size)?.to_device(&device)?; let vb = if args.use_pth { VarBuilder::from_pth(&model_file, DType::F32, &device)? } else { unsafe { VarBuilder::from_mmaped_safetensors(&[model_file.clone()], DType::F32, &device)? 
} }; let model = mobileclip::MobileClipModel::new(vb, config)?; let (input_ids, vec_seq) = tokenize_sequences(args.sequences, &tokenizer, &device)?; let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?; let softmax_image = softmax(&logits_per_image, 1)?; let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?; println!("softmax_image_vec: {:?}", softmax_image_vec); let probability_vec = softmax_image_vec .iter() .map(|v| v * 100.0) .collect::<Vec<f32>>(); let probability_per_image = probability_vec.len() / vec_imgs.len(); for (i, img) in vec_imgs.iter().enumerate() { let start = i * probability_per_image; let end = start + probability_per_image; let prob = &probability_vec[start..end]; println!("\n\nResults for image: {}\n", img); for (i, p) in prob.iter().enumerate() { println!("Probability: {:.4}% Text: {}", p, vec_seq[i]); } } Ok(()) } pub fn tokenize_sequences( sequences: Option<Vec<String>>, tokenizer: &Tokenizer, device: &Device, ) -> anyhow::Result<(Tensor, Vec<String>)> { // let pad_id = *tokenizer // .get_vocab(true) // .get("<|endoftext|>") // .ok_or(E::msg("No pad token"))?; // The model does not work well if the text is padded using the <|endoftext|> token, using 0 // as the original OpenCLIP code. let pad_id = 0; let vec_seq = match sequences { Some(seq) => seq, None => vec![ "a cycling race".to_string(), "a photo of two cats".to_string(), "a robot holding a candle".to_string(), ], }; let mut tokens = vec![]; for seq in vec_seq.clone() { let encoding = tokenizer.encode(seq, true).map_err(E::msg)?; tokens.push(encoding.get_ids().to_vec()); } let max_len = tokens.iter().map(|v| v.len()).max().unwrap_or(0); // Pad the sequences to have the same length for token_vec in tokens.iter_mut() { let len_diff = max_len - token_vec.len(); if len_diff > 0 { token_vec.extend(vec![pad_id; len_diff]); } } let input_ids = Tensor::new(tokens, device)?; Ok((input_ids, vec_seq)) }
6
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/mobileclip/README.md
# candle-mobileclip

MobileCLIP is a family of efficient CLIP-like models using FastViT-based image encoders.

See [MobileCLIP: Fast Image-Text Models through Multi-Modal Reinforced Training](https://arxiv.org/abs/2311.17049)

## Running an example on CPU

```
$ cargo run --example mobileclip --release -- --images "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg","candle-examples/examples/yolo-v8/assets/bike.jpg" --cpu --sequences "a cycling race","a photo of two cats","a robot holding a candle"

softmax_image_vec: [2.4819004e-5, 3.81081e-6, 0.9999714, 0.9999738, 2.382714e-5, 2.3317718e-6]

Results for image: candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg

Probability: 0.0025% Text: a cycling race
Probability: 0.0004% Text: a photo of two cats
Probability: 99.9971% Text: a robot holding a candle

Results for image: candle-examples/examples/yolo-v8/assets/bike.jpg

Probability: 99.9974% Text: a cycling race
Probability: 0.0024% Text: a photo of two cats
Probability: 0.0002% Text: a robot holding a candle
```
7
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/pixtral/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::pixtral::{vision_model, Config, Model}; use candle::{DType, Device, Module, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, image: Tensor, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, image: Tensor, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, image, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut generated_tokens = 0usize; let get_token = |v| match self.tokenizer.get_token(v) { Some(token) => Ok(token), None => anyhow::bail!("cannot find the {v} token"), }; let bos_token = get_token("<s>")?; let eos_token = get_token("</s>")?; let inst_token = get_token("[INST]")?; let end_inst_token = get_token("[/INST]")?; let img_break = get_token("[IMG_BREAK]")?; let img_end = get_token("[IMG_END]")?; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let logits = if index > 0 { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; self.model.lm_forward(&input)? } else { let (_b, _c, h, w) = self.image.dims4()?; let h = h / self.model.patch_size; let w = w / self.model.patch_size; let image_embeds = self.model.encode_image(&self.image)?; println!("generated image embeddings {image_embeds:?}"); let image_embeds = image_embeds.to_dtype(self.model.dtype)?; for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let break_embeds = { let input = Tensor::new(&[img_break], &self.device)?.unsqueeze(0)?; self.model.language_model.embed_tokens().forward(&input)? }; let start_embeds = { let mut in_tokens = vec![bos_token, inst_token]; in_tokens.extend_from_slice(tokens.as_slice()); let input = Tensor::new(in_tokens.as_slice(), &self.device)?.unsqueeze(0)?; self.model.language_model.embed_tokens().forward(&input)? }; let end_embeds = { let input = Tensor::new(&[img_end, end_inst_token], &self.device)?.unsqueeze(0)?; self.model.language_model.embed_tokens().forward(&input)? }; let mut input_embeds = vec![start_embeds]; for h_idx in 0..h { if h_idx > 0 { input_embeds.push(break_embeds.clone()) } let row = image_embeds.narrow(1, h_idx * w, w)?; input_embeds.push(row); } input_embeds.push(end_embeds); let input_embeds = Tensor::cat(&input_embeds, 1)?; self.model.lm_forward_embeds(&input_embeds)? }; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. 
{ logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long, default_value = "Describe the image.\n")] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 10000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "main")] revision: String, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] config_file: Option<String>, #[arg(long)] weight_files: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, #[arg(long)] image: String, #[arg(long)] vision_only: bool, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match &args.model_id { Some(model_id) => model_id.to_string(), None => "mistral-community/pixtral-12b".to_string(), }; let repo = api.repo(Repo::with_revision( model_id, RepoType::Model, args.revision, )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?, }; println!("retrieved the files in {:?}", start.elapsed()); let device = candle_examples::device(args.cpu)?; let dtype = if device.supports_bf16() && !args.vision_only { DType::BF16 } else { DType::F32 }; let config: Config = match args.config_file { Some(config_file) => serde_json::from_slice(&std::fs::read(config_file)?)?, None => { let config_file = repo.get("config.json")?; serde_json::from_slice(&std::fs::read(config_file)?)? } }; let image = if args.image.ends_with(".safetensors") { match candle::safetensors::load(&args.image, &device)?.remove("img") { None => anyhow::bail!("no img tensor in {}", args.image), Some(v) => v, } } else { candle_examples::imagenet::load_image_with_std_mean( &args.image, 1024, &[0.48145466, 0.4578275, 0.40821073], &[0.26862954, 0.261_302_6, 0.275_777_1], )? }; let image = image.to_device(&device)?.unsqueeze(0)?; println!("loaded image with shape {:?}", image); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; if args.vision_only { let start = std::time::Instant::now(); let model = vision_model::Model::new(&config.vision_config, vb.pp("vision_tower"))?; println!("loaded the model in {:?}", start.elapsed()); let embs = model.forward(&image)?; println!("EMBS\n{embs}"); } else { let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let model = Model::new(&config, vb)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, image, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; } Ok(()) }
8
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/pixtral/README.md
# pixtral

Pixtral-12B is a 12B text+vision model.

[Blog Post](https://mistral.ai/news/pixtral-12b/) -
[HF Model Card](https://huggingface.co/mistralai/Pixtral-12B-2409) -
[HF Community Model Card](https://huggingface.co/mistral-community/pixtral-12b).

```bash
cargo run --profile=release-with-debug --features cuda --example pixtral -- \
  --image candle-examples/examples/flux/assets/flux-robot.jpg
```

```
Describe the image.

The image depicts a charming, rustic robot standing on a sandy beach at sunset. The robot has a vintage, steampunk aesthetic with visible gears and mechanical parts. It is holding a small lantern in one hand, which emits a warm glow, and its other arm is extended forward as if reaching out or guiding the way. The robot's body is adorned with the word "RUST" in bright orange letters, adding to its rustic theme. The background features a dramatic sky filled with clouds, illuminated by the setting sun, casting a golden hue over the scene. Gentle waves lap against the shore, creating a serene and picturesque atmosphere. The overall mood of the image is whimsical and nostalgic, evoking a sense of adventure and tranquility.
```
9
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js
// Load the Candle T5 ModelEncoder wasm module
let init, ModelEncoder;

async function fetchArrayBuffer(url) {
  const cacheName = "t5-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Encoder {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (modelID.includes("quantized")) {
      ({ default: init, ModelEncoder } = await import(
        "./build/m-quantized.js"
      ));
    } else {
      ({ default: init, ModelEncoder } = await import("./build/m.js"));
    }
    if (!this.instance[modelID]) {
      await init();
      self.postMessage({ status: "loading", message: "Loading Model" });
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
        await Promise.all([
          fetchArrayBuffer(weightsURL),
          fetchArrayBuffer(tokenizerURL),
          fetchArrayBuffer(configURL),
        ]);

      this.instance[modelID] = new ModelEncoder(
        weightsArrayU8,
        tokenizerArrayU8,
        configArrayU8
      );
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const {
    weightsURL,
    tokenizerURL,
    configURL,
    modelID,
    sentences,
    normalize_embeddings,
  } = event.data;
  try {
    self.postMessage({ status: "ready", message: "Starting T5 Encoder" });
    const model = await Encoder.getInstance(
      weightsURL,
      tokenizerURL,
      configURL,
      modelID
    );
    self.postMessage({
      status: "encoding",
      message: "Encoding Sentences",
    });

    const output = model.decode({
      sentences: sentences,
      // default to true only when the caller did not pass a value ("|| true" would ignore false)
      normalize_embeddings: normalize_embeddings ?? true,
    });
    self.postMessage({
      status: "complete",
      message: "complete",
      output: output,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
0
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/index.html
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle T5</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import { getModelInfo, MODELS, extractEmbeddings, generateText, } from "./utils.js"; const t5ModelEncoderWorker = new Worker("./T5ModelEncoderWorker.js", { type: "module", }); const t5ModelConditionalGeneration = new Worker( "./T5ModelConditionalGeneration.js", { type: "module" } ); const formEl = document.querySelector("#form"); const modelEl = document.querySelector("#model"); const promptEl = document.querySelector("#prompt"); const temperatureEl = document.querySelector("#temperature"); const toppEL = document.querySelector("#top-p"); const repeatPenaltyEl = document.querySelector("#repeat_penalty"); const seedEl = document.querySelector("#seed"); const outputEl = document.querySelector("#output-generation"); const tasksEl = document.querySelector("#tasks"); let selectedTaskID = ""; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelEl.appendChild(option); } populateTasks(modelEl.value); modelEl.addEventListener("change", (e) => { populateTasks(e.target.value); }); tasksEl.addEventListener("change", (e) => { const task = e.target.value; const modelID = modelEl.value; promptEl.value = MODELS[modelID].tasks[task].prefix; selectedTaskID = task; }); }); function populateTasks(modelID) { const tasks = MODELS[modelID].tasks; tasksEl.innerHTML = ""; for (const [task, params] of Object.entries(tasks)) { const div = document.createElement("div"); div.innerHTML = ` <input type="radio" name="task" id="${task}" class="font-light cursor-pointer" value="${task}" /> <label for="${task}" class="cursor-pointer"> ${params.prefix} </label> `; tasksEl.appendChild(div); } selectedTaskID = Object.keys(tasks)[0]; tasksEl.querySelector(`#${selectedTaskID}`).checked = true; } form.addEventListener("submit", (e) => { e.preventDefault(); const promptText = promptEl.value; const modelID = modelEl.value; const { modelURL, configURL, tokenizerURL, maxLength } = getModelInfo( modelID, selectedTaskID ); const params = { temperature: Number(temperatureEl.value), top_p: Number(toppEL.value), repetition_penalty: Number(repeatPenaltyEl.value), seed: BigInt(seedEl.value), max_length: maxLength, }; generateText( t5ModelConditionalGeneration, modelURL, tokenizerURL, configURL, modelID, promptText, params, (status) => { if (status.status === "loading") { outputEl.innerText = "Loading model..."; } if (status.status === "decoding") { outputEl.innerText = "Generating..."; } } ).then(({ output }) => { outputEl.innerText = output.generation; }); }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle T5 Transformer</h1> <h2 
class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> This demo showcase Text-To-Text Transfer Transformer (<a href="https://blog.research.google/2020/02/exploring-transfer-learning-with-t5.html" target="_blank" class="link" >T5</a >) models right in your browser, thanks to <a href="https://github.com/huggingface/candle/" target="_blank" class="link"> Candle </a> ML framework and rust/wasm. You can choose from a range of available models, including <a href="https://huggingface.co/t5-small" target="_blank" class="link"> t5-small</a >, <a href="https://huggingface.co/t5-base" target="_blank" class="link" >t5-base</a >, <a href="https://huggingface.co/google/flan-t5-small" target="_blank" class="link" >flan-t5-small</a >, several <a href="https://huggingface.co/lmz/candle-quantized-t5/tree/main" target="_blank" class="link"> t5 quantized gguf models</a >, and also a quantized <a href="https://huggingface.co/jbochi/candle-coedit-quantized/tree/main" target="_blank" class="link"> CoEdIT model for text rewrite</a >. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"></select> </div> <div> <h3 class="font-medium">Task Prefix:</h3> <form id="tasks" class="flex flex-col gap-1 my-2"></form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add prompt here, e.g. 'translate English to German: Today I'm going to eat Ice Cream'" value="translate English to German: Today I'm going to eat Ice Cream" /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <div class="grid grid-cols-3 max-w-md items-center gap-3"> <label class="text-sm font-medium" for="temperature">Temperature</label> <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] 
bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2 text-lg"> <p id="output-generation" class="grid-rows-2">No output yet</p> </div> </div> </main> </body> </html>
1
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/Cargo.toml
[package]
name = "candle-wasm-example-t5"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true

[dependencies]
candle = { workspace = true }
candle-nn = { workspace = true }
candle-transformers = { workspace = true }
num-traits = { workspace = true }
tokenizers = { workspace = true, features = ["unstable_wasm"] }

# App crates.
anyhow = { workspace = true }
byteorder = { workspace = true }
log = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
safetensors = { workspace = true }

# Wasm specific crates.
console_error_panic_hook = "0.1.7"
getrandom = { version = "0.2", features = ["js"] }
gloo = "0.11"
js-sys = "0.3.64"
wasm-bindgen = "0.2.87"
serde-wasm-bindgen = "0.6.0"
2
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/build-lib.sh
cargo build --target wasm32-unknown-unknown --release
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m-quantized.wasm --out-dir build --target web
3
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js
//load Candle Bert Module wasm module let init, ModelConditionalGeneration; async function fetchArrayBuffer(url) { const cacheName = "t5-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class ConditionalGeneration { static instance = {}; static async getInstance(weightsURL, tokenizerURL, configURL, modelID) { if (modelID.includes("quantized")) { ({ default: init, ModelConditionalGeneration } = await import( "./build/m-quantized.js" )); } else { ({ default: init, ModelConditionalGeneration } = await import( "./build/m.js" )); } if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new ModelConditionalGeneration( weightsArrayU8, tokenizerArrayU8, configArrayU8 ); } else { self.postMessage({ status: "ready", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, tokenizerURL, configURL, modelID, prompt, params } = event.data; let { temperature = 0.0, seed = 299792458, repeat_penalty = 1.1, repeat_last_n = 64, top_p = 1, } = { ...params }; try { self.postMessage({ status: "ready", message: "Starting T5 Conditional Generation", }); const model = await ConditionalGeneration.getInstance( weightsURL, tokenizerURL, configURL, modelID ); self.postMessage({ status: "decoding", message: "Decoding Prompt", }); const output = model.decode({ prompt, temperature, seed, top_p, repeat_penalty, repeat_last_n, }); self.postMessage({ status: "complete", message: "complete", output: output, }); } catch (e) { self.postMessage({ error: e }); } });
4
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/utils.js
export async function extractEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus, normalize_embeddings = true ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export async function generateText( worker, weightsURL, tokenizerURL, configURL, modelID, prompt, params, updateStatus ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, prompt, params, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export const MODELS = { t5_small_quantized: { size: "64.4 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, t5_small: { size: "242 MB", base_url: "https://huggingface.co/t5-small/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_small: { size: "308 MB", base_url: "https://huggingface.co/google/flan-t5-small/resolve/refs%2Fpr%2F14/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_base_quantized: { size: "263 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model-flan-t5-base.gguf", tokenizer: "tokenizer.json", config: "config-flan-t5-base.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, coedit_large_quantized: { size: "643 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: 
"model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { fluency: { prefix: "Fix the grammar: ", max_length: 300, }, coherence: { prefix: "Rewrite to make this easier to understand: ", max_length: 300, }, simplification: { prefix: "translate English to Romanian: ", max_length: 300, }, simplification: { prefix: "Paraphrase this: ", max_length: 300, }, formalization: { prefix: "Write this more formally: ", max_length: 300, }, neutralize: { prefix: "Write in a more neutral way: ", max_length: 300, }, }, }, }; export function getModelInfo(id, taskID) { const model = MODELS[id]; return { modelURL: model.base_url + model.model, configURL: model.base_url + model.config, tokenizerURL: model.base_url + model.tokenizer, maxLength: model.tasks[taskID].max_length, }; }
5
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/README.md
## Running T5 with Candle and WASM

Here, we provide an example of how to run T5 using a Candle-compiled WASM binary and runtime.

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m.js";
```

For the quantized version, we need to import the quantized module:

```js
import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m-quantized.js";
```

The full example can be found under `./index.html`. All needed assets are fetched from the web, so there is no need to download anything. Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/index.html` in your browser.
6
0
hf_public_repos/candle/candle-wasm-examples/t5
hf_public_repos/candle/candle-wasm-examples/t5/src/lib.rs
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
extern "C" {
    // Use `js_namespace` here to bind `console.log(..)` instead of just
    // `log(..)`
    #[wasm_bindgen(js_namespace = console)]
    pub fn log(s: &str);
}

#[macro_export]
macro_rules! console_log {
    // Note that this is using the `log` function imported above.
    ($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
7
0
hf_public_repos/candle/candle-wasm-examples/t5/src
hf_public_repos/candle/candle-wasm-examples/t5/src/bin/m.rs
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; pub use candle_transformers::models::t5::{Config, T5EncoderModel, T5ForConditionalGeneration}; use candle_wasm_example_t5::console_log; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct ModelEncoder { model: T5EncoderModel, tokenizer: Tokenizer, } #[wasm_bindgen] pub struct ModelConditionalGeneration { model: T5ForConditionalGeneration, tokenizer: Tokenizer, config: Config, } #[wasm_bindgen] impl ModelConditionalGeneration { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelConditionalGeneration, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let device = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?; let mut config: Config = serde_json::from_slice(&config)?; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5ForConditionalGeneration::load(vb, &config)?; config.use_cache = false; Ok(Self { model, tokenizer, config, }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let input: ConditionalGenerationParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = &Device::Cpu; self.model.clear_kv_cache(); let mut output_token_ids = [self.config.pad_token_id as u32].to_vec(); let prompt = input.prompt; let repeat_penalty = input.repeat_penalty; let repeat_last_n = input.repeat_last_n; let seed = input.seed; let max_length = usize::clamp(input.max_length.unwrap_or(512), 0, 512); let temperature = if input.temperature <= 0. { None } else { Some(input.temperature) }; let top_p = if input.top_p <= 0. || input.top_p >= 1. { None } else { Some(input.top_p) }; let mut logits_processor = LogitsProcessor::new(seed, temperature, top_p); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let encoder_output = self.model.encode(&input_token_ids)?; let mut decoded = String::new(); for index in 0.. { if output_token_ids.len() > max_length { break; } let decoder_token_ids = if index == 0 { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = self .model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == self.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = self.tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); decoded += &text; } } Ok(serde_wasm_bindgen::to_value( &ConditionalGenerationOutput { generation: decoded, }, )?) 
} } #[wasm_bindgen] impl ModelEncoder { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelEncoder, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let device = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?; let mut config: Config = serde_json::from_slice(&config)?; config.use_cache = false; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5EncoderModel::load(vb, &config)?; Ok(Self { model, tokenizer }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let device = &Device::Cpu; let input: DecoderParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; self.model.clear_kv_cache(); let sentences = input.sentences; let normalize_embeddings = input.normalize_embeddings; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = self .tokenizer .encode(sentence, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let embeddings = self.model.forward(&token_ids)?; console_log!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if normalize_embeddings { embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)? } else { embeddings }; console_log!("{:?}", embeddings.shape()); all_embeddings.push(embeddings.squeeze(0)?.to_vec1::<f32>()?); } Ok(serde_wasm_bindgen::to_value(&DecoderOutput { embeddings: all_embeddings, })?) } } #[derive(serde::Serialize, serde::Deserialize)] struct ConditionalGenerationOutput { generation: String, } #[derive(serde::Serialize, serde::Deserialize)] struct DecoderOutput { embeddings: Vec<Vec<f32>>, } #[derive(serde::Serialize, serde::Deserialize)] pub struct DecoderParams { sentences: Vec<String>, normalize_embeddings: bool, } #[derive(serde::Serialize, serde::Deserialize)] pub struct ConditionalGenerationParams { prompt: String, temperature: f64, seed: u64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, max_length: Option<usize>, } fn main() { console_error_panic_hook::set_once(); }
8
0
hf_public_repos/candle/candle-wasm-examples/t5/src
hf_public_repos/candle/candle-wasm-examples/t5/src/bin/m-quantized.rs
use candle::{Device, Tensor}; use candle_transformers::generation::LogitsProcessor; pub use candle_transformers::models::quantized_t5::{ Config, T5EncoderModel, T5ForConditionalGeneration, VarBuilder, }; use candle_wasm_example_t5::console_log; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; const DEVICE: Device = Device::Cpu; #[wasm_bindgen] pub struct ModelEncoder { model: T5EncoderModel, tokenizer: Tokenizer, } #[wasm_bindgen] pub struct ModelConditionalGeneration { model: T5ForConditionalGeneration, tokenizer: Tokenizer, config: Config, } #[wasm_bindgen] impl ModelConditionalGeneration { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelConditionalGeneration, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights, &DEVICE)?; let mut config: Config = serde_json::from_slice(&config)?; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5ForConditionalGeneration::load(vb, &config)?; config.use_cache = false; Ok(Self { model, tokenizer, config, }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let input: ConditionalGenerationParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = &DEVICE; self.model.clear_kv_cache(); let mut output_token_ids = [self.config.pad_token_id as u32].to_vec(); let prompt = input.prompt; let repeat_penalty = input.repeat_penalty; let repeat_last_n = input.repeat_last_n; let seed = input.seed; let max_length = usize::clamp(input.max_length.unwrap_or(512), 0, 512); let temperature = if input.temperature <= 0. { None } else { Some(input.temperature) }; let top_p = if input.top_p <= 0. || input.top_p >= 1. { None } else { Some(input.top_p) }; let mut logits_processor = LogitsProcessor::new(seed, temperature, top_p); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let encoder_output = self.model.encode(&input_token_ids)?; let mut decoded = String::new(); for index in 0.. { if output_token_ids.len() > max_length { break; } let decoder_token_ids = if index == 0 { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = self .model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == self.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = self.tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); decoded += &text; } } Ok(serde_wasm_bindgen::to_value( &ConditionalGenerationOutput { generation: decoded, }, )?) 
} } #[wasm_bindgen] impl ModelEncoder { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelEncoder, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights, &DEVICE)?; let mut config: Config = serde_json::from_slice(&config)?; config.use_cache = false; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5EncoderModel::load(vb, &config)?; Ok(Self { model, tokenizer }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let device = &DEVICE; let input: DecoderParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; self.model.clear_kv_cache(); let sentences = input.sentences; let normalize_embeddings = input.normalize_embeddings; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = self .tokenizer .encode(sentence, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let embeddings = self.model.forward(&token_ids)?; console_log!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if normalize_embeddings { embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)? } else { embeddings }; console_log!("{:?}", embeddings.shape()); all_embeddings.push(embeddings.squeeze(0)?.to_vec1::<f32>()?); } Ok(serde_wasm_bindgen::to_value(&DecoderOutput { embeddings: all_embeddings, })?) } } #[derive(serde::Serialize, serde::Deserialize)] struct ConditionalGenerationOutput { generation: String, } #[derive(serde::Serialize, serde::Deserialize)] struct DecoderOutput { embeddings: Vec<Vec<f32>>, } #[derive(serde::Serialize, serde::Deserialize)] pub struct DecoderParams { sentences: Vec<String>, normalize_embeddings: bool, } #[derive(serde::Serialize, serde::Deserialize)] pub struct ConditionalGenerationParams { prompt: String, temperature: f64, seed: u64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, max_length: Option<usize>, } fn main() { console_error_panic_hook::set_once(); }
9
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/iris-1.0-input.json
{ "data": { "sepal length (cm)": [ 6.1, 5.7, 7.7, 6.0, 6.8, 5.4, 5.6, 6.9, 6.2, 5.8 ], "sepal width (cm)": [ 2.8, 3.8, 2.6, 2.9, 2.8, 3.4, 2.9, 3.1, 2.2, 2.7 ], "petal length (cm)": [ 4.7, 1.7, 6.9, 4.5, 4.8, 1.5, 3.6, 5.1, 4.5, 3.9 ], "petal width (cm)": [ 1.2, 0.3, 2.3, 1.5, 1.4, 0.4, 1.3, 2.3, 1.5, 1.2 ] } }
0
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-logistic_regression-latest-output.json
[ [ { "label": "0", "score": 0.008896718626007294 }, { "label": "1", "score": 0.280220671837972 }, { "label": "2", "score": 0.044846046575922976 }, { "label": "3", "score": 0.03920991699885032 }, { "label": "4", "score": 0.057385215221753105 }, { "label": "5", "score": 0.052487265368533896 }, { "label": "6", "score": 0.13974153545132648 }, { "label": "7", "score": 0.07067742309755881 }, { "label": "8", "score": 0.01745753683135335 }, { "label": "9", "score": 0.025684542619266296 }, { "label": "10", "score": 0.009767948522403052 }, { "label": "11", "score": 0.02612484979490926 }, { "label": "12", "score": 0.03535200014248993 }, { "label": "13", "score": 0.10969064335116936 }, { "label": "14", "score": 0.013549205719292714 }, { "label": "15", "score": 0.04903959225618569 }, { "label": "16", "score": 0.01001134971391883 }, { "label": "17", "score": 0.0017206454852604731 }, { "label": "18", "score": 0.0073138998853855075 }, { "label": "19", "score": 0.0008229925004406002 } ] ]
1
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-logistic_regression-1.0-output.json
[ [ { "label": "0", "score": 0.011336682178470362 }, { "label": "1", "score": 0.2877934181568519 }, { "label": "2", "score": 0.04995931455255469 }, { "label": "3", "score": 0.048606811555846334 }, { "label": "4", "score": 0.048146172402679224 }, { "label": "5", "score": 0.04894534558346955 }, { "label": "6", "score": 0.15705162235349931 }, { "label": "7", "score": 0.06988420347214097 }, { "label": "8", "score": 0.020057367014262424 }, { "label": "9", "score": 0.023752600566338086 }, { "label": "10", "score": 0.008731496867220766 }, { "label": "11", "score": 0.02785232288841256 }, { "label": "12", "score": 0.031210992630705495 }, { "label": "13", "score": 0.08448433781265935 }, { "label": "14", "score": 0.01330510596772587 }, { "label": "15", "score": 0.04597003192222465 }, { "label": "16", "score": 0.011186980163869558 }, { "label": "17", "score": 0.0025914382007259642 }, { "label": "18", "score": 0.00786147018192487 }, { "label": "19", "score": 0.0012722855284180384 } ] ]
2
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/tabularregression-linear_regression-latest-output.json
[ 139.54755840379605, 179.51720835342783, 134.0387557189011, 291.4170292522083, 123.78965872239607, 92.17234650105041, 258.23238898921295, 181.3373205706072, 90.22411310941459, 108.63375858007925 ]
3
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/span_marker/requirements.txt
starlette==0.27.0
api-inference-community==0.0.32
huggingface_hub>=0.17.3
span_marker>=1.4.0
4
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/span_marker/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Tom Aarsen <[email protected]>"

# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/

# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data

# Necessary on GPU environment docker.
# The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose,
# rendering the TIMEOUT defined by uvicorn impossible to use correctly.
# We're overriding it to be renamed UVICORN_TIMEOUT.
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
5
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/span_marker/prestart.sh
python app/main.py
6
0
hf_public_repos/api-inference-community/docker_images/span_marker
hf_public_repos/api-inference-community/docker_images/span_marker/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import Pipeline, TokenClassificationPipeline from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "token-classification": TokenClassificationPipeline } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
7
0
hf_public_repos/api-inference-community/docker_images/span_marker/app
hf_public_repos/api-inference-community/docker_images/span_marker/app/pipelines/base.py
from abc import ABC, abstractmethod
from typing import Any


class Pipeline(ABC):
    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    pass
8
0
hf_public_repos/api-inference-community/docker_images/span_marker/app
hf_public_repos/api-inference-community/docker_images/span_marker/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException  # isort:skip

from app.pipelines.token_classification import TokenClassificationPipeline
9
0
hf_public_repos/blog
hf_public_repos/blog/zh/deep-learning-with-proteins.md
--- title: "蛋白质深度学习" thumbnail: /blog/assets/119_deep_learning_with_proteins/folding_example.png authors: - user: rocketknight1 translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 蛋白质深度学习 本文主要面向两类目标读者: 一类是想使用机器学习的生物学家,一类是想进入生物学领域的机器学习研究者。如果你不熟悉生物学或机器学习,仍然欢迎你阅读本文,但有时你可能会觉得有点读不太懂!如果你已经熟悉这两者,那么你可能根本不需要本文 —— 你可以直接跳到我们的示例 notebook 以查看这些模型的实际应用: - 微调蛋白质语言模型 ([PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling.ipynb),[TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling-tf.ipynb)) - 使用 ESMFold 进行蛋白质折叠 ([PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_folding.ipynb),因为 `OpenFold` 仅支持 PyTorch,所以目前仅支持 PyTorch) ## 面向生物学家的科普: 语言模型是什么鬼? 用于处理蛋白质的模型深受 BERT 和 GPT 等大语言模型的启发。因此,为了了解这些模型是如何工作的,我们要回到 2016 年左右,那时大语言模型还没有出现,特朗普还没有当选,脱欧还没有发生,深度学习 (Deep Learning,DL) 还是个日日新的超级新星 …… DL 成功的关键在于它使用人工神经网络来学习数据中的复杂模式。不过,深度学习有一个关键问题 —— 它需要 **大量** 的数据才能正常工作,而在很多任务中,根本没那么多数据。 假设你想训练一个 DL 模型,输入一个英语句子,并判断它是否合乎语法。所以你准备了训练数据,格式如下: | Text | Label | | --- | --- | | The judge told the jurors to think carefully. | Correct | | The judge told that the jurors to think carefully. | Incorrect | | … | … | 理论上,这个任务在当时是完全可行的 —— 如果你将如上格式的训练数据输入深度学习模型,它就可以学着去预测新句子是否合乎语法。但在实践中,它的效果并不怎么好,因为在 2016 年,大多数人都从一个随机初始化的新模型开始他们的每项任务。这意味着 **模型必须仅从给定的训练数据中学习它们需要知道的一切!** 我们来理解一下这到底有多难,假设你是一个机器学习模型,我提供给你一些训练数据用于完成我希望你学习的任务。假如我给你的训练数据如下: | Text | Label | | --- | --- | | Is í an stiúrthóir is fearr ar domhan! | 1 | | Is fuath liom an scannán seo. | 0 | | Scannán den scoth ab ea é. | 1 | | D’fhág mé an phictiúrlann tar éis fiche nóiméad! | 0 | 在这里,我选择了一种我希望你从未曾见过的语言,所以我猜你已经可能开始对你是否能学会这个任务不太自信了。也许在数百或数千个样本之后,你可能会开始注意到输入中一些重复出现的单词或模式,然后你可能开始能够作出比随机机猜测更好的判断,但即使这样,一旦出现新单词或之前没见过的措辞马上就能够难住你,让你猜错。无独有偶,这也是 DL 模型当时的表现! 现在我们试试相同的任务,但这次使用英语: | Text | Label | | --- | --- | | She’s the best director in the world! | 1 | | I hate this movie. | 0 | | It was an absolutely excellent film. | 1 | | I left the cinema after twenty minutes! | 0 | 现在事情变得简单了 —— 任务只是预测电影评论是正面 (1) 还是负面 (0) 的。仅使用两个正例和两个反例,你就能以接近 100% 的准确率完成这项任务,因为 **你原本就具备大量的英语词汇和语法知识,并具有电影和情感相关表达的文化背景。** 如果没有这些知识,事情就会变得更像第一个任务 —— 你需要阅读大量的例子才能开始发现输入中的表达模式,即使你花时间研究了数十万个的例子你的猜测仍然远不如在英语任务中只有四个例子准确。 ### 关键突破: 迁移学习 在机器学习中,我们把这种将先验知识迁移到新任务的概念称为“**迁移学习**”。在 DL 上使用迁移学习是 2016 年左右该领域的一个主要目标。预训练词向量之类的东西 (非常有趣,但超出了本文的范围!) 在 2016 年确实存在并且允许迁移一些知识到新的模型,但是这种知识迁移仍然比较肤浅,模型仍然需要大量的训练数据才能很好地工作。 这种情况一直持续到 2018 年。2018 年,两篇巨著横空出世,第一篇引入了 [ULMFiT](https://arxiv.org/abs/1801.06146) 模型,第二篇引入了 [BERT](https://arxiv.org/abs/1810.04805) 模型。这两篇论文是让自然语言迁移学习真正发挥作用的开创性论文,尤其是 BERT 标志着预训练大语言模型时代的发轫。两篇论文共同使用了一个技巧,那就是它们利用了深度学习中人工神经网络的固有性质 —— 先花较长的时间在有着丰富训练数据的文本任务上训练神经网络,然后将整个神经网络复制到新任务中,仅用新任务的数据更新或重新训练与网络输出相对应的少数神经元。 ![迁移学习](/blog/assets/119_deep_learning_with_proteins/transfer_learning.png) _上图来自 [ULMFiT 论文](https://arxiv.org/abs/1801.06146),它展示了在三个独立的任务上使用迁移学习与从头开始训练模型相比带来的巨大的性能提升。在许多情况下,使用迁移学习的效果相当于拥有超过 100 倍的训练数据。不要忘记这是 2018 年发布的 —— 现代的大语言模型可以做得更好!_ 这样做的原因是,在解决任何重要任务的过程中,神经网络学习到很多输入数据的结构性知识 —— 如对于视觉神经网络,输入的是原始像素,模型学习到了如何识别直线、曲线和边缘; 对于文本神经网络,输入的是原始文本,模型学习到了有关语法结构的细节。而这些信息并不特定于某些任务。—— 迁移学习起作用的关键原因是 **解决任务需要知道的很多信息都不是特定于该任务的!** 要对电影评论进行分类,你不需要了解很多关于电影评论的知识,但你需要大量的英语和文化背景知识。通过选择训练数据丰富的任务,我们可以让神经网络学习此类“领域知识”,然后将其应用于我们关心的新任务,而在这些新任务中训练数据可能更难获取。 至此,希望你已经了解了什么是迁移学习,并且大语言模型是一个经过大量文本数据训练而得的大型神经网络,这使其成为迁移到新任务的主要备选方案。我们将在下面看到相同的技术如何应用​​于蛋白质,但首先我需要为另一半观众写一篇介绍。如果你已经熟悉这方面的知识,你可以随时跳过下一部分! ## 面向机器学习研究者的科普: 蛋白质是什么鬼? 
简而言之,蛋白质可以做很多事情。有些蛋白质是 **酶** —— 它们充当化学反应的催化剂。当你的身体将营养物质转化为能量时,从食物到肌肉运动的每一步都由一种酶催化。一些蛋白质是 **结构性的**,它们的功能是提供稳定性以及塑形,例如结缔组织的蛋白质。如果你看过化妆品广告,你可能看到过 **胶原蛋白**、 **弹性蛋白** 以及 **角蛋白**,这些是构成我们皮肤和头发结构的蛋白质。 其它蛋白质对健康和疾病至关重要 —— 每个人可能都记得有关 COVID-19 病毒的 **spike 蛋白** 的无数新闻报道。 COVID spike 蛋白与人类细胞表面一种名为 ACE2 的蛋白质结合,使其能够进入细胞并传递病毒 RNA 的有效载荷。由于这种相互作用对感染至关重要,因此在 COVID 大流行期间对这些蛋白质及其相互作用进行建模是一个热门研究焦点。 蛋白质由多个 **氨基酸组成**。氨基酸是相对简单的分子,它们都具有相同的分子结构,而该结构的化学性质允许氨基酸融合在一起,从而使单个分子可以成为一条长链。这里关键是要知道氨基酸种类不多 —— 只有 20 种标准氨基酸,某些生物体上可能还有一些其他非标准的氨基酸,但总量不多。导致蛋白质巨大多样性的原因是 **这些氨基酸可以按任何顺序组合**,而由此产生的蛋白质链可以具有截然不同的形状和功能,因为链的不同部分会粘连以及彼此折叠。与文本类比一下: 英语只有 26 个字母,但想想你可以用这 26 个字母的组合写出各种单词。 事实上,由于氨基酸的数量很少,生物学家可以为每一种氨基酸分配一个不同的字母。这意味着你可以像编写文本字符串一样编写蛋白质!例如,假设一种蛋白质链中有这些氨基酸: 甲硫氨酸、丙氨酸和组氨酸。这些氨基酸的 [对应的字母](https://en.wikipedia.org/wiki/Amino_acid#Table_of_standard_amino_acid_abbreviations_and_properties) 是 M、A 和 H,因此我们可以将该链写为 “MAH”。不过,大多数蛋白质含有数百甚至数千个氨基酸,而不仅仅是三个!! ![蛋白质结构](/blog/assets/119_deep_learning_with_proteins/protein_structure.png) _上图显示了一种蛋白质的两种表示形式。所有氨基酸都包含碳 - 碳 - 氮 (C-C-N) 序列。当氨基酸融合到蛋白质中时,这种重复模式将贯穿始终,我们称为蛋白质的 “骨架”。然而,氨基酸的不同之处在于它们的 “侧链”,侧链指的是附着在 C-C-N 主链上的原子。图的下半部分有标记为 R1、R2 和 R3 的侧链,它们可以是任何氨基酸。在图的上半部分,中央氨基酸有一个 CH3 侧链 - 那么该氨基酸即为 **丙氨酸,由字母 A 表示**([图片来源](https://commons.wikimedia.org/wiki/File:Peptide-Figure-Revised.png))。_ 尽管我们可以将其写成文本字符串,但蛋白质实际上并不是一种 “语言”,至少不是诺姆 - 乔姆斯基认可的任何一种语言。但它们确实有一些类似语言的特征,从机器学习的角度来看,它们是一个与文本非常相似的领域: 只有一部分字符串是有“意义”的。随机文本是垃圾,随机蛋白质只是一个无形状的斑点。 此外,如果你只是孤立地考虑蛋白质的一部分,信息就会丢失,就像当你只阅读从较长文本中提取的某个句子时,信息也会丢失。蛋白质的一个区域可能只有在其它部分存在的情况下才会呈现其自然形状,因为需要其它部分帮助稳定和矫正其形状!这意味着被全局自注意力很好地捕捉到的那种长程作用力对于正确建模蛋白质非常重要。 至此,希望你对蛋白质是什么以及为什么生物学家如此关心它们有一个基本的概念 —— 尽管氨基酸“字母表” 、很小,但它们具有广泛的结构和功能多样性。因此如果能仅通过观察氨基酸的原始“字符串”来理解和预测蛋白质的结构和功能对研究是非常有价值的。 ## 联袂 - 蛋白质机器学习 现在我们已经了解了使用语言模型进行迁移学习是如何工作的,同时我们还了解了什么是蛋白质。一旦你有了这些背景知识,下一步就不难了 —— 我们可以在蛋白质上应用相同的迁移学习思想!我们不是在涉及英文文本的任务上预先训练模型,而是在输入是蛋白质且有大量可用训练数据的任务上训练它。一旦我们这样做了,我们的模型就有希望学到很多关于蛋白质结构的知识,就像语言模型学到了很多关于语言结构的知识一样。这使得预训练的蛋白质模型有希望可以迁移到任何其它基于蛋白质的任务! 生物学家想在哪些任务上用机器学习训练蛋白质模型?最著名的蛋白质建模任务是 **蛋白质折叠**。该任务是,给定像 “MLKNV……” 这样的氨基酸链,预测蛋白质最终会折叠成什么形状。这是一项极其重要的任务,因为准确预测蛋白质的形状和结构可以深入了解蛋白质作用和机理。 早在现代机器学习出现之前,人们就一直在研究这个问题。最早的一些大规模分布式计算项目,如 Folding@Home,以超精的空间和时间分辨率使用原子级模拟来模拟蛋白质折叠。甚至还存在一个专门的 _蛋白质晶体学_领域,该领域的研究者使用 X 射线衍射来观察从活细胞中分离出的蛋白质的结构。 然而,与许多其他领域一样,深度学习的到来改变了一切。 AlphaFold,尤其是 AlphaFold2 使用了 transformer 结构的深度学习模型,并在模型上增加了针对蛋白质数据的处理,在仅从原始氨基酸序列预测新型蛋白质结构方面取得了出色的结果。如果你对蛋白质折叠感兴趣,我们强烈建议你看看 [我们的 ESMFold notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_folding.ipynb) —— ESMFold 是一种类似于 AlphaFold2 的新模型,但它是一种更“纯”的深度学习模型,不需要任何外部数据库或搜索操作即可运行。因此,设置过程不像 AlphaFold2 那样痛苦,模型运行得更快,同时仍保持出色的准确性。 ![蛋白质折叠示例](/blog/assets/119_deep_learning_with_proteins/folding_example.png) _上图为多杀巴斯德氏菌的 **氨基葡萄糖 - 6 - 磷酸脱氨酶** 同源二聚体的预测结构。该结构和可视化图是由上文中的 ESMFold notebook 在几秒钟内生成的。深蓝色表示结构置信度最高的区域。_ 不过,蛋白质折叠并不是我们唯一感兴趣的任务!生物学家可能想做更多的蛋白质分类任务 —— 比如他们想预测蛋白质将在细胞的哪个部分起作用,或者在蛋白质产生后其中哪些氨基酸会被修改。在机器学习的语言中,当你想对整个蛋白质进行分类 (例如,预测其亚细胞定位) 时,这类任务可被建模为 **序列分类 (sequence classification)**; 当你想对每个氨基酸进行分类时 (例如,预测哪些氨基酸会被翻译后修饰 (Post-translational modification,PTM) ),这类任务可被建模为 **词分类 (token classification)**。 不过,关键的一点是,尽管蛋白质与语言非常不同,但它们可以通过几乎完全相同的机器学习方法来处理 —— 在一个大的蛋白质序列数据库上进行大规模预训练,然后通过 **迁移学习** 迁移到其它训练数据可能少得多的任务。事实上,在某些方面它甚至比像 BERT 这样的大型语言模型还要简单,因为不需要复杂的分词和词解析 —— 蛋白质没有分词,因此最简单的方法是直接将每个氨基酸转换成单词。 ## 听起来很酷,但从何下手? 
如果你已经熟悉深度学习,那么你会发现微调蛋白质模型的代码看起来与微调语言模型的代码非常相似。我们提供了 [PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling.ipynb) 和 [TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling-tf.ipynb) 两个示例供你起步。你可以从像 [UniProt](https://www.uniprot.org/) 这样的开放蛋白质数据库中获取大量标注数据,UniProt 除了提供 REST API 接口以供访问数据外还提供了一个漂亮的 Web 界面。你的主要困难是找到有趣的研究方向进行探索,这我就爱莫能助了 —— 但我相信有很多生物学家愿意与你合作! 反之,如果你是一名生物学家,你可能有很多想法想尝试,但可能对深入研究机器学习代码有点害怕。别怕!我们精心设计了示例 ([PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling.ipynb)、[TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling-tf.ipynb)),这些示例中的数据加载部分与其他部分完全独立。这意味着如果你有一个 **序列分类** 或 **词分类** 任务,你只需要构建一个包含蛋白质序列及其应对标签的数据集,然后把我们的数据加载代码换成你自己写的用于加载你的数据集的代码就好了。 尽管示例中使用 [ESM-2](https://www.biorxiv.org/content/10.1101/2022.07.20.500902v1) 作为基础预训练模型,因为它在当前是最先进的。该领域的研究人员可能还熟悉其他模型,如 Rost 实验室的 [ProtBERT](https://huggingface.co/Rostlab/prot_bert) ([论文链接](https://www.biorxiv.org/content/10.1101/2020.07.12.199554v3)) 是同类中最早的模型之一,并且引起了生物信息学界的极大兴趣。只需将示例代码中的 checkpoint 路径从 `facebook/esm2xxx` 改为 `Rostlab/prot_bert` 之类的,示例中的代码就可以使用 ProtBERT 模型了。 ## 结语 深度学习和生物学的交叉领域将在未来几年成为一个非常活跃和成果丰硕的领域。然而,使得深度学习发展如此迅速的原因之一是人们可以快速重现结果并调整新模型以供自己使用。本着这种精神,如果你训练了一个你认为对社区有用的模型,请分享它!上面那些 notebook 中都包含将模型上传到 Hub 的代码,其他研究人员可以在 Hub 上自由访问和构建它们 - 除了对该领域的好处之外,这也可以让你的论文被更多人见到和引用。你甚至可以使用 [Spaces](https://huggingface.co/docs/hub/spaces-overview) 做一个实时的网络演示版,以便其他研究人员可以输入蛋白质序列并免费获得结果,而无需编写一行代码。祝你好运,愿审稿人对你青眼相加!
0
0
hf_public_repos/blog
hf_public_repos/blog/zh/bloom-megatron-deepspeed.md
--- title: "千亿参数开源大模型 BLOOM 背后的技术" thumbnail: /blog/assets/86_bloom_megatron_deepspeed/thumbnail.png authors: - user: stas translators: - user: MatrixYao - user: inferjay proofreader: true --- # 千亿参数开源大模型 BLOOM 背后的技术 > 假设你现在有了数据,也搞到了预算,一切就绪,准备开始训练一个大模型,一显身手了,“一朝看尽长安花”似乎近在眼前 …… 且慢!训练可不仅仅像这两个字的发音那么简单,看看 BLOOM 的训练或许对你有帮助。 近年来,语言模型越训越大已成为常态。大家通常会诟病这些大模型本身的信息未被公开以供研究,但很少关注大模型训练技术这种背后的知识。本文旨在以 1760 亿参数的语言模型 [BLOOM](https://hf.co/bigscience/bloom) 为例,阐明训练此类模型背后的软硬件工程和技术要点,以促进大家对大模型训练技术的讨论。 首先,我们要感谢促成或赞助我们这个小组最终完成了训练 1760 亿参数模型这一惊人壮举的公司、个人和团体。 然后,我们开始讨论硬件配置和主要技术组件。 ![BLOOM](../assets/86_bloom_megatron_deepspeed/bloom-banner.png) 以下是对本项目的简要总结: | | | | :----- | :------------- | | 硬件 | 384 张 80GB A100 GPU | | 软件 | Megatron-DeepSpeed | | 模型架构 | 基于 GPT3 | | 数据集 | 含 59 种语言,共 3500 亿词元 | | 训练时长 | 3.5 个月 | ## 人员组成 该项目由 Thomas Wolf (Hugging Face 联合创始人兼 CSO) 发想,他敢于与大公司竞争,提出不仅要训练出立于世界上最大的多语言模型之林的模型,还要让所有人都可以公开访问训练结果,圆了大多数人的梦想 本文主要关注模型训练的工程方面。BLOOM 背后的技术中最重要的部分是分享专业知识并帮助我们进行编码和训练的人员和公司。 我们主要需要感谢 6 个群体: 1. HuggingFace 的 BigScience 团队投入了六名以上的全职员工全程参与了训练的研究和运行,他们还提供或报销了 Jean Zay 计算机之外的所有基础设施。 2. Microsoft DeepSpeed 团队,开发了 DeepSpeed,后来将其与 Megatron-LM 集成,其开发人员花费数周时间研究项目需求,并在训练前和训练期间提供了许多很棒的实用经验建议。 3. NVIDIA Megatron-LM 团队开发了 Megatron-LM,他们非常乐于回答我们的大量问题并提供一流的使用建议。 4. IDRIS / GENCI 团队管理着 Jean Zay 超级计算机,他们为该项目捐赠了大量的算力和强大的系统管理支持。 5. PyTorch 团队创建了一个超强的框架,其余软件都基于该框架,并且在准备训练期间非常支持我们,修复了多个 bug 并提高了我们所依赖的 PyTorch 组件的训练可用性。 6. BigScience 工程工作组志愿者 很难说出所有为该项目的工程方面做出贡献的杰出人物的名字,所以我只列举 Hugging Face 之外的几个关键人物,他们在过去 14 个月中为该项目奠定了工程基础: Olatunji Ruwase、Deepak Narayanan、Jeff Rasley、Jared Casper、Samyam Rajbhandari 和 Rémi Lacroix 我们也感谢所有允许其员工为该项目做出贡献的公司。 ## 概述 BLOOM 的模型架构与 [GPT3](https://en.wikipedia.org/wiki/GPT-3) 非常相似,只是增加了一些改进,本文稍后将对此进行讨论。 该模型是在 [Jean Zay](http://www.idris.fr/eng/jean-zay/jean-zay-presentation-eng.html) 上训练的,Jean Zay 是由 GENCI 管理的法国政府资助的超级计算机,安装在法国国家科学研究中心 (CNRS) 的国家计算中心 [IDRIS](http://www.idris.fr/)。训练所需的算力由 GENCI 慷慨捐赠给本项目 (捐赠号 2021-A0101012475)。 训练硬件: - GPU: 384 张 NVIDIA A100 80GB GPU (48 个节点) + 32 张备用 GPU - 每个节点 8 张 GPU,4 条 NVLink 卡间互联,4 条 OmniPath 链路 - CPU: AMD EPYC 7543 32 核处理器 - CPU 内存: 每个节点 512GB - GPU 显存: 每个节点 640GB - 节点间连接: 使用 Omni-Path Architecture (OPA) 网卡,网络拓扑为无阻塞胖树 - NCCL - 通信网络: 一个完全专用的子网 - 磁盘 IO 网络: GPFS 与其他节点和用户共享 Checkpoints: - [主 checkpoints](https://huggingface.co/bigscience/bloom) - 每个 checkpoint 含精度为 fp32 的优化器状态和精度为 bf16+fp32 的权重,占用存储空间为 2.3TB。如只保存 bf16 的权重,则仅占用 329GB 的存储空间。 数据集: - 41.5TB 经过大量去重和清洗的文本,包含 46 种语言,最终转换为 350B 个词元 - 模型的词汇表含 250,680 个词元 - 更详细信息,请参阅 [The BigScience Corpus A 1.6TB Composite Multilingual Dataset](https://openreview.net/forum?id=UoEw6KigkUn) 176B BLOOM 模型的训练于 2022 年 3 月至 7 月期间,耗时约 3.5 个月完成 (约 100 万计算时)。 ## Megatron-DeepSpeed 176B BLOOM 模型使用 [Megatron-DeepSpeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed) 进行训练,它结合了两种主要技术: * [DeepSpeed](https://github.com/microsoft/DeepSpeed) 是一个深度学习优化库,让分布式训练变得简单、高效且有效。 * [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) 是由 NVIDIA 的应用深度学习研究团队开发的大型、强大的 transformer 模型框架。 DeepSpeed 团队通过将 DeepSpeed 库中的 ZeRO 分片和流水线并行 (Pipeline Parallelism) 与 Megatron-LM 中的张量并行 (Tensor Parallelism) 相结合,开发了一种基于 3D 并行的方案。有关每个组件的更多详细信息,请参见下表。 请注意,BigScience 的 [Megatron-DeepSpeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed) 是基于原始 [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed) 代码库,我们还在其上添加了不少代码。 下表列出了我们在训练 BLOOM 时各采用了两个框架的哪些组件 | 组件 | DeepSpeed | Megatron-LM | | :---- | :---- | :---- | | [ZeRO 数据并行](#zero-data-parallelism) | 是 | | | [张量并行](#tensor-parallelism) | | 是 | | [流水线并行](#pipeline-parallelism) | 是 | | | 
[BF16 优化器](#bf16optimizer) | 是 | | | [CUDA 融合核函数](#fused-cuda-kernels) | | 是 | | [DataLoader](#datasets) | | 是 | 请注意,Megatron-LM 和 DeepSpeed 都有流水线并行和 BF16 优化器实现,但我们使用 DeepSpeed 的实现,因为它们集成进了 ZeRO。 Megatron-DeepSpeed 实现了 3D 并行以允许大模型以非常有效的方式进行训练。我们简要讨论一下有哪些 3D 组件。 1. 数据并行 (Data Parallelism,DP) - 相同的设置和模型被复制多份,每份每次都被馈送不同的一份数据。处理是并行完成的,所有份在每个训练步结束时同步。 2. 张量并行 (Tensor Parallelism,TP) - 每个张量都被分成多个块,因此张量的每个分片都位于其指定的 GPU 上,而不是让整个张量驻留在单个 GPU 上。在处理过程中,每个分片在不同的 GPU 上分别并行处理,结果在步骤结束时同步。这就是所谓的水平并行,因为是做的水平拆分。 3. 流水线并行 (Pipeline Parallelism,PP) - 模型在多个 GPU 上垂直 (即按层) 拆分,因此只有一个或多个模型层放置在单个 GPU 上。每个 GPU 并行处理流水线的不同阶段,并处理 batch 的一部分数据。 4. 零冗余优化器 (Zero Redundancy Optimizer,ZeRO) - 也执行与 TP 相类似的张量分片,但整个张量会及时重建以进行前向或反向计算,因此不需要修改模型。它还支持各种卸载技术以补偿有限的 GPU 内存。 ## 数据并行 大多数只有几张 GPU 的用户可能比较熟悉 `DistributedDataParallel`(DDP),这是相应的 [PyTorch 文档](https://pytorch.org/docs/master/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel)。在该方法中,模型被完全复制到每个 GPU,然后在每次迭代后所有模型相互同步各自的状态。这种方法可以通过投入更多 GPU 资源的方式加快训练速度,解决问题。但它有个限制,即只有当模型能够放进单个 GPU 时才有效。 ### ZeRO 数据并行 下图很好地描述了 ZeRO 数据并行 (来自这篇 [博文](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/))。 ![DeepSpeed-Image-1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png) 看上去比较高大上,可能让你很难专心去理解,但实际上,这个概念非常简单。这只是通常的 DDP,只是没有每个 GPU 都复制完整的模型参数、梯度和优化器状态,而是每个 GPU 只存储其中的一部分。在随后的运行过程中,当需要给定层的完整层参数时,所有 GPU 同步以相互提供它们缺失的部分 —— 仅此而已。 该组件由 DeepSpeed 实现。 ## 张量并行 在张量并行 (TP) 中,每个 GPU 仅处理张量的一部分,并且仅当某些算子需要完整的张量时才触发聚合操作。 在本节中,我们使用 [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) 论文: [Efficient Large-Scale Language Model Training on GPU Clusters](https://arxiv.org/abs/2104.04473)。 Transformer 类模型的主要模块为: 一个全连接层 `nn.Linear`,后面跟一个非线性激活层 `GeLU`。 沿用 Megatron 论文的符号,我们可以将其点积部分写为 `Y = GeLU (XA)`,其中 `X` 和 `Y` 是输入和输出向量, `A` 是权重矩阵。 如果以矩阵形式表示的话,很容易看出矩阵乘法可以如何在多个 GPU 之间拆分: ![并行 GEMM](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_gemm.png) 如果我们将权重矩阵 `A` 按列拆分到 `N` 个 GPU 上,然后并行执行矩阵乘法 `XA_1` 到 `XA_n`,那么我们最终将得到 `N` 个输出向量 `Y_1、Y_2、…… 、 Y_n` ,它们可以独立输入 `GeLU`: ![independent GeLU](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-independent-gelu.png) 注意因为 `Y` 矩阵是按列拆分的,因此随后的 GEMM 我们可以选择按行拆分方案,这样它就可以直接获取前面层的 GeLU 的输出,而无需任何额外的通信。 使用该原理,我们可以更新任意深度的 MLP,只需在每个 `拆列 - 拆行` 序列之后同步 GPU。Megatron-LM 论文作者为此提供了一个不错的图示: ![并行分片处理](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_shard_processing.png) 这里 `f` 是前向传播中的恒等运算符,后向传播中的 all reduce,而 `g` 是前向传播中的 all reduce 和后向传播中的恒等式。 并行化多头注意力层甚至更简单,因为它们本来就是并行的,因为有多个独立的头! 
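在继续看注意力部分的示意图之前,我们可以用一小段 PyTorch 代码验证上文"按列拆分权重矩阵"的等价性。下面只是一个示意(玩具尺寸由我们随意假设,用 CPU 张量模拟两个分片),并非 Megatron-LM 的真实实现;真实实现会把分片放到不同的 GPU 上,并通过上文提到的 all reduce 来同步结果:

```python
import torch
import torch.nn.functional as F

# 玩具尺寸: batch=4, 输入维度=8, 输出维度=16, "GPU" 数 N=2(这里用 CPU 模拟)
X = torch.randn(4, 8)          # 输入 X
A = torch.randn(8, 16)         # 权重矩阵 A
A_1, A_2 = A.chunk(2, dim=1)   # 按列拆分: A = [A_1 | A_2]

# 每个 "GPU" 只计算自己那一份 Y_i = GeLU(X @ A_i)
Y_1 = F.gelu(X @ A_1)
Y_2 = F.gelu(X @ A_2)

# 把各分片的输出按列拼接,结果与单卡直接计算 GeLU(X @ A) 一致
Y_parallel = torch.cat([Y_1, Y_2], dim=1)
Y_single = F.gelu(X @ A)
print(torch.allclose(Y_parallel, Y_single))  # True
```

后续按行拆分的那一层与此类似,只是每个分片得到的是部分和,需要再做一次求和(也就是 all reduce)才能得到完整输出。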
![并行自注意力](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_self_attention.png) 需要特别考虑的是: 由于前向和后向传播中每层都有两个 all reduce,因此 TP 需要设备间有非常快速的互联。因此,除非你有一个非常快的网络,否则不建议跨多个节点进行 TP。我们训练 BLOOM 的硬件配置中,节点间的速度比 PCIe 慢很多。实际上,如果节点有 4 个 GPU,则最高 TP 度设为 4 比较好。如果需要 TP 度为 8,则需要使用至少有 8 个 GPU 的节点。 该组件由 Megatron-LM 实现。Megatron-LM 最近扩展了张量并行能力,新增了序列并行的能力,用于难以使用前述切分算法的算子,如 LayerNorm。[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/abs/2205.05198) 论文提供了此技术的详细信息。序列并行是在训练 BLOOM 之后开发的,所以 BLOOM 训练时并未采用此技术。 ## 流水线并行 朴素流水线并行 (naive PP) 是将模型各层分组分布在多个 GPU 上,并简单地将数据从 GPU 移动到 GPU,就好像它是一个大型复合 GPU 一样。该机制相对简单 - 将所需层用 `.to()` 方法绑到相应设备,现在只要数据进出这些层,这些层就会将数据切换到与该层相同的设备,其余部分保持不变。 这其实就是垂直模型并行,因为如果你还记得我们是怎么画大多数模型的拓扑图的,我们其实是垂直切分模型各层的。例如,如果下图显示一个 8 层模型: ``` =================== =================== | 0 | 1 | 2 | 3 | | 4 | 5 | 6 | 7 | =================== =================== GPU0 GPU1 ``` 我们将它垂直切成 2 部分,将层 0-3 放置在 GPU0 上,将层 4-7 放置在 GPU1 上。 现在,当数据从第 0 层传到第 1 层、第 1 层传到第 2 层以及第 2 层传到第 3 层时,这就跟单 GPU 上的普通前向传播一样。但是当数据需要从第 3 层传到第 4 层时,它需要从 GPU0 传输到 GPU1,这会引入通信开销。如果参与的 GPU 位于同一计算节点 (例如同一台物理机器) 上,则传输非常快,但如果 GPU 位于不同的计算节点 (例如多台机器) 上,通信开销可能会大得多。 然后第 4 到 5 到 6 到 7 层又像普通模型一样,当第 7 层完成时,我们通常需要将数据发送回标签所在的第 0 层 (或者将标签发送到最后一层)。现在可以计算损失,然后使用优化器来进行更新参数了。 问题: - 该方法为什么被称为 朴素 流水线并行呢,它又有什么缺陷呢?主要是因为该方案在任意给定时刻除了一个 GPU 之外的其他所有 GPU 都是空闲的。因此,如果使用 4 个 GPU,则几乎等同于将单个 GPU 的内存量翻两番,而其他资源 (如计算) 相当于没用上。另外还需要加上在设备之间复制数据的开销。所以 4 张 使用朴素流水线并行的 6GB 卡将能够容纳与 1 张 24GB 卡相同大小的模型,而后者训练得更快,因为它没有数据传输开销。但是,比如说,如果你有 40GB 卡,但需要跑 45GB 模型,你可以使用 4x 40GB 卡 (也就刚刚够用,因为还有梯度和优化器状态需要显存)。 - 共享嵌入可能需要在 GPU 之间来回复制。我们使用的流水线并行 (PP) 与上述朴素 PP 几乎相同,但它解决了 GPU 闲置问题,方法是将传入的 batch 分块为 micros batch 并人工创建流水线,从而允许不同的 GPU 同时参与计算过程。 下图来自于 [GPipe 论文](https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html),其上半部分表示朴素 PP 方案,下半部分是 PP 方法: ![mp-pp](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-gpipe-bubble.png) 从图的下半部分很容易看出 PP 的死区 (指 GPU 处于空闲状态) 更少,即 “气泡” 更少。 图上两种方案的并行度均为 4 ,即由 4 张 GPU 组成流水线。于是就有了 F0、F1、F2、F3 这 4 个管级的前向路径,然后是 B3、B2、B1、B0 的逆序后向路径。 PP 引入了一个新的超参数来调整,称为 `块 (chunks)`。它定义了通过同一管级按顺序发送多少数据块。例如,在图的下半部分,你可以看到 `chunks = 4`。GPU0 在 chunk 0、1、2 和 3 (F0,0、F0,1、F0,2、F0,3) 上执行相同的前向路径,然后等待,等其他 GPU 完成工作后,GPU0 会再次开始工作,为块 3、2、1 和 0 (B0,3、B0,2、B0,1、B0,0) 执行后向路径。 请注意,从概念上讲,这与梯度累积 (gradient accumulation steps,GAS) 的意思相同。PyTorch 叫它 `块`,而 DeepSpeed 叫它 `GAS`。 因为 `块`,PP 引入了 micro-batches (MBS) 的概念。DP 将全局 batch size 拆分为小 batch size,因此如果 DP 度为 4,则全局 batch size 1024 将拆分为 4 个小 batch size,每个小 batch size 为 256 (1024/4)。而如果 `块` (或 GAS) 的数量为 32,我们最终得到的 micro batch size 为 8 (256/32)。每个管级一次处理一个 micro batch。 计算 DP + PP 设置的全局批量大小的公式为: `mbs*chunks*dp_degree` (`8*32*4=1024`). 
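为了更直观一些,我们可以把正文中这个例子的算术用几行 Python 写出来(数值完全取自上文,仅作演示):

```python
# 正文例子: 全局 batch size 1024, DP 度 4, 块数 (即 GAS) 32
global_batch_size = 1024
dp_degree = 4
chunks = 32

batch_per_dp_rank = global_batch_size // dp_degree  # 1024 // 4 = 256
micro_batch_size = batch_per_dp_rank // chunks      # 256 // 32 = 8

# 反过来验证公式 mbs * chunks * dp_degree == 全局 batch size
assert micro_batch_size * chunks * dp_degree == global_batch_size  # 8 * 32 * 4 = 1024
```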
我们回过头再看一下图。 使用 `chunks=1` 你最终得到的是朴素 PP,这是非常低效的。而使用非常大的 `块` 数,你最终会得到很小的微批量大小,这很可能也不是很有效。因此,必须通过实验来找到能最有效地利用 GPU 的 `块` 数。 该图显示存在无法并行化的 “死” 时间气泡,因为最后一个 `forward` 阶段必须等待 `backward` 完成流水。那么,找到最佳的 `块` 数,从而使所有参与的 GPU 达到高的并发利用率,这一问题其实就转化为最小化气泡数了。 这种调度机制被称为 `全前全后`。其他一些可选方案有 [一前一后](https://www.microsoft.com/en-us/research/publication/pipedream-generalized-pipeline-parallelism-for-dnn-training/) 和 [交错一前一后](https://arxiv.org/abs/2104.04473)。 虽然 Megatron-LM 和 DeepSpeed 都有自己的 PP 协议实现,但 Megatron-DeepSpeed 使用的是 DeepSpeed 实现,因为它与 DeepSpeed 的其他功能集成在一起。 这里的另一个重要问题是词嵌入矩阵的大小。虽然通常词嵌入矩阵比 transfomer 块所需的内存更少,但在 BLOOM 有 250k 词汇表的情况下,嵌入层需要 7.2GB 的 bf16 权重,而变换器块仅为 4.9GB。因此,我们不得不让 Megatron-Deepspeed 将嵌入层视为一个转换器块。所以我们有一个 72 级的流水线,其中 2 个是专门用于嵌入的 (第一个和最后一个)。这使得我们可以平衡 GPU 的内存消耗。如果我们不这样做,我们就会让第一级和最后一级消耗很大的 GPU 内存,而 95% 的 GPU 内存使用会很少,因此训练将很不高效。 ## DP+PP DeepSpeed 流水线 [并行教程](https://www.deepspeed.ai/tutorials/pipeline/) 中有一张图演示了如何将 DP 与 PP 结合起来,如下所示。 ![dp-pp-2d](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero-dp-pp.png) 这里重要的是要了解 DP rank 0 是看不见 GPU2 的, DP rank 1 是看不到 GPU3 的。对于 DP 而言,只有 GPU 0 和 1,并向它们馈送数据。GPU0 使用 PP “秘密地” 将它的一些负载卸载到 GPU2。同样地, GPU1 也会得到 GPU3 的帮助。 由于每个维度至少需要 2 个 GPU,因此这儿至少需要 4 个 GPU。 ## DP+PP+TP 为了更高效地训练,可以将 PP、TP 和 DP 相结合,称为 3D 并行,如下图所示。 ![dp-pp-tp-3d](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-deepspeed-3d.png) 此图来自博文《[3D 并行: 扩展到万亿参数模型](https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/)》), 这也是一篇好文章。 由于每个维度至少需要 2 个 GPU,因此在这里你至少需要 8 个 GPU 才能实现完整的 3D 并行。 ## ZeRO DP+PP+TP DeepSpeed 的主要功能之一是 ZeRO,它是 DP 的超级可伸缩增强版,我们在 [ZeRO 数据并行](#ZeRO-数据并行) 一节中已经讨论过了。通常它是一个独立的功能,不需要 PP 或 TP。但它也可以与 PP、TP 结合使用。 当 ZeRO-DP 与 PP (以及 TP) 结合时,它通常只启用 ZeRO 阶段 1,它只对优化器状态进行分片。ZeRO 阶段 2 还会对梯度进行分片,阶段 3 也对模型权重进行分片。 虽然理论上可以将 ZeRO 阶段 2 与 流水线并行 一起使用,但它会对性能产生不良影响。每个 micro batch 都需要一个额外的 reduce-scatter 通信来在分片之前聚合梯度,这会增加潜在的显著通信开销。根据流水线并行的性质,我们会使用小的 micro batch ,并把重点放在算术强度 (micro batch size) 与最小化流水线气泡 (micro batch 的数量) 两者间折衷。因此,增加的通信开销会损害流水线并行。 此外,由于 PP,层数已经比正常情况下少,因此并不会节省很多内存。PP 已经将梯度大小减少了 `1/PP`,因此在此基础之上的梯度分片和纯 DP 相比节省不了多少内存。 ZeRO 阶段 3 也可用于训练这种规模的模型,但是,它需要的通信量比 DeepSpeed 3D 并行更多。一年前,在对我们的环境进行仔细评估后,我们发现 Megatron-DeepSpeed 3D 并行性表现最佳。此后,ZeRO 阶段 3 的性能有了显著提高,如果我们今天要对其进行重新评估,也许我们会选择阶段 3。 ## BF16Optimizer 用 FP16 训练巨型 LLM 模型是一个禁忌。 我们已经通过花费几个月的时间 [训练 104B 模型](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr8-104B-wide) 自证了这一点,你可以从 [Tensorboard](https://huggingface.co/bigscience/tr8-104B-logs/tensorboard) 发现,彻头彻尾地失败了。在与不断发散的 lm-loss 作斗争的过程中,我们学到了很多: ![104B-fail](https://huggingface.co/blog/assets/86_bloom_megatron_deepspeed/104b-lm-loss.png) 我们也从 Megatron-LM 和 DeepSpeed 团队那里得到了相同的建议,在他们训得 [530B 模型](https://arxiv.org/abs/2201.11990) 后。最近发布的 [OPT-175B](https://arxiv.org/abs/2205.01068) 也报告说他们在 FP16 上训练得非常艰难。 所以早在一月份,我们就知道我们要在支持 BF16 格式的 A100 上进行训练。Olatunji Ruwase 开发了一个用来训练 BLOOM 的 `BF16Optimizer`。 如果您不熟悉这种数据格式,请查看它的 [位布局](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format#bfloat16_floating-point_format)。BF16 格式的关键是它的指数位数与 FP32 相同,因此不会溢出,但 FP16 经常溢出!FP16 的最大数值范围为 64k,您只能进行较小数的乘法。例如你可以做 `250*250=62500`,但如果你尝试 `255*255=65025`,你就会溢出,这是导致训练出现问题的主要原因。这意味着你的权重必须保持很小。一种称为损失缩放 (loss scaling) 的技术有助于缓解这个问题,但是当模型变得非常大时,FP16 较小的数值范围仍然是一个问题。 BF16 没有这个问题,你可以很容易地做 `10_000*10_000=100_000_000`, 完全没问题。 当然,由于 BF16 和 FP16 的大小相同,均为 2 个字节,因此,没有免费的午餐,当使用 BF16 时,代价就是它的精度非常差。然而,你应该还记得我们在训练时采用的随机梯度下降法及其变体,该方法有点像蹒跚而行,如果你这步没有找到完美的方向其实没关系,你会在接下来的步骤中纠正自己。 无论使用 BF16 还是 FP16,都有一个权重副本始终在 FP32 中 —— 这是由优化器更新的内容。因此 16 
位格式仅用于计算,优化器以全精度更新 FP32 权重,然后将它们转换为 16 位格式以用于下一次迭代。 所有 PyTorch 组件都已更新,以确保它们在 FP32 中执行任何累加,因此不会发生精度损失。 一个关键问题是梯度累积,它是流水线并行的主要特征之一,因为每个 micro batch 处理的梯度都会累积。在 FP32 中实现梯度累积以保证训练的精确性至关重要,这正是 `BF16Optimizer` 所做的。 除了其他改进之外,我们认为使用 BF16 混合精度训练将潜在的噩梦变成了一个相对平稳的过程,这可以从以下 lm 损失图中看出: ![176B - 损失](https://huggingface.co/blog/assets/86_bloom_megatron_deepspeed/176b-lm-loss.png) ## CUDA 融合核函数 GPU 主要做两件事。它可以将数据写到显存或从显存读数据,并对这些数据执行计算。当 GPU 忙于读写数据时, GPU 的计算单元就会空闲。如果我们想有效地利用 GPU,我们希望将空闲时间降至最低。 核函数是一组实现特定 PyTorch 操作的指令。例如,当你调用 `torch.add` 时,它会通过一个 [PyTorch 调度器](http://blog.ezyang.com/2020/09/lets-talk-about-the-pytorch-dispatcher/),它会根据输入张量及其他变量的取值来决定它应该运行哪些代码,最后运行它。CUDA 核函数使用 CUDA 来实现这些代码,因此只能在 NVIDIA GPU 上运行。 现在,当使用 GPU 计算 `c = torch.add (a, b); e = torch.max ([c,d])` 时,一般情况下,PyTorch 将执行的操作是启动两个单独的核函数,一个执行 `a` 和 `b` 的加法,另一个执行取 `c` 和 `d` 两者的最大值。在这种情况下,GPU 从其显存中获取 `a` 和 `b`,执行加法运算,然后将结果写回显存。然后它获取 `c` 和 `d` 并执行 max 操作,然后再次将结果写回显存。 如果我们要融合这两个操作,即将它们放入一个 “融合核函数” 中,然后启动那个内核,我们不会将中间结果 `c` 写到显存中,而是将其保留在 GPU 寄存器中,并且仅需要获取 `d` 来完成最后的计算。这节省了大量开销并防止 GPU 空闲,因此整个操作会更加高效。 融合核函数就是这样。它们主要将多个离散的计算和进出显存的数据移动替换为有很少数据移动的融合计算。此外,一些融合核函数会对操作进行数学变换,以便可以更快地执行某些计算组合。 为了快速高效地训练 BLOOM,有必要使用 Megatron-LM 提供的几个自定义 CUDA 融合核函数。特别地,有一个 LayerNorm 的融合核函数以及用于融合缩放、掩码和 softmax 这些操作的各种组合的核函数。Bias Add 也通过 PyTorch 的 JIT 功能与 GeLU 融合。这些操作都是瓶颈在内存的,因此将它们融合在一起以达到最大化每次显存读取后的计算量非常重要。因此,例如,在执行瓶颈在内存的 GeLU 操作时同时执行 Bias Add,运行时间并不会增加。这些核函数都可以在 [Megatron-LM repository](https://github.com/NVIDIA/Megatron-LM) 代码库 中找到。 ## 数据集 Megatron-LM 的另一个重要特性是高效的数据加载器。在首次训练启动前,每个数据集中的每个样本都被分成固定序列长度 (BLOOM 为 2048) 的样本,并创建索引以对每个样本进行编号。基于训练超参,我们会确定每个数据集所需要参与的 epoch 数,并基于此创建一个有序的样本索引列表,然后打乱它。举个例子,如果一个数据集中有 10 个样本并应参与 2 个 epoch 的训练,则系统首先按 `[0, ..., 9, 0, ..., 9]` 顺序排好样本索引,然后打乱该顺序为数据集创建最终的全局顺序。请注意,这意味着训练不会简单地遍历整个数据集然后重复,你有可能在看到另一个样本之前看到同一个样本两次,但在训练结束时模型将只看到每个样本两次。这有助于确保整个训练过程中的训练曲线平滑。这些索引,包括每个样本在原始数据集中的偏移量,被保存到一个文件中,以避免每次开始训练时都重新计算它们。最后,可以将其中几个数据集以不同的权重混合到训练最终使用的数据中。 ## 嵌入 LayerNorm 在我们努力阻止 104B 模型发散的过程中,我们发现在第一个层词嵌入层之后添加一个额外的 LayerNorm 可以使训练更加稳定。 该洞察来自对 bitsandbytes 的实验,[bitsandbytes](https://github.com/facebookresearch/bitsandbytes) 有一个 `StableEmbedding` 操作,它是一个带有 LayerNorm 的普通嵌入,其使用均匀 xavier 函数来初始化。 ## 位置编码 基于论文 [Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation](https://arxiv.org/abs/2108.12409),我们还用 AliBi 替换了普通的位置嵌入,它允许外推比训练模型的输入序列更长的输入序列。因此,即使我们训练时使用长度为 2048 的序列,模型也可以在推理过程中处理更长的序列。 ## 训练中的困难 随着架构、硬件和软件的就位,我们得以在 2022 年 3 月上旬开始训练。然而,从那时起,事情其实并非一帆风顺。在本节中,我们将讨论我们遇到的一些主要障碍。 在训练开始之前,有很多问题需要弄清楚。特别是,我们发现了几个问题,这些问题只有在我们开始在 48 个节点上进行训练后才会出现,而不会在小规模时出现。例如,需要设 `CUDA_LAUNCH_BLOCKING=1` 来防止框架挂起,我们需要将优化器组分成更小的组,否则框架会再次挂起。你可以在 [训前编年史](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles-prequel.md) 中详细了解这些内容。 训练期间遇到的主要问题类型是硬件故障。由于这是一个拥有大约 400 个 GPU 的新集群,平均每周我们会遇到 1-2 个 GPU 故障。我们每 3 小时 (100 次迭代) 保存一个检查点。因此,我们每周因硬件崩溃平均损失 1.5 小时的训练成果。Jean Zay 系统管理员随后将更换有故障的 GPU 并恢复节点。与此同时,我们有备用节点可供使用。 我们还遇到过多次导致 5-10 小时停机的各种其他问题,其中一些与 PyTorch 中的死锁错误有关,另一些则是由于磁盘空间不足。如果您对具体细节有兴趣,请参阅 [训练编年史](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md)。 在对训练这个模型进行可行性分析时,所有这些停机时间都被计划在内了,我们也据此选择了合适的模型大小和我们希望模型消耗的数据量。因此,即使存在这些停机问题,我们还是成功地在预计时间内完成了训练。如前所述,它需要大约 100 万个计算时才能完成。 另一个问题是 SLURM 并非设计为供一组人使用。SLURM 作业由单个用户拥有,如果他们不在身边,则该组的其他成员无法对正在运行的作业执行任何操作。我们制定了一个终止方案,允许组中的其他用户终止当前进程,而不需要启动该进程的用户在场。这在 90% 的问题上都很有效。如果 SLURM 设计者读到这篇文章,请添加一个 Unix 组的概念,这样一个 SLURM 作业就可以由一个组拥有。 由于训练是全天候 24/7 进行的,我们需要有人随叫随到 - 但由于我们在欧洲和加拿大西海岸都有人,因此不需要有人携带传呼机,我们能很好地互相备份。当然,周末的训练也得有人看着。我们自动化了大部分事情,包括自动从硬件崩溃中恢复,但有时仍需要人工干预。 ## 结论 
训练中最困难和最紧张的部分是训练开始前的 2 个月。我们承受着尽快开始训练的巨大压力,因为资源分配的时间有限,我们直到最后一刻才接触到 A100。所以这是一个非常困难的时期,考虑到 `BF16Optimizer` 是在最后一刻编写出来的,我们需要调试它并修复各种 bug。正如上一节所述,我们发现了新问题,这些问题只有在我们开始在 48 个节点上进行训练后才会出现,并且不会在小规模时出现。 但是一旦我们把这些整理完,训练本身出奇的顺利,没有出现大的问题。大多数时候,我们只有一个人看着,只有少数几个人参与故障排除。我们得到了 Jean Zay 管理部门的大力支持,他们迅速解决了训练期间出现的大部分需求。 总的来说,这是一次超级紧张但回报颇丰的经历。 训练大型语言模型仍然是一项具有挑战性的任务,但我们希望通过公开构建和共享这项技术,其他人可以借鉴我们的经验。 ## 资源 ### 重要链接 - [主训练文档](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/README.md) - [Tensorboard](https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard) - [训练用的 slurm 脚本](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/tr11-176B-ml.slurm) - [训练记录](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md) ### 论文与文章 我们不可能在本文中详细解释所有内容,因此如果此处介绍的技术激起你的好奇心,使你想了解更多信息,请阅读以下论文: Megatron-LM: - [Efficient Large-Scale Language Model Training on GPU Clusters](https://arxiv.org/abs/2104.04473). - [Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/abs/2205.05198) DeepSpeed: - [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054) - [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840) - [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857) - [DeepSpeed: Extreme-scale model training for everyone](https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/) Megatron-LM 和 Deepspeeed 联合: - [Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model](https://arxiv.org/abs/2201.11990). ALiBi: - [Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation](https://arxiv.org/abs/2108.12409) - [What Language Model to Train if You Have One Million GPU Hours?](https://openreview.net/forum?id=rI7BL3fHIZq) - 你会在那里找到最终使得我们选择 ALiBi 的实验。 BitsNBytes: - [8-bit Optimizers via Block-wise Quantization](https://arxiv.org/abs/2110.02861) (我们使用了该论文中的嵌入 LaynerNorm,但是论文的其他部分及其技术也很妙,我们没用 8 位优化器的唯一原因是我们已经使用 DeepSpeed-ZeRO 节省了优化器内存)。 ## 博文致谢 非常感谢以下这些人,他们提出了很好的问题并帮助提高了文章的可读性 (按字母序): * Britney Muller, * Douwe Kiela, * Jared Casper, * Jeff Rasley, * Julien Launay, * Leandro von Werra, * Omar Sanseviero, * Stefan Schweter and * Thomas Wang. 本文图表主要由 Chunte Lee 创作。
--- title: "用 bitsandbytes、4 比特量化和 QLoRA 打造亲民的 LLM" thumbnail: /blog/assets/96_hf_bitsandbytes_integration/Thumbnail_blue.png authors: - user: ybelkada - user: timdettmers guest: true - user: artidoro guest: true - user: sgugger - user: smangrul translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 用 bitsandbytes、4 比特量化和 QLoRA 打造亲民的 LLM 众所周知,LLM 规模庞大,如果在也能消费类硬件中运行或训练它们将是其亲民化的巨大进步。我们之前撰写的 [LLM.int8 博文](https://huggingface.co/blog/zh/hf-bitsandbytes-integration) 展示了我们是如何将 [LLM.int8 论文](https://arxiv.org/abs/2208.07339) 中的技术通过 `bitsandbytes` 库集成到 `transformers` 中的。在此基础上,我们不断努力以不断降低大模型的准入门槛。在此过程中,我们决定再次与 `bitsandbytes` 联手,支持用户以 4 比特精度运行任何模态 (文本、视觉、多模态等) 上的绝大多数 HF 模型。用户还可以利用 Hugging Face 生态系统中的工具在 4 比特模型之上训练适配器。这一工作基于 Dettmers 等人最近在 QLoRA 这篇论文中介绍的一种新方法,其论文摘要如下: > 我们提出了 QLoRA,这是一种高效的微调方法,可减少内存使用量,使得在单个 48GB GPU 上就可以微调 65B 的模型,而且所得模型的性能与全 16 比特微调相当。QLoRA 通过冻结 4 比特量化的预训练语言模型将梯度反向传播到低秩适配器 (LoRA) 中。我们最好的模型 (我们将其命名为 Guanaco) 仅需在单个 GPU 上进行 24 小时微调,就能在 Vicuna 基准测试中优于所有之前公开发布的模型,且达到了 ChatGPT 性能水平的 99.3%。QLoRA 引入了多项创新技术,在不牺牲性能的情况下节省内存:(a) 4 位 NormalFloat (NF4),一种新的数据类型,在信息论意义上是正态分布权重的最佳表示 (b) 双量化,通过对量化系数进行二次量化来减少平均内存占用,以及 (c) 用于降低峰值内存占用的分页优化器。我们使用 QLoRA 微调了 1000 多个模型,并给出了它们在指令依从、聊天等任务上的详细性能分析,其中涵盖了 8 个指令数据集、多种模型架构 (LLaMA、T5),还包括了无法用常规方法微调的大模型 (例如 33B 和 65B 模型)。结果表明,在小型高质量数据集的进行 QLoRA 微调能带来最先进的性能,且所需的模型尺寸更小。我们使用人类和 GPT-4 对聊天机器人的性能进行了详细评估分析,结果表明 GPT-4 评估是替代人类评估的廉价且合理的方案。此外,我们发现当前的聊天机器人基准测试在准确评估聊天机器人的性能水平这一方面并不十分可信。我们还挑选了一些样本,对 Guanaco 比 ChatGPT 做得不好的案例进行了分析。我们发布了所有模型和代码,包括用于 4 比特训练的 CUDA 核函数。 ## 资源 下面是一些 4 比特模型和 QLoRA 的入门资源: - [原始论文](https://arxiv.org/abs/2305.14314) - [有关 bitsandbytes 基础用法的 Google Colab 笔记本](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf?usp=sharing) - 该笔记本展示了如何对 4 比特模型进行推理,以及如何在免费的 Google Colab 实例上运行 GPT-neo-X 模型 (20B) 🤯。 - [微调的 Google Colab 笔记本](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing) - 该笔记本展示了如何使用 Hugging Face 生态系统在下游任务上微调 4 比特模型。我们证明了可以在 Google Colab 实例上微调 GPT-neo-X 20B! - [用于复现论文结果的原始代码库](https://github.com/artidoro/qlora) - [Guanaco 33B 的演示空间](https://huggingface.co/spaces/uwnlp/guanaco-playground-tgi) - 下文中也包含了这个演示空间。 ## 引言 如果你对模型精度及一些常见的数据类型 (float16、float32、bfloat16、int8) 尚不熟悉,建议你仔细阅读 [我们的第一篇博文](https://huggingface.co/blog/zh/hf-bitsandbytes-integration),这篇博文图文并茂地详细介绍了相关概念的细节。 如需更多信息,建议查阅 [这篇 wikibook 文档](https://en.wikibooks.org/wiki/A-level_Computing/AQA/Paper_2/Fundamentals_of_data_representation/Floating_point_numbers#:~:text=In%20decimal%2C%20very%20large%20numbers,be%20used%20for%20binary%20numbers.) 
以了解浮点表示的基础知识。 QLoRA 论文中探讨了两种不同的数据类型: 4 比特 Float 和 4 比特 NormalFloat。这里我们将讨论 4 比特 Float 数据类型,因为它更容易理解。 FP8 和 FP4 分别代表浮点 8 比特和 4 比特精度。它们属于 minifloats 浮点值系列 (minifloats 系列还包括其他精度,如 bfloat16 和 float16)。 我们先看一下如何用 FP8 格式表示浮点值,然后了解 FP4 格式是什么样子的。 ### FP8 格式 正如之前的博文中所讨论的,n 比特的浮点数中每个比特都属于一个特定类别,负责表示数字的各个组成部分 (符号、尾数和指数)。 [FP8 for Deep Learning](https://arxiv.org/pdf/2209.05433.pdf) 这篇论文首次引入了 FP8 (浮点 8) 格式,其有两种不同的编码方式: E4M3 (4 位指数,3 位尾数) 和 E5M2 (5 位指数,2 位尾数)。 | ![fp8 编码方案](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/bitsandbytes/FP8-scheme.png) | |:--:| | <b>FP8 格式概览。图源: 内容来自于 [`sgugger`](https://huggingface.co/sgugger)</b>| 尽管随着比特数从 32 减少到 8,精度大大降低了,但这两种 8 比特编码仍有很多用武之地。目前,我们可以通过 [Transformer Engine 库](https://github.com/NVIDIA/TransformerEngine) 来使用它们,HF 生态系统的 accelerate 库也集成了 Transformer Engine 库。 E4M3 格式可以表示的浮点数范围为 -448 到 448。而 E5M2 格式因为增加了指数位数,其表示范围扩大为 -57344 到 57344 - 但其相对 E4M3 而言精度会有损失,因为两者可表示的数的个数保持不变。经验证明,E4M3 最适合前向计算,E5M2 最适合后向计算。 ### FP4 精度简述 符号位表示符号 (+/-),指数位转译成该部分所表示的整数的 2 次方 (例如 `2^{010} = 2^{2} = 4` )。分数或尾数位表示成 -2 的幂的总和,如果第 i 位为 `1` ,则和加上 `2^-i` ,否则保持不变,这里 i 是该位在比特序列中的位置。例如,对于尾数 1010,我们有 `(2^-1 + 0 + 2^-3 + 0) = (0.5 + 0.125) = 0.625` ,然后,我们给分数加上一个 _1_ ,得到 `1.625` 。最后,再将所有结果相乘。举个例子,使用 2 个指数位和 1 个尾数位,编码 1101 对应的数值为: `-1 * 2^(2)*(1 + 2^-1) = -1 * 4 * 1.5 = -6` FP4 没有固定的格式,因此可以尝试不同尾数/指数的组合。一般来说,在大多数情况下,3 个指数位的效果会更好一些。但某些情况下,2 个指数位加上 1 个尾数位性能会更好。 ## QLoRA,经由量化实现大模型自由的新途径 简而言之,与标准 16 比特模型微调相比,QLoRA 在不牺牲性能的前提下减少了 LLM 微调的内存使用量。使用该方法,我们可在单个 24GB GPU 上微调 33B 模型,还可以在单个 46GB GPU 上微调 65B 模型。 更具体地说,QLoRA 使用 4 比特量化来压缩预训练的语言模型。然后冻结基础模型的参数,并将相对少量的可训练参数以低秩适配器的形式添加到模型中。在微调过程中,QLoRA 通过冻结的 4 比特量化预训练语言模型将梯度反向传播到低秩适配器中。LoRA 层的权重是训练期间唯一可更新的参数。你可阅读 [原始 LoRA 论文](https://arxiv.org/abs/2106.09685) 以了解更多有关 LoRA 的信息。 QLoRA 有一个用于存储基础模型权重的数据类型 (通常为 4 比特 NormalFloat) 和一个用于执行计算的数据类型 (16 比特 BrainFloat)。QLoRA 将权重从存储数据类型反量化为计算数据类型,以执行前向和后向传播,但仅计算 bfloat16 的 LoRA 参数的权重梯度。权重仅在需要时才解压缩,因此在训练和推理期间内存使用率都能保持较低水平。 广泛的实验表明 QLoRA 微调与 16 比特微调的性能旗鼓相当。此外,在 [OpenAssistant 数据集 (OASST1)](https://huggingface.co/datasets/OpenAssistant/oasst1) 上对 LLaMA 模型使用 QLoRA 微调而得的 Guanaco 模型是目前最先进的聊天机器人系统,其在 Vicuna 基准测试中表现接近 ChatGPT。这是 QLoRA 微调威力的进一步展示。 ## 如何在 transformers 中使用它? 在本节中,我们将介绍该方法在 `transformers` 中的集成、如何使用它以及目前支持的模型。 ### 入门 作为快速入门,我们可以从源代码安装 `accelerate` 和 `transformers` ,以加载 4 比特模型,另请确保已安装最新版本的 `bitsandbytes` 库 (0.39.0)。 ```bash pip install -q -U bitsandbytes pip install -q -U git+https://github.com/huggingface/transformers.git pip install -q -U git+https://github.com/huggingface/peft.git pip install -q -U git+https://github.com/huggingface/accelerate.git ``` ### 快速开始 以 4 比特加载模型的基本方法是通过在调用 `from_pretrained` 方法时传递参数 `load_in_4bit=True` ,并将设备映射设置成 `“auto”` 。 ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True, device_map="auto") ... ``` 这样就行了! 
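作为上面快速开始代码的补充,下面给出一个最小化的推理草图,演示如何用刚刚以 4 比特加载的模型生成文本。注意这只是一个示意: 其中的提示语与生成参数均为演示用途,可按需调整。

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "facebook/opt-350m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, device_map="auto")

# 提示语与生成参数仅作演示,可按需修改
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```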
一般地,我们建议用户在使用 `device_map` 加载模型后不要手动设置设备。因此,在该行之后应避免对模型或模型的任何子模块进行任何设备分配 - 除非你知道自己在做什么。 请记住,加载量化模型会自动将模型的其他子模块转换为 `float16` 数据类型。你可以通过将 `torch_dtype=dtype` 传递给 `from_pretrained` 方法来修改此行为 (例如,如果你希望在层规一化算子中使用 `float32` )。 ### 高级用法 你可以使用 4 比特量化的不同变体,例如 NF4 (NormalFloat4 (默认) ) 或纯 FP4 量化。从理论分析和实证结果来看,我们建议使用 NF4 量化以获得更好的性能。 其他选项包括 `bnb_4bit_use_double_quant` ,它在第一轮量化之后会进行第二轮量化,为每个参数额外节省 0.4 比特。最后是计算类型,虽然 4 比特 bitsandbytes 以 4 比特存储权重,但计算仍然以 16 或 32 比特进行,这里可以选择任意组合 (float16、bfloat16、float32 等)。 如果使用 16 比特计算数据类型 (默认 torch.float32),矩阵乘法和训练将会更快。用户应该利用 transformers 中最新的 `BitsAndBytesConfig` 来更改这些参数。下面是使用 NF4 量化加载 4 比特模型的示例,例子中使用了双量化以及 bfloat16 计算数据类型以加速训练: ```python from transformers import BitsAndBytesConfig nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16 ) model_nf4 = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=nf4_config) ``` #### 更改计算数据类型 如上所述,你还可以通过更改 `BitsAndBytesConfig` 中的 `bnb_4bit_compute_dtype` 参数来更改量化模型的计算数据类型。 ```python import torch from transformers import BitsAndBytesConfig quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16 ) ``` #### 嵌套量化 要启用嵌套量化,你可以使用 `BitsAndBytesConfig` 中的 `bnb_4bit_use_double_quant` 参数。这将会在第一轮量化之后启用第二轮量化,以便每个参数额外节省 0.4 比特。我们在上文提及的微调 Google Colab 笔记本中也使用了此功能。 ```python from transformers import BitsAndBytesConfig double_quant_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, ) model_double_quant = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=double_quant_config) ``` 当然,正如本节开头提到的,所有这些功能都是可任意组合的。你可以将所有这些参数组合在一起,找到最适合你的配置。经验法则是: 如果内存有限制,使用双量化; 使用 NF4 以获得更高的精度; 使用 16 比特浮点加快微调速度。作为一个例子,在 [推理演示应用](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf?usp=sharing) 中,我们使用嵌套量化、bfloat16 计算数据类型以及 NF4 量化在单个 16GB GPU 中使用 4 比特完成了对 gpt-neo-x-20b (40GB) 模型的拟合。 ### 常见问题 本节我们来回答一些常见问题。 #### FP4 量化有硬件要求吗? 请注意,此方法仅与 GPU 兼容,目前尚无法在 CPU 上对模型进行 4 比特量化。在 GPU 中,此方法没有任何硬件要求,只要安装了 CUDA>=11.2,任何 GPU 都可以用于运行 4 比特量化。 另请记住,计算不是以 4 比特完成的,仅仅是权重和激活被压缩为该格式,而计算仍在指定的或者原始数据类型上进行。 #### 支持哪些模型? 与 [这篇博文](https://huggingface.co/blog/zh/hf-bitsandbytes-integration) 中介绍的 LLM.int8 集成类似,我们的集成严重依赖于 `accelerate` 库。因此,任何支持 `accelerate` 库加载的模型 (即调用 `from_pretrained` 时支持 `device_map` 参数) 都可以进行 4 比特量化。另请注意,这与模态完全无关,只要可以使用 `device_map` 参数加载模型,就可以量化它们。 对于文本模型,截至本文撰写时,最常用的架构都是支持的,例如用于纯文本的 Llama、OPT、GPT-Neo、GPT-NeoX,用于多模态的 Blip2 等。 截至本文撰写时,支持 `accelerate` 的模型有: ```python [ 'bigbird_pegasus', 'blip_2', 'bloom', 'bridgetower', 'codegen', 'deit', 'esm', 'gpt2', 'gpt_bigcode', 'gpt_neo', 'gpt_neox', 'gpt_neox_japanese', 'gptj', 'gptsan_japanese', 'lilt', 'llama', 'longformer', 'longt5', 'luke', 'm2m_100', 'mbart', 'mega', 'mt5', 'nllb_moe', 'open_llama', 'opt', 'owlvit', 'plbart', 'roberta', 'roberta_prelayernorm', 'rwkv', 'switch_transformers', 't5', 'vilt', 'vit', 'vit_hybrid', 'whisper', 'xglm', 'xlm_roberta' ] ``` 请注意,如果你最喜欢的模型不在列表中,你可以提交一个 PR 或在 transformers 中提交一个问题,以添加对该架构的 accelerate 加载的支持。 #### 我们可以训练 4 比特 / 8 比特模型吗? 
对这些模型进行全模型 4 比特训练是不可能的。但是,你可以利用参数高效微调 (PEFT) 来训练这些模型,即在基础模型之上训练新增部分如适配器。QLoRA 论文就是这么做的,Hugging Face 的 PEFT 库也正式支持了该方法。我们提供了相应的 [微调笔记本](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing)。如果大家想要复现论文的结果,还可以查阅 [QLoRA 代码库](https://github.com/artidoro/qlora)。 | ![lora 图例](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/blog/133_trl_peft/lora-animated.gif) | |:--:| | <b>原始 (冻结的) 预训练权重 (左侧) 的输出激活需要加上低秩适配器的输出,这个低秩适配器由矩阵 A 和 B 权重组成 (右侧)。</b>| #### 这项工作还有什么其他意义? 这项工作可以为社区和人工智能研究带来一些积极的影响,因为它可以影响很多可能的用法或应用场景。在 RLHF (基于人类反馈的强化学习) 中,可以加载单个 4 比特基础模型,并在其上训练多个适配器,一个用于奖励建模,另一个用于价值策略训练。我们很快就会发布关于此用法的更详细的博文。 我们还针对这种量化方法对在消费类硬件上训练大模型的影响涉及了一些基准测试。我们在英伟达 T4 (16GB) 上对 2 种不同的架构 Llama 7B (fp16 时,模型大小为 15GB) 和 Llama 13B (fp16 时,模型大小为 27GB) 进行了多次微调实验,结果如下: | 模型 | 半精度模型大小(GB)| 硬件 / 总显存 | 量化方法(CD = 计算数据类型 / GC = 梯度 checkpointing / NQ = 双量化)| batch size | 梯度累积步数 | 优化器 | 序列长度 | 结果 | | ----------------------------------- | --------------------------------- | -------------------------- | ------------------------------------------------------------------------------------------- | ---------- | --------------------------- | ----------------- | ------- | ------ | | | | | | | | | | | | <10B 模型 | | | | | | | | | | decapoda-research/llama-7b-hf | 14GB | 1xNVIDIA-T4 / 16GB | LLM.int8 (8-bit) + GC | 1 | 4 | AdamW | 512 | **无 OOM** | | decapoda-research/llama-7b-hf | 14GB | 1xNVIDIA-T4 / 16GB | LLM.int8 (8-bit) + GC | 1 | 4 | AdamW | 1024 | OOM | | decapoda-research/llama-7b-hf | 14GB | 1xNVIDIA-T4 / 16GB | 4bit + NF4 + bf16 CD + no GC | 1 | 4 | AdamW | 512 | **无 OOM** | | decapoda-research/llama-7b-hf | 14GB | 1xNVIDIA-T4 / 16GB | 4bit + FP4 + bf16 CD + no GC | 1 | 4 | AdamW | 512 | **无 OOM** | | decapoda-research/llama-7b-hf | 14GB | 1xNVIDIA-T4 / 16GB | 4bit + NF4 + bf16 CD + no GC | 1 | 4 | AdamW | 1024 | OOM | | decapoda-research/llama-7b-hf | 14GB | 1xNVIDIA-T4 / 16GB | 4bit + FP4 + bf16 CD + no GC | 1 | 4 | AdamW | 1024 | OOM | | decapoda-research/llama-7b-hf | 14GB | 1xNVIDIA-T4 / 16GB | 4bit + NF4 + bf16 CD + GC | 1 | 4 | AdamW | 1024 | **无 OOM** | | | | | | | | | | | | 10B+ 模型 | | | | | | | | | | decapoda-research/llama-13b-hf | 27GB | 2xNVIDIA-T4 / 32GB | LLM.int8 (8-bit) + GC | 1 | 4 | AdamW | 512 | **无 OOM** | | decapoda-research/llama-13b-hf | 27GB | 1xNVIDIA-T4 / 16GB | LLM.int8 (8-bit) + GC | 1 | 4 | AdamW | 512 | OOM | | decapoda-research/llama-13b-hf | 27GB | 1xNVIDIA-T4 / 16GB | 4bit + FP4 + bf16 CD + no GC | 1 | 4 | AdamW | 512 | OOM | | decapoda-research/llama-13b-hf | 27GB | 1xNVIDIA-T4 / 16GB | 4bit + FP4 + fp16 CD + no GC | 1 | 4 | AdamW | 512 | OOM | | decapoda-research/llama-13b-hf | 27GB | 1xNVIDIA-T4 / 16GB | 4bit + NF4 + fp16 CD + GC | 1 | 4 | AdamW | 512 | **无 OOM** | | decapoda-research/llama-13b-hf | 27GB | 1xNVIDIA-T4 / 16GB | 4bit + NF4 + fp16 CD + GC | 1 | 4 | AdamW | 1024 | OOM | | decapoda-research/llama-13b-hf | 27GB | 1xNVIDIA-T4 / 16GB | 4bit + NF4 + fp16 CD + GC + NQ | 1 | 4 | AdamW | 1024 | **无 OOM** | 我们使用了 TRL 库中最新的 `SFTTrainer` ,你可以在 [此处](https://gist.github.com/younesbelkada/f48af54c74ba6a39a7ae4fd777e72fe8) 找到基准测试脚本。 ## 演示空间 想试试论文中的 Guananco 模型的话,可以玩玩这个 [演示空间](https://huggingface.co/spaces/uwnlp/guanaco-playground-tgi),我们还把它直接嵌入到了下面供你直接把玩。 <!-- [SPACE WITH GREEDY DECODING PERFORMANCE NUMBERS] --> <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.32.0/gradio.js" ></script> <gradio-app theme_mode="light" space="uwnlp/guanaco-playground-tgi"></gradio-app> ## 致谢 HF 团队感谢华盛顿大学参与该项目的所有人员,感谢他们向社区贡献了他们的工作。 
作者还要感谢 [Pedro Cuenca](https://huggingface.co/pcuenq) 帮忙审阅了博文,并感谢 [Olivier Dehaene](https://huggingface.co/olivierdehaene) 和 [Omar Sanseviero](https://huggingface.co/osanseviero) 对在 HF Hub 上集成该论文提供了快速而有力的支持。
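附: 上文提到,可以借助 PEFT 在 4 比特量化的基础模型之上训练适配器。下面给出一个最小化草图,演示如何把一个 4 比特模型包装成带 LoRA 适配器、可进行训练的形式。模型名与超参数仅作演示,并非论文或微调笔记本中的原始配置,完整的端到端示例请参考上文给出的微调笔记本与 QLoRA 代码库。

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# 模型名仅作演示,可替换为任意支持 device_map 加载的因果语言模型
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m", quantization_config=bnb_config, device_map="auto"
)
model = prepare_model_for_kbit_training(model)

# LoRA 超参数仅作演示
lora_config = LoraConfig(
    r=8, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM"
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # 只有 LoRA 适配器的参数是可训练的,基础模型保持冻结
```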
--- title: "万事通,专精部分领域的多功能 Transformer 智能体" thumbnail: /blog/assets/jat/thumbnail.png authors: - user: qgallouedec - user: edbeeching - user: ClementRomac - user: thomwolf translators: - user: xiaodouzi - user: zhongdongy proofreader: true --- # 万事通,专精部分领域的多功能 Transformer 智能体 ## 介绍 我们很高兴分享“万事通”(Jack of All Trades,简称 JAT) 项目,该项目旨在朝着通用智能体的方向发展。该项目最初是作为对 [Gato](https://huggingface.co/papers/2205.06175) (Reed 等,2022 年) 工作的公开复现启动的,Gato 提出训练一种能够执行视觉与语言以及决策任务的 Transformer。于是我们首先构建了 Gato 数据集的开放版本。随后,我们在此基础上训练了多模态 Transformer 模型,并针对处理顺序数据和连续值引入了若干改进。 总体而言,该项目取得了以下成果: - 发布了大量在各种任务上表现优异的 **专家 RL 智能体**。 - 发布了 **JAT 数据集**,这是第一个用于通用智能体训练的数据集。它包含了由专家智能体收集的数十万条专家轨迹。 - 发布了 **JAT 模型**,这是一种基于 Transformer 的智能体,能够玩电子游戏、控制机器人执行各种任务、理解并在简单的导航环境中执行命令等! <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/jat/global_schema.gif" alt="Global schema"/> ## 数据集和专家策略 ### 专家策略 传统的强化学习 (RL) 涉及在单一环境中训练策略。利用这些专家策略是构建多功能智能体的有效方法。我们选择了各种性质和难度不同的环境,包括 Atari、BabyAI、Meta-World 和 MuJoCo。在每个环境中,我们训练一个智能体,直到它达到最先进的性能水平。(对于 BabyAI,我们使用的是 [BabyAI bot](https://github.com/mila-iqia/babyai))。这些训练结果被称为专家智能体,并已在🤗 Hub 上发布。您可以在 [JAT 数据集卡](https://huggingface.co/datasets/jat-project/jat-dataset) 中找到所有智能体的列表。 ### JAT 数据集 我们发布了 [JAT 数据集](https://huggingface.co/datasets/jat-project/jat-dataset),这是第一个用于通用智能体训练的数据集。JAT 数据集包含由上述专家智能体收集的数十万条专家轨迹。要使用此数据集,只需像从🤗 Hub 加载任何其他数据集一样加载它: ```python >>> from datasets import load_dataset >>> dataset = load_dataset("jat-project/jat-dataset", "metaworld-assembly") >>> first_episode = dataset["train"][0] >>> first_episode.keys() dict_keys(['continuous_observations', 'continuous_actions', 'rewards']) >>> len(first_episode["rewards"]) 500 >>> first_episode["continuous_actions"][0] [6.459120273590088, 2.2422609329223633, -5.914587020874023, -19.799840927124023] ``` 除了强化学习 (RL) 数据,我们还包含了文本数据集,以为用户提供独特的界面。因此,您还会发现 [Wikipedia](https://huggingface.co/datasets/wikipedia)、[Oscar](https://huggingface.co/datasets/oscar)、[OK-VQA](https://okvqa.allenai.org/) 和 [Conceptual-Captions](https://huggingface.co/datasets/conceptual_captions) 的子集。 ## JAT 智能体架构 JAT 的架构基于 Transformer,使用了 [EleutherAI 的 GPT-Neo 实现](https://huggingface.co/docs/transformers/model_doc/gpt_neo)。JAT 的特别之处在于其嵌入机制,该机制专门用于内在地处理顺序决策任务。我们将观测嵌入与动作嵌入交错排列,并结合相应的奖励。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/blog/jat/model.svg" width="100%" alt="Model"> <figcaption>JAT 网络的架构。在顺序决策任务中,一方面将观测和奖励编码,另一方面将动作编码并交错排列。模型使用因果掩码自回归地生成下一个嵌入,并根据预期的模态进行解码。</figcaption> </figure> 每个嵌入因此对应于一个观测 (与奖励相关联) 或一个动作。那么 JAT 是如何编码这些信息的呢?这取决于数据的类型。如果数据 (观测或动作) 是图像 (如在 Atari 中的情况),那么 JAT 使用 CNN。如果是连续向量,则 JAT 使用线性层。最后,如果是离散值,JAT 使用线性投影层。同样的原理也用于模型输出,具体取决于要预测的数据类型。预测是因果的,将观测值移位一个时间步长。通过这种方式,智能体必须根据所有先前的观测和动作来预测下一个动作。 此外,我们认为让我们的智能体执行 NLP 和 CV 任务会很有趣。为此,我们还让编码器可以选择将文本和图像数据作为输入。对于文本数据,我们使用 GPT-2 的标记化策略,对于图像,我们使用 [ViT](https://huggingface.co/docs/transformers/model_doc/vit) 类型的编码器。 考虑到数据的模态可能因环境而异,JAT 如何计算损失呢?它分别计算每种模态的损失。对于图像和连续值,它使用 MSE 损失。对于离散值,它使用交叉熵损失。最终损失是序列中每个元素损失的平均值。 等等,这是否意味着我们对预测动作和观测赋予了相等的权重?实际上并不是这样,但我们将在 [下文](https://chatgpt.com/g/g-5bNPpaVZy-translate-gpt/c/1b2d0139-5625-418c-9bbe-1fb201b4084d#the-surprising-benefits-of-predicting-observations) 中详细讨论。 ## 实验与结果 我们在所有 157 个训练任务上评估 JAT。我们收集了 10 个回合的数据并记录总奖励。为了便于阅读,我们按领域汇总结果。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/blog/jat/score_steps.svg" alt="Score evolution" width="100%;"> <figcaption>每个 RL 领域的汇总专家标准化得分及其 95%置信区间 (CI),作为学习步数的函数。</figcaption> 
</figure> 如果要用一个数字来总结这些结果,那就是 65.8%,这是在 4 个领域中相对于 JAT 专家的平均表现。这表明 JAT 能够在各种任务中模仿专家的表现。让我们更详细地看看: - 对于 Atari 57,智能体达到了专家得分的 14.1%,相当于人类表现的 37.6%。在 21 个游戏中超过了人类表现。 - 对于 BabyAI,智能体达到了专家得分的 99.0%,仅在 1 个任务上未能超过专家得分的 50%。 - 对于 Meta-World,智能体达到了专家得分的 65.5%。 - 对于 MuJoCo,智能体达到了专家得分的 84.8%。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/blog/jat/human_normalized_atari_jat_small_250000.svg" alt="Score evolution" width="100%" > <figcaption>JAT 智能体在 Atari 57 基准测试中的人类标准化得分。</figcaption> </figure> 最令人印象深刻的是,JAT 在所有领域中使用 **单一网络** 实现了这一性能。为了衡量这一性能,让我们来看看 JAT 在一些任务中的渲染效果: <figure class="image flex flex-col items-center text-center m-0 w-full"> <video alt="jat_hf.mp4" autoplay loop autobuffer muted playsinline> <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/jat/jat_hf.mp4" type="video/mp4"> </video> <figcaption></figcaption> </figure> 想试试吗?你可以的![JAT 模型](https://huggingface.co/jat-project/jat) 已在 🤗 Hub 上提供! 我们的模型显示了初步的文本任务处理能力,详情请参阅 [论文](https://huggingface.co/papers/2402.09844)。 ### 预测观测值的惊人好处 在训练 RL 智能体时,主要目标是最大化未来奖励。但是,如果我们还要求智能体预测它将来会观测到的内容,这个额外的任务会帮助还是妨碍学习过程呢? 对于这个问题有两种对立的观点。一方面,学习预测观测值可以提供对环境更深入的理解,从而导致更好更快的学习。另一方面,这可能会使智能体偏离其主要目标,导致在观测和动作预测方面的表现平平。 为了解决这一争论,我们进行了一个实验,使用了一个结合观测损失和动作损失的损失函数,并用一个加权参数 \( \kappa \) 来平衡这两个目标。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/blog/jat/kappa_aggregated.svg" width="100%" alt="Kappa Aggregated"> <figcaption>对于所选任务的观测预测学习影响研究的汇总度量及 95%置信区间 (CI)。结果覆盖所选的 \( \kappa \) 值范围,并基于每个任务 100 次评估。选择最佳的 \( \kappa \) 值可以显著提高智能体的性能。</figcaption> </figure> 结果非常显著。当 \( \kappa \) 值过高 (0.5) 时,预测观测的额外目标似乎阻碍了学习过程。但是,当 \( \kappa \) 值较低时,对学习的影响可以忽略不计,智能体的表现与不将预测观测作为目标时相似。 然而,我们发现 \( \kappa = 0.005 \) 左右是一个最佳点,此时学习预测观测实际上提高了智能体的学习效率。 我们的研究表明,只要平衡得当,将预测观测添加到学习过程中是有益的。这一发现对这类智能体的设计有重要意义,强调了辅助目标在提高学习效率方面的潜在价值。 所以,下次训练 RL 智能体时,可以考虑让它预测将来会观测到的内容。这可能会带来更好的表现和更快的学习速度! ## 结论 在这项工作中,我们介绍了 JAT,一个能够掌握各种顺序决策任务并在 NLP 和 CV 任务中表现出初步能力的多用途 Transformer 智能体。对于所有这些任务,JAT 都使用单一网络。我们的贡献包括发布专家级 RL 智能体、JAT 数据集和 JAT 模型。我们希望这项工作能够激发未来在通用智能体领域的研究,并有助于开发更多功能和更强大的 AI 系统。 ## 下一步是什么?研究请求 我们相信,JAT 项目为通用智能体领域的研究开辟了新的方向,而我们只是刚刚开始。以下是一些未来工作的想法: - **改进数据**: 尽管具有开创性,JAT 数据集仍处于初期阶段。专家轨迹仅来自每个环境中的一个专家智能体,这可能会导致一些偏差。尽管我们尽力达到了最先进的性能,但有些环境仍然具有挑战性。我们相信,收集更多的数据和训练更多的专家智能体将会 **大有帮助**。 - **使用离线 RL**: JAT 智能体是使用基本的行为克隆训练的。这意味着两件事: (1) 我们无法利用次优轨迹,(2) JAT 智能体不能超过专家的表现。我们选择这种方法是为了简单,但我们相信使用离线 RL 可以 **大大提高** 智能体的性能,同时实现起来也不会太复杂。 - **释放更聪明的多任务采样策略的全部潜力**: 目前,JAT 智能体从所有任务中均匀采样数据,但这种方法可能会限制其表现。通过动态调整采样率以集中于最具挑战性的任务,我们可以加速智能体的学习过程并释放 **显著的性能提升**。 ## 相关链接 - 📄 [论文](https://huggingface.co/papers/2402.09844) - 💻 [源码](https://github.com/huggingface/jat) - 🗂️ [JAT 数据集](https://huggingface.co/datasets/jat-project/jat-dataset) - 🤖 [JAT 模型](https://huggingface.co/jat-project/jat) ## 引文 ```bibtex @article{gallouedec2024jack, title = {{Jack of All Trades, Master of Some, a Multi-Purpose Transformer Agent}}, author = {Gallouédec, Quentin and Beeching, Edward and Romac, Clément and Dellandréa, Emmanuel}, journal = {arXiv preprint arXiv:2402.09844}, year = {2024}, url = {https://arxiv.org/abs/2402.09844} } ```
--- title: "混合专家模型(MoE)详解" thumbnail: /blog/assets/moe/thumbnail.png authors: - user: osanseviero - user: lewtun - user: philschmid - user: smangrul - user: ybelkada - user: pcuenq translators: - user: xinyu66 - user: zhongdongy proofreader: true --- # 混合专家模型 (MoE) 详解 随着 Mixtral 8x7B ([announcement](https://mistral.ai/news/mixtral-of-experts/), [model card](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)) 的推出,一种称为混合专家模型 (Mixed Expert Models,简称 MoEs) 的 Transformer 模型在开源人工智能社区引起了广泛关注。在本篇博文中,我们将深入探讨 MoEs 的核心组件、训练方法,以及在推理过程中需要考量的各种因素。 让我们开始吧! ## 目录 - [什么是混合专家模型?](#什么是混合专家模型) - [混合专家模型简史](#混合专家模型简史) - [什么是稀疏性?](#什么是稀疏性) - [混合专家模型中令牌的负载均衡](#混合专家模型中令牌的负载均衡) - [MoEs and Transformers](#moes-and-transformers) - [Switch Transformers](#switch-transformers) - [用Router z-loss稳定模型训练](#用router-z-loss稳定模型训练) - [专家如何学习?](#专家如何学习) - [专家的数量对预训练有何影响?](#专家的数量对预训练有何影响) - [微调混合专家模型](#微调混合专家模型) - [稀疏 VS 稠密,如何选择?](#稀疏-VS-稠密如何选择) - [让MoE起飞](#让moe起飞) - [并行计算](#并行计算) - [容量因子和通信开销](#容量因子和通信开销) - [部署技术](#部署技术) - [高效训练](#高效训练) - [开源混合专家模型](#开源混合专家模型) - [一些有趣的研究方向](#一些有趣的研究方向) - [相关资源](#相关资源) ## 简短总结 混合专家模型 (MoEs): - 与稠密模型相比, **预训练速度更快** - 与具有相同参数数量的模型相比,具有更快的 **推理速度** - 需要 **大量显存**,因为所有专家系统都需要加载到内存中 - 在 **微调方面存在诸多挑战**,但 [近期的研究](https://arxiv.org/pdf/2305.14705.pdf) 表明,对混合专家模型进行 **指令调优具有很大的潜力**。 让我们开始吧! ## 什么是混合专家模型? 模型规模是提升模型性能的关键因素之一。在有限的计算资源预算下,用更少的训练步数训练一个更大的模型,往往比用更多的步数训练一个较小的模型效果更佳。 混合专家模型 (MoE) 的一个显著优势是它们能够在远少于稠密模型所需的计算资源下进行有效的预训练。这意味着在相同的计算预算条件下,您可以显著扩大模型或数据集的规模。特别是在预训练阶段,与稠密模型相比,混合专家模型通常能够更快地达到相同的质量水平。 那么,究竟什么是一个混合专家模型 (MoE) 呢?作为一种基于 Transformer 架构的模型,混合专家模型主要由两个关键部分组成: - **稀疏 MoE 层**: 这些层代替了传统 Transformer 模型中的前馈网络 (FFN) 层。MoE 层包含若干“专家”(例如 8 个),每个专家本身是一个独立的神经网络。在实际应用中,这些专家通常是前馈网络 (FFN),但它们也可以是更复杂的网络结构,甚至可以是 MoE 层本身,从而形成层级式的 MoE 结构。 - **门控网络或路由**: 这个部分用于决定哪些令牌 (token) 被发送到哪个专家。例如,在下图中,“More”这个令牌可能被发送到第二个专家,而“Parameters”这个令牌被发送到第一个专家。有时,一个令牌甚至可以被发送到多个专家。令牌的路由方式是 MoE 使用中的一个关键点,因为路由器由学习的参数组成,并且与网络的其他部分一同进行预训练。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/00_switch_transformer.png" alt="Switch Layer"> <figcaption>[Switch Transformers paper](https://arxiv.org/abs/2101.03961) 论文中的 MoE layer</figcaption> </figure> 总结来说,在混合专家模型 (MoE) 中,我们将传统 Transformer 模型中的每个前馈网络 (FFN) 层替换为 MoE 层,其中 MoE 层由两个核心部分组成: 一个门控网络和若干数量的专家。 尽管混合专家模型 (MoE) 提供了若干显著优势,例如更高效的预训练和与稠密模型相比更快的推理速度,但它们也伴随着一些挑战: - **训练挑战**: 虽然 MoE 能够实现更高效的计算预训练,但它们在微调阶段往往面临泛化能力不足的问题,长期以来易于引发过拟合现象。 - **推理挑战**: MoE 模型虽然可能拥有大量参数,但在推理过程中只使用其中的一部分,这使得它们的推理速度快于具有相同数量参数的稠密模型。然而,这种模型需要将所有参数加载到内存中,因此对内存的需求非常高。以 Mixtral 8x7B 这样的 MoE 为例,需要足够的 VRAM 来容纳一个 47B 参数的稠密模型。之所以是 47B 而不是 8 x 7B = 56B,是因为在 MoE 模型中,只有 FFN 层被视为独立的专家,而模型的其他参数是共享的。此外,假设每个令牌只使用两个专家,那么推理速度 (以 FLOPs 计算) 类似于使用 12B 模型 (而不是 14B 模型),因为虽然它进行了 2x7B 的矩阵乘法计算,但某些层是共享的。 了解了 MoE 的基本概念后,让我们进一步探索推动这类模型发展的研究。 ## 混合专家模型简史 混合专家模型 (MoE) 的理念起源于 1991 年的论文 [Adaptive Mixture of Local Experts](https://www.cs.toronto.edu/~hinton/absps/jjnh91.pdf)。这个概念与集成学习方法相似,旨在为由多个单独网络组成的系统建立一个监管机制。在这种系统中,每个网络 (被称为“专家”) 处理训练样本的不同子集,专注于输入空间的特定区域。那么,如何选择哪个专家来处理特定的输入呢?这就是门控网络发挥作用的地方,它决定了分配给每个专家的权重。在训练过程中,这些专家和门控网络都同时接受训练,以优化它们的性能和决策能力。 在 2010 至 2015 年间,两个独立的研究领域为混合专家模型 (MoE) 的后续发展做出了显著贡献: 1. **组件专家**: 在传统的 MoE 设置中,整个系统由一个门控网络和多个专家组成。在支持向量机 (SVMs) 、高斯过程和其他方法的研究中,MoE 通常被视为整个模型的一部分。然而,[Eigen、Ranzato 和 Ilya 的研究](https://arxiv.org/abs/1312.4314) 探索了将 MoE 作为更深层网络的一个组件。这种方法允许将 MoE 嵌入到多层网络中的某一层,使得模型既大又高效。 2. 
**条件计算**: 传统的神经网络通过每一层处理所有输入数据。在这一时期,Yoshua Bengio 等研究人员开始探索基于输入令牌动态激活或停用网络组件的方法。 这些研究的融合促进了在自然语言处理 (NLP) 领域对混合专家模型的探索。特别是在 2017 年,[Shazeer 等人](https://arxiv.org/abs/1701.06538) (团队包括 Geoffrey Hinton 和 Jeff Dean,后者有时被戏称为 [“谷歌的 Chuck Norris”](https://www.informatika.bg/jeffdean)) 将这一概念应用于 137B 的 LSTM (当时被广泛应用于 NLP 的架构,由 Schmidhuber 提出)。通过引入稀疏性,这项工作在保持极高规模的同时实现了快速的推理速度。这项工作主要集中在翻译领域,但面临着如高通信成本和训练不稳定性等多种挑战。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/01_moe_layer.png" alt="MoE layer in LSTM"> <figcaption>Outrageously Large Neural Network 论文中的 MoE layer</figcaption> </figure> 混合专家模型 (MoE) 的引入使得训练具有数千亿甚至万亿参数的模型成为可能,如开源的 1.6 万亿参数的 Switch Transformers 等。这种技术不仅在自然语言处理 (NLP) 领域得到了广泛应用,也开始在计算机视觉领域进行探索。然而,本篇博客文章将主要聚焦于自然语言处理领域的应用和探讨。 ## 什么是稀疏性? 稀疏性的概念采用了条件计算的思想。在传统的稠密模型中,所有的参数都会对所有输入数据进行处理。相比之下,稀疏性允许我们仅针对整个系统的某些特定部分执行计算。这意味着并非所有参数都会在处理每个输入时被激活或使用,而是根据输入的特定特征或需求,只有部分参数集合被调用和运行。 让我们深入分析 Shazeer 对混合专家模型 (MoE) 在翻译应用中的贡献。条件计算的概念 (即仅在每个样本的基础上激活网络的不同部分) 使得在不增加额外计算负担的情况下扩展模型规模成为可能。这一策略在每个 MoE 层中实现了数以千计甚至更多的专家的有效利用。 这种稀疏性设置确实带来了一些挑战。例如,在混合专家模型 (MoE) 中,尽管较大的批量大小通常有利于提高性能,但当数据通过激活的专家时,实际的批量大小可能会减少。比如,假设我们的输入批量包含 10 个令牌, **可能会有五个令牌被路由到同一个专家,而剩下的五个令牌分别被路由到不同的专家。这导致了批量大小的不均匀分配和资源利用效率不高的问题**。在接下来的部分中,将会讨论 [让 MoE 高效运行](#让moe起飞) 的其他挑战以及相应的解决方案。 那我们应该如何解决这个问题呢?一个可学习的门控网络 (G) 决定将输入的哪一部分发送给哪些专家 (E): $$ y = \sum_{i=1}^{n} G(x)_i E_i(x) $$ 在这种设置下,虽然所有专家都会对所有输入进行运算,但通过门控网络的输出进行加权乘法操作。但是,如果 G (门控网络的输出) 为 0 会发生什么呢?如果是这种情况,就没有必要计算相应的专家操作,因此我们可以节省计算资源。那么一个典型的门控函数是什么呢?一个典型的门控函数通常是一个带有 softmax 函数的简单的网络。这个网络将学习将输入发送给哪个专家。 $$ G_\sigma(x) = \text{Softmax}(x \cdot W_g) $$ Shazeer 等人的工作还探索了其他的门控机制,其中包括带噪声的 TopK 门控 (Noisy Top-K Gating)。这种门控方法引入了一些可调整的噪声,然后保留前 k 个值。具体来说: 1. 添加一些噪声 $$ H(x)_i = (x \cdot W_{\text{g}})_i + \text{StandardNormal()} \cdot \text{Softplus}((x \cdot W_{\text{noise}})_i) $$ 2. 选择保留前 K 个值 $$ \text{KeepTopK}(v, k)_i = \begin{cases} v_i & \text{if } v_i \text{ is in the top } k \text{ elements of } v, \\ -\infty & \text{otherwise.} \end{cases} $$ 3. 应用 Softmax 函数 $$ G(x) = \text{Softmax}(\text{KeepTopK}(H(x), k)) $$ 这种稀疏性引入了一些有趣的特性。通过使用较低的 k 值 (例如 1 或 2),我们可以比激活多个专家时更快地进行训练和推理。为什么不仅选择最顶尖的专家呢?最初的假设是,需要将输入路由到不止一个专家,以便门控学会如何进行有效的路由选择,因此至少需要选择两个专家。[Switch Transformers](#switch-transformers) 就这点进行了更多的研究。 我们为什么要添加噪声呢?这是为了专家间的负载均衡! 
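在进入负载均衡的讨论之前,我们用一个最小化的 PyTorch 草图把上面几个公式串起来。注意这只是一个示意实现: 类名与变量名均为本文自行假设,且省略了论文中的负载均衡辅助损失等细节。

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class NoisyTopKGate(nn.Module):
    # 带噪声的 TopK 门控的示意实现 (非官方代码)
    def __init__(self, d_model: int, num_experts: int, k: int = 2):
        super().__init__()
        self.w_gate = nn.Linear(d_model, num_experts, bias=False)   # 对应公式中的 W_g
        self.w_noise = nn.Linear(d_model, num_experts, bias=False)  # 对应公式中的 W_noise
        self.k = k

    def forward(self, x):
        # H(x) = x·W_g + StandardNormal() * Softplus(x·W_noise)
        clean = self.w_gate(x)
        noise_std = F.softplus(self.w_noise(x))
        noisy = clean + torch.randn_like(clean) * noise_std
        # KeepTopK: 只保留前 k 个值,其余置为 -inf
        topk_val, topk_idx = noisy.topk(self.k, dim=-1)
        masked = torch.full_like(noisy, float("-inf")).scatter(-1, topk_idx, topk_val)
        # Softmax 之后,非 TopK 专家的权重恰好为 0
        return F.softmax(masked, dim=-1)
```

门控输出 G(x) 的形状为 `(batch, num_experts)`,其中非 TopK 专家对应的权重为 0,它们的前向计算可以直接跳过——这正是上文所说的稀疏性带来的计算节省。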
## 混合专家模型中令牌的负载均衡 正如之前讨论的,如果所有的令牌都被发送到只有少数几个受欢迎的专家,那么训练效率将会降低。在通常的混合专家模型 (MoE) 训练中,门控网络往往倾向于主要激活相同的几个专家。这种情况可能会自我加强,因为受欢迎的专家训练得更快,因此它们更容易被选择。为了缓解这个问题,引入了一个 **辅助损失**,旨在鼓励给予所有专家相同的重要性。这个损失确保所有专家接收到大致相等数量的训练样本,从而平衡了专家之间的选择。接下来的部分还将探讨专家容量的概念,它引入了一个关于专家可以处理多少令牌的阈值。在 `transformers` 库中,可以通过 `aux_loss` 参数来控制辅助损失。 ## MoEs and Transformers Transformer 类模型明确表明,增加参数数量可以提高性能,因此谷歌使用 [GShard](https://arxiv.org/abs/2006.16668) 尝试将 Transformer 模型的参数量扩展到超过 6000 亿并不令人惊讶。 GShard 将在编码器和解码器中的每个前馈网络 (FFN) 层中的替换为使用 Top-2 门控的混合专家模型 (MoE) 层。下图展示了编码器部分的结构。这种架构对于大规模计算非常有效: 当扩展到多个设备时,MoE 层在不同设备间共享,而其他所有层则在每个设备上复制。我们将在 [“让 MoE 起飞”](#让moe起飞) 部分对这一点进行更详细的讨论。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/02_moe_block.png" alt="MoE Transformer Encoder"> <figcaption>GShard 论文中的 MoE Transformer Encoder</figcaption> </figure> 为了保持负载平衡和训练效率,GShard 的作者除了引入了上一节中讨论的类似辅助损失外,还引入了一些关键变化: - **随机路由**: 在 Top-2 设置中,我们始终选择排名最高的专家,但第二个专家是根据其权重比例随机选择的。 - **专家容量**: 我们可以设定一个阈值,定义一个专家能处理多少令牌。如果两个专家的容量都达到上限,令牌就会溢出,并通过残差连接传递到下一层,或在某些情况下被完全丢弃。专家容量是 MoE 中最重要的概念之一。为什么需要专家容量呢?因为所有张量的形状在编译时是静态确定的,我们无法提前知道多少令牌会分配给每个专家,因此需要一个固定的容量因子。 GShard 的工作对适用于 MoE 的并行计算模式也做出了重要贡献,但这些内容的讨论超出了这篇博客的范围。 **注意**: 在推理过程中,只有部分专家被激活。同时,有些计算过程是共享的,例如自注意力 (self-attention) 机制,它适用于所有令牌。这就解释了为什么我们可以使用相当于 12B 稠密模型的计算资源来运行一个包含 8 个专家的 47B 模型。如果我们采用 Top-2 门控,模型会使用高达 14B 的参数。但是,由于自注意力操作 (专家间共享) 的存在,实际上模型运行时使用的参数数量是 12B。 ## Switch Transformers 尽管混合专家模型 (MoE) 显示出了很大的潜力,但它们在训练和微调过程中存在稳定性问题。[Switch Transformers](https://arxiv.org/abs/2101.03961) 是一项非常激动人心的工作,它深入研究了这些话题。作者甚至在 Hugging Face 上发布了一个 [1.6 万亿参数的 MoE](https://huggingface.co/google/switch-c-2048),拥有 2048 个专家,你可以使用 `transformers` 库来运行它。Switch Transformers 实现了与 T5-XXL 相比 4 倍的预训练速度提升。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/03_switch_layer.png" alt="Switch Transformer Layer"> <figcaption>Switch Transformer 论文中的 Switch Transformer Layer</figcaption> </figure> 就像在 GShard 中一样,作者用混合专家模型 (MoE) 层替换了前馈网络 (FFN) 层。Switch Transformers 提出了一个 Switch Transformer 层,它接收两个输入 (两个不同的令牌) 并拥有四个专家。 与最初使用至少两个专家的想法相反,Switch Transformers 采用了简化的单专家策略。这种方法的效果包括: - 减少门控网络 (路由) 计算负担 - 每个专家的批量大小至少可以减半 - 降低通信成本 - 保持模型质量 Switch Transformers 也对 **专家容量** 这个概念进行了研究。 $$ \text{Expert Capacity} = \left(\frac{\text{tokens per batch}}{\text{number of experts}}\right) \times \text{capacity factor} $$ 上述建议的容量是将批次中的令牌数量均匀分配到各个专家。如果我们使用大于 1 的容量因子,我们为令牌分配不完全平衡时提供了一个缓冲。增加容量因子会导致更高的设备间通信成本,因此这是一个需要考虑的权衡。特别值得注意的是,Switch Transformers 在低容量因子 (例如 1 至 1.25) 下表现出色。 Switch Transformer 的作者还重新审视并简化了前面章节中提到的负载均衡损失。在训练期间,对于每个 Switch 层的辅助损失被添加到总模型损失中。这种损失鼓励均匀路由,并可以使用超参数进行加权。 作者还尝试了混合精度的方法,例如用 `bfloat16` 精度训练专家,同时对其余计算使用全精度进行。较低的精度可以减少处理器间的通信成本、计算成本以及存储张量的内存。然而,在最初的实验中,当专家和门控网络都使用 `bfloat16` 精度训练时,出现了不稳定的训练现象。这种不稳定性特别是由路由计算引起的,因为路由涉及指数函数等操作,这些操作对精度要求较高。因此,为了保持计算的稳定性和精确性,保持更高的精度是重要的。为了减轻不稳定性,路由过程也使用了全精度。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/04_switch_table.png" alt="Table shows that selective precision does not degrade quality."> <figcaption>使用混合精度不会降低模型质量并可实现更快的训练</figcaption> </figure> 这个 [Jupyter Notebook](https://colab.research.google.com/drive/1aGGVHZmtKmcNBbAwa9hbu58DDpIuB5O4?usp=sharing) 展示了如何对 Switch Transformers 进行微调以进行摘要生成的详细指南。然而,在开始微调 Switch Transformers 之前,强烈建议您先阅读关于 [微调混合专家模型](#微调混合专家模型) 部分的内容。 Switch Transformers 采用了编码器 - 解码器的架构,实现了与 T5 类似的混合专家模型 (MoE) 版本。[GLaM](https://arxiv.org/abs/2112.06905) 
这篇工作探索了如何使用仅为原来 1/3 的计算资源 (因为 MoE 模型在训练时需要的计算量较少,从而能够显著降低碳足迹) 来训练与 GPT-3 质量相匹配的模型来提高这些模型的规模。作者专注于仅解码器 (decoder-only) 的模型以及少样本和单样本评估,而不是微调。他们使用了 Top-2 路由和更大的容量因子。此外,他们探讨了将容量因子作为一个动态度量,根据训练和评估期间所使用的计算量进行调整。 ## 用 Router z-loss 稳定模型训练 之前讨论的平衡损失可能会导致稳定性问题。我们可以使用许多方法来稳定稀疏模型的训练,但这可能会牺牲模型质量。例如,引入 dropout 可以提高稳定性,但会导致模型质量下降。另一方面,增加更多的乘法分量可以提高质量,但会降低模型稳定性。 [ST-MoE](https://arxiv.org/abs/2202.08906) 引入的 `Router z-loss` 在保持了模型性能的同时显著提升了训练的稳定性。这种损失机制通过惩罚门控网络输入的较大 `logits` 来起作用,目的是促使数值的绝对大小保持较小,这样可以有效减少计算中的舍入误差。这一点对于那些依赖指数函数进行计算的门控网络尤其重要。为了深入了解这一机制,建议参考原始论文以获得更全面的细节。 ## 专家如何学习? ST-MoE 的研究者们发现,编码器中不同的专家倾向于专注于特定类型的令牌或浅层概念。例如,某些专家可能专门处理标点符号,而其他专家则专注于专有名词等。与此相反,解码器中的专家通常具有较低的专业化程度。此外,研究者们还对这一模型进行了多语言训练。尽管人们可能会预期每个专家处理一种特定语言,但实际上并非如此。由于令牌路由和负载均衡的机制,没有任何专家被特定配置以专门处理某一特定语言。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/05_experts_learning.png" alt="Experts specialize in some token groups"> <figcaption>ST-MoE 论文中显示了哪些令牌组被发送给了哪个专家的表格</figcaption> </figure> ## 专家的数量对预训练有何影响? 增加更多专家可以提升处理样本的效率和加速模型的运算速度,但这些优势随着专家数量的增加而递减 (尤其是当专家数量达到 256 或 512 之后更为明显)。同时,这也意味着在推理过程中,需要更多的显存来加载整个模型。值得注意的是,Switch Transformers 的研究表明,其在大规模模型中的特性在小规模模型下也同样适用,即便是每层仅包含 2、4 或 8 个专家。 ## 微调混合专家模型 > `4.36.0` 版本的 `transformers` 库支持 Mixtral 模型。你可以用以下命令进行安装: `pip install "transformers==4.36.0 --upgrade` 稠密模型和稀疏模型在过拟合的动态表现上存在显著差异。稀疏模型更易于出现过拟合现象,因此在处理这些模型时,尝试更强的内部正则化措施是有益的,比如使用更高比例的 dropout。例如,我们可以为稠密层设定一个较低的 dropout 率,而为稀疏层设置一个更高的 dropout 率,以此来优化模型性能。 在微调过程中是否使用辅助损失是一个需要决策的问题。ST-MoE 的作者尝试关闭辅助损失,发现即使高达 11% 的令牌被丢弃,模型的质量也没有显著受到影响。令牌丢弃可能是一种正则化形式,有助于防止过拟合。 Switch Transformers 的作者观察到,在相同的预训练困惑度下,稀疏模型在下游任务中的表现不如对应的稠密模型,特别是在重理解任务 (如 SuperGLUE) 上。另一方面,对于知识密集型任务 (如 TriviaQA),稀疏模型的表现异常出色。作者还观察到,在微调过程中,较少的专家的数量有助于改善性能。另一个关于泛化问题确认的发现是,模型在小型任务上表现较差,但在大型任务上表现良好。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/06_superglue_curves.png" alt="Fine-tuning learning curves"> <figcaption>在小任务 (左图) 中,我们可以看到明显的过拟合,因为稀疏模型在验证集中的表现要差得多。在较大的任务 (右图) 中,MoE 则表现良好。该图来自 ST-MoE 论文</figcaption> </figure> 一种可行的微调策略是尝试冻结所有非专家层的权重。实践中,这会导致性能大幅下降,但这符合我们的预期,因为混合专家模型 (MoE) 层占据了网络的主要部分。我们可以尝试相反的方法: 仅冻结 MoE 层的参数。实验结果显示,这种方法几乎与更新所有参数的效果相当。这种做法可以加速微调过程,并降低显存需求。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/07_superglue_bars.png" alt="Only updating the non MoE layers works well in fine-tuning"> <figcaption>通过仅冻结 MoE 层,我们可以在保持质量的同时加快训练速度。该图来自 ST-MoE 论文</figcaption> </figure> 在微调稀疏混合专家模型 (MoE) 时需要考虑的最后一个问题是,它们有特别的微调超参数设置——例如,稀疏模型往往更适合使用较小的批量大小和较高的学习率,这样可以获得更好的训练效果。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/08_superglue_dense_vs_sparse.png" alt="Table comparing fine-tuning batch size and learning rate between dense and sparse models."> <figcaption>提高学习率和调小批量可以提升稀疏模型微调质量。该图来自 ST-MoE 论文</figcaption> </figure> 此时,您可能会对人们微调 MoE 中遇到的这些挑战而感到沮丧,但最近的一篇论文 [《MoEs Meets Instruction Tuning》](https://arxiv.org/pdf/2305.14705.pdf) (2023 年 7 月) 带来了令人兴奋的发现。这篇论文进行了以下实验: - 单任务微调 - 多任务指令微调 - 多任务指令微调后接单任务微调 当研究者们对 MoE 和对应性能相当的 T5 模型进行微调时,他们发现 T5 的对应模型表现更为出色。然而,当研究者们对 Flan T5 (一种 T5 的指令优化版本) 的 MoE 版本进行微调时,MoE 的性能显著提升。更值得注意的是,Flan-MoE 相比原始 MoE 的性能提升幅度超过了 Flan T5 相对于原始 T5 的提升,这意味着 MoE 模型可能从指令式微调中获益更多,甚至超过了稠密模型。此外,MoE 在多任务学习中表现更佳。与之前关闭 **辅助损失** 函数的做法相反,实际上这种损失函数可以帮助防止过拟合。 <figure class="image text-center"> <img 
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/09_fine_tune_evals.png" alt="MoEs benefit even more from instruct tuning than dense models"> <figcaption>与稠密模型相比,稀疏模型从指令微调中受益更多。该图来自 MoEs Meets instructions Tuning 论文</figcaption> </figure> ## 稀疏 VS 稠密,如何选择? 稀疏混合专家模型 (MoE) 适用于拥有多台机器且要求高吞吐量的场景。在固定的预训练计算资源下,稀疏模型往往能够实现更优的效果。相反,在显存较少且吞吐量要求不高的场景,稠密模型则是更合适的选择。 **注意**: 直接比较稀疏模型和稠密模型的参数数量是不恰当的,因为这两类模型基于的概念和参数量的计算方法完全不同。 ## 让 MoE 起飞 最初的混合专家模型 (MoE) 设计采用了分支结构,这导致了计算效率低下。这种低效主要是因为 GPU 并不是为处理这种结构而设计的,而且由于设备间需要传递数据,网络带宽常常成为性能瓶颈。在接下来的讨论中,我们会讨论一些现有的研究成果,旨在使这些模型在预训练和推理阶段更加高效和实用。我们来看看如何优化 MoE 模型,让 MoE 起飞。 ### 并行计算 让我们简要回顾一下并行计算的几种形式: - **数据并行**: 相同的权重在所有节点上复制,数据在节点之间分割。 - **模型并行**: 模型在节点之间分割,相同的数据在所有节点上复制。 - **模型和数据并行**: 我们可以在节点之间同时分割模型和数据。注意,不同的节点处理不同批次的数据。 - **专家并行**: 专家被放置在不同的节点上。如果与数据并行结合,每个节点拥有不同的专家,数据在所有节点之间分割。 在专家并行中,专家被放置在不同的节点上,每个节点处理不同批次的训练样本。对于非 MoE 层,专家并行的行为与数据并行相同。对于 MoE 层,序列中的令牌被发送到拥有所需专家的节点。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/10_parallelism.png" alt="Image illustrating model, expert, and data prallelism"> <figcaption>Switch Transformers 论文中展示如何使用不同的并行技术在节点上分割数据和模型的插图</figcaption> </figure> ### 容量因子和通信开销 提高容量因子 (Capacity Factor, CF) 可以增强模型的性能,但这也意味着更高的通信成本和对保存激活值的显存的需求。在设备通信带宽有限的情况下,选择较小的容量因子可能是更佳的策略。一个合理的初始设置是采用 Top-2 路由、1.25 的容量因子,同时每个节点配置一个专家。在评估性能时,应根据需要调整容量因子,以在设备间的通信成本和计算成本之间找到一个平衡点。 ### 部署技术 > 您可以在 `Inference Endpoints` 部署 [mistralai/Mixtral-8x7B-Instruct-v0.1](https://ui.endpoints.huggingface.co/new?repository=mistralai%2FMixtral-8x7B-Instruct-v0.1&vendor=aws&region=us-east-1&accelerator=gpu&instance_size=2xlarge&task=text-generation&no_suggested_compute=true&tgi=true&tgi_max_batch_total_tokens=1024000&tgi_max_total_tokens=32000)。 部署混合专家模型 (MoE) 的一个关键挑战是其庞大的参数规模。对于本地使用情况,我们可能希望使用更小的模型。为了使模型更适合部署,下面是几种有用的技术: - 预先蒸馏实验: Switch Transformers 的研究者们进行了预先蒸馏的实验。他们通过将 MoE 模型蒸馏回其对应的稠密模型,成功保留了 30-40%的由稀疏性带来的性能提升。预先蒸馏不仅加快了预训练速度,还使得在推理中使用更小型的模型成为可能。 - 任务级别路由: 最新的方法中,路由器被修改为将整个句子或任务直接路由到一个专家。这样做可以提取出一个用于服务的子网络,有助于简化模型的结构。 - 专家网络聚合: 这项技术通过合并各个专家的权重,在推理时减少了所需的参数数量。这样可以在不显著牺牲性能的情况下降低模型的复杂度。 ### 高效训练 FasterMoE (2022 年 3 月) 深入分析了 MoE 在不同并行策略下的理论性能极限,并且探索了一系列创新技术,包括用于专家权重调整的方法、减少延迟的细粒度通信调度技术,以及一个基于最低延迟进行专家选择的拓扑感知门控机制。这些技术的结合使得 MoE 运行速度提升高达 17 倍。 Megablocks (2022 年 11 月) 则专注于通过开发新的 GPU kernel 来处理 MoE 模型中的动态性,以实现更高效的稀疏预训练。其核心优势在于,它不会丢弃任何令牌,并能高效地适应现代硬件架构 (支持块稀疏矩阵乘),从而达到显著的加速效果。Megablocks 的创新之处在于,它不像传统 MoE 那样使用批量矩阵乘法 (这通常假设所有专家形状相同且处理相同数量的令牌),而是将 MoE 层表示为块稀疏操作,可以灵活适应不均衡的令牌分配。 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/moe/11_expert_matmuls.png" alt="Matrix multiplication optimized for block-sparse operations."> <figcaption>针对不同规模的专家和令牌数量的块稀疏矩阵乘法。该图来自 [MegaBlocks](https://arxiv.org/abs/2211.15841) 论文</figcaption> </figure> ## 开源混合专家模型 目前,下面这些开源项目可以用于训练混合专家模型 (MoE): - Megablocks: <https://github.com/stanford-futuredata/megablocks> - Fairseq: <https://github.com/facebookresearch/fairseq/tree/main/examples/moe_lm> - OpenMoE: <https://github.com/XueFuzhao/OpenMoE> 对于开源的混合专家模型 (MoE),你可以关注下面这些: - [Switch Transformers (Google)](https://huggingface.co/collections/google/switch-transformers-release-6548c35c6507968374b56d1f): 基于 T5 的 MoE 集合,专家数量从 8 名到 2048 名。最大的模型有 1.6 万亿个参数。 - [NLLB MoE (Meta)](https://huggingface.co/facebook/nllb-moe-54b): NLLB 翻译模型的一个 MoE 变体。 - [OpenMoE](https://huggingface.co/fuzhao): 社区对基于 Llama 的模型的 MoE 尝试。 - [Mixtral 8x7B (Mistral)](https://huggingface.co/mistralai): 一个性能超越了 
Llama 2 70B 的高质量混合专家模型,并且具有更快的推理速度。此外,还发布了一个经过指令微调的模型。有关更多信息,可以在 Mistral 的 [公告博客文章](https://mistral.ai/news/mixtral-of-experts/) 中了解。 ## 一些有趣的研究方向 首先是尝试将稀疏混合专家模型 (SMoE) **蒸馏** 回到具有更少实际参数但相似等价参数量的稠密模型。 MoE 的 **量化** 也是一个有趣的研究领域。例如,[QMoE](https://arxiv.org/abs/2310.16795) (2023 年 10 月) 通过将 MoE 量化到每个参数不到 1 位,将 1.6 万亿参数的 Switch Transformer 所需的存储从 3.2TB 压缩到仅 160GB。 简而言之,一些值得探索的有趣领域包括: - 将 Mixtral 蒸馏成一个稠密模型。 - 探索合并专家模型的技术及其对推理时间的影响。 - 尝试对 Mixtral 进行极端量化的实验。 ## 相关资源 - [Adaptive Mixture of Local Experts (1991)](https://www.cs.toronto.edu/~hinton/absps/jjnh91.pdf) - [Learning Factored Representations in a Deep Mixture of Experts (2013)](https://arxiv.org/abs/1312.4314) - [Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer (2017)](https://arxiv.org/abs/1701.06538) - [GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding (Jun 2020)](https://arxiv.org/abs/2006.16668) - [GLaM: Efficient Scaling of Language Models with Mixture-of-Experts (Dec 2021)](https://arxiv.org/abs/2112.06905) - [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity (Jan 2022)](https://arxiv.org/abs/2101.03961) - [ST-MoE: Designing Stable and Transferable Sparse Expert Models (Feb 2022)](https://arxiv.org/abs/2202.08906) - [FasterMoE: modeling and optimizing training of large-scale dynamic pre-trained models(April 2022)](https://dl.acm.org/doi/10.1145/3503221.3508418) - [MegaBlocks: Efficient Sparse Training with Mixture-of-Experts (Nov 2022)](https://arxiv.org/abs/2211.15841) - [Mixture-of-Experts Meets Instruction Tuning:A Winning Combination for Large Language Models (May 2023)](https://arxiv.org/abs/2305.14705) - [Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1), [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1). ## Citation ```bibtex @misc {sanseviero2023moe, author = { Omar Sanseviero and Lewis Tunstall and Philipp Schmid and Sourab Mangrulkar and Younes Belkada and Pedro Cuenca }, title = { Mixture of Experts Explained }, year = 2023, url = { https://huggingface.co/blog/moe }, publisher = { Hugging Face Blog } } ``` ``` Sanseviero, et al., "Mixture of Experts Explained", Hugging Face Blog, 2023. ```
--- title: "使用 diffusers 训练你自己的 ControlNet 🧨" thumbnail: /blog/assets/136_train-your-controlnet/thumbnail.png authors: - user: multimodalart - user: pcuenq translators: - user: hugging-hoi2022 - user: zhongdongy proofreader: true --- # 使用 diffusers 训练你自己的 ControlNet 🧨 ## 简介 [ControlNet](https://huggingface.co/blog/controlnet) 这个神经网络模型使得用户可以通过施加额外条件,细粒度地控制扩散模型的生成过程。这一技术最初由 [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) 这篇论文提出,并很快地风靡了扩散模型的开源社区。作者开源了 8 个不同的模型,使得用户可以用 8 种条件去控制 Stable Diffusion 模型(包括版本 1 到 5 )。这 8 种条件包括姿态估计、深度图、边缘图、素描图 [等等](https://huggingface.co/lllyasviel)。 ![ControlNet pose examples](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/136_train-your-controlnet/pose_image_1-min.png "ControlNet pose examples") 在这篇博客中,我们首先介绍训练 _Uncanny_ Faces model 的步骤。这是一个基于 3D 合成人脸的人脸姿态模型(这里的 uncanny faces 只是一个无意得到的结果,后面我们会讲到)。 ## 开始着手用 Stable Diffusion 训练你的 ControlNet 训练你自己的 ControlNet 需要 3 个步骤: 1. **设计你想要的生成条件**: 使用 ControlNet 可以灵活地“驯服” Stable Diffusion,使它朝着你想的方向生成。预训练的模型已经展示出了大量可用的生成条件,此外开源社区也已经开发出了很多其它条件,比如这里 [像素化的色彩板](https://huggingface.co/thibaud/controlnet-sd21-color-diffusers)。 2. **构建你自己的数据集**: 当生成条件确定好后,就该构建数据集了。你既可以从头构建一个数据集,也可以使用现有数据集中的数据。为了训练模型,这个数据集需要有三个维度的信息: 图片、作为条件的图片,以及语言提示。 3. **训练模型**: 一旦数据集建好了,就可以训练模型了。如果你使用 [这个基于 diffusers 的训练脚本](https://github.com/huggingface/diffusers/tree/main/examples/controlnet),训练其实是最简单的。这里你需要一个至少 8G 显存的 GPU。 ## 1. 设计你想要的生成条件 在设计你自己的生成条件前,有必要考虑一下两个问题: 1. 哪种生成条件是我想要的? 2. 是否已有现存的模型可以把正常图片转换成我的条件图片? 举个例子,假如我们想要使用人脸关键点作为生成条件。我们的思考过程应该是这样: 1. 一般基于关键点的 ControlNet 效果都还挺好。2. 人脸关键点检测也是一个很常见的任务,也有很多模型可以在普通图片上检测人脸关键点。3. 让 Stable Diffusion 去根据关键点生成人脸图片也挺有意思,还能让生成的人脸模仿别人的表情。 ![Example of face landmarks](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/136_train-your-controlnet/segmentation_examples.png "Example of face landmarks") ## 2. 
构建你自己的数据集 好!那我们现在已经决定用人脸关键点作为生成条件了。接下来我们需要这样构建数据集: - 准备 ground truth 图片 (`image`): 这里指的就是真实人脸图片 - 准备 条件图片 (`conditioning_image`): 这里指的就是画出来的关键点 - 准备 说明文字 (`caption`): 描述图片的文字 针对这个项目,我们使用微软的 `FaceSynthetics` 数据集: 这是一个包含了 10 万合成人脸的数据集。你可能会想到其它一些人脸数据集,比如 `Celeb-A HQ` 和 `FFHQ`,但这个项目我们决定还是采用合成人脸。 ![Face synthetics example dataset](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/136_train-your-controlnet/face_synethtics_example.jpeg "Face synthetics example dataset") 这里的 `FaceSynthetics` 数据集看起来是个不错的选择: 它包含了真实的人脸图片,同时也包含了被标注过的人脸关键点(按照 iBUG 68 关键点的格式),同时还有人脸的分割图。 ![Face synthetics descriptions](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/136_train-your-controlnet/segmentation_sequence.png "Face synthetics descriptions") 然而,这个数据集也不是完美的。我们前面说过,我们应该有模型可以将真实图片转换到条件图片。但这里似乎没有这样的模型,把人脸图片转换成我们关键点标注形式(无法把关键点转换为分割图)。 ![No known segmentation model](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/136_train-your-controlnet/segmentation_no_known.png "No known segmentation model") 所以我们需要用另一种方法: - 使用 `FaceSynthetics` 中的真实图片 (`image`) - 使用一个现有的模型把人脸图片转换为 68 个关键点的形式。这里我们使用 [SPIGA](https://github.com/andresprados/SPIGA) 这个模型 - 使用自己的代码把人脸关键点转换为人脸分割图,以此作为“条件图片” (`conditioning_image`) - 把这些数据保存为 [Hugging Face Dataset](https://huggingface.co/docs/datasets/index) [这里](https://huggingface.co/datasets/pcuenq/face_synthetics_spiga) 是将真实图片转换到分割图的代码,以及将数据保存为 Hugging Face Dataset 的代码。 现在我们准备好了 ground truth 图片和“条件图片”,我们还缺少说明文字。我们强烈推荐你把说明文字加进去,但你也可以试试使用空的说明文字来看看效果。因为 `FaceSynthetics` 数据集并没有自带说明文字,我们使用 [BLIP captioning](https://huggingface.co/docs/transformers/model_doc/blip) 去给图片加上文字(代码在[这里](https://huggingface.co/datasets/multimodalart/facesyntheticsspigacaptioned))。 至此,我们就完成了数据集的构建。这个 [Face Synthetics SPIGA with captions](https://huggingface.co/datasets/multimodalart/facesyntheticsspigacaptioned) 数据集包含了 ground truth 图片、条件图片,以及对应的说明文字,总计有 10 万条数据。一切就绪,我们现在可以开始训练模型了。 ![New dataset](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/136_train-your-controlnet/new_dataset.png "New dataset") ## 3. 
模型训练 有了 [数据](https://huggingface.co/datasets/multimodalart/facesyntheticsspigacaptioned),下一步就是训练模型。即使这部分很难,但有了 [这个脚本](https://github.com/huggingface/diffusers/tree/main/examples/controlnet),这个过程却变成了最简单的部分。我们用了一个 A100 GPU去训练(在 [LambdaLabs](https://lambdalabs.com) 每小时 1.1 美元租的)。 ### 我们的训练经验 我们以 batch size 为 4 训练了 3 个 epoch。结果表明此策略有些太激进,导致结果出现过拟合现象。模型有点忘记人脸的概念了,即使提示语中包含“怪物史莱克”或“一只猫”,模型也只会生成人脸而不是“史莱克”或猫;同时模型也对各种风格变得不敏感。 如果我们只训练 1 个 epoch (即模型仅学习了 10 万张照片),模型倒是能遵循输入的姿态,同时也没什么过拟合。看起来还行,但由于我们用的是合成数据,模型最终生成的都是些看起来很 3D 的人脸,而不是真实人脸。当然,基于我们用的数据集,生成这样的效果也正常。这里是训练好的模型: [uncannyfaces_25K](https://huggingface.co/multimodalart/uncannyfaces_25K)。 <iframe src="https://wandb.ai/apolinario/controlnet/reports/ControlNet-Uncanny-Faces-Training--VmlldzozODcxNDY0" style="border:none;height:512px;width:100%"></iframe> 在这张可交互表格中,你可以看看的步数如何影响模型效果。在训练了大约 15k 步后,模型就已经开始学习姿态了。最终模型在 25k 步后趋于成熟。 ### 训练具体怎么做 首先我们安装各种依赖: ```shell pip install git+https://github.com/huggingface/diffusers.git transformers accelerate xformers==0.0.16 wandb huggingface-cli login wandb login ``` 然后运行 [train_controlnet.py](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py) 这个脚本: ```shell !accelerate launch train_controlnet.py \ --pretrained_model_name_or_path="stabilityai/stable-diffusion-2-1-base" \ --output_dir="model_out" \ --dataset_name=multimodalart/facesyntheticsspigacaptioned \ --conditioning_image_column=spiga_seg \ --image_column=image \ --caption_column=image_caption \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./face_landmarks1.jpeg" "./face_landmarks2.jpeg" "./face_landmarks3.jpeg" \ --validation_prompt "High-quality close-up dslr photo of man wearing a hat with trees in the background" "Girl smiling, professional dslr photograph, dark background, studio lights, high quality" "Portrait of a clown face, oil on canvas, bittersweet expression" \ --train_batch_size=4 \ --num_train_epochs=3 \ --tracker_project_name="controlnet" \ --enable_xformers_memory_efficient_attention \ --checkpointing_steps=5000 \ --validation_steps=5000 \ --report_to wandb \ --push_to_hub ``` 我们详细看看这些设置参数,同时也看看有哪些优化方法可以用于 8GB 以下显存的 GPU 训练。 - `pretrained_model_name_or_path`: 基础的 Stable Diffusion 模型,这里我们使用 v2-1 版本,因为这一版生成人脸效果更好 - `output_dir`: 保存模型的目录文件夹 - `dataset_name`: 用于训练的数据集,这里我们使用 [Face Synthetics SPIGA with captions](https://huggingface.co/datasets/multimodalart/facesyntheticsspigacaptioned) - `conditioning_image_column`: 数据集中包含条件图片的这一栏的名称,这里我们用 `spiga_seg` - `image_column`: 数据集中包含 ground truth 图片的这一栏的名称,这里我们用 `image` - `caption_column`: 数据集中包含文字说明的这一栏的名称,这里我们用 `image_caption` - `resolution`: ground truth 图片和条件图片的分辨率,这里我们用 `512x512` - `learning_rate`: 学习率。我们发现设成 `1e-5` 效果很好,但你也可以试试介于 `1e-4` 和 `2e-6` 之间的其它值 - `validation_image`: 这里是让你在训练过程中偷窥一下效果的。每隔 `validation_steps` 步训练,这些验证图片都会跑一下,让你看看当前的训练效果。请在这里插入一个指向一系列条件图片的本地路径 - `validation_prompt`: 这里是一句文本提示,用于和你的验证图片一起验证当前模型。你可以根据你的需要设置 - `train_batch_size`: 这是训练时使用的 batch size。因为我们用的是 V100,所以我们还有能力把它设成 4。但如果你的 GPU 显存比较小,我们推荐直接设成 1。 - `num_train_epochs`: 训练模型使用的轮数。每一轮模型都会看一遍整个数据集。我们实验用的是 3 轮,但似乎最好的结果应该是出现在一轮多一点的地方。当训练了 3 轮时,我们的模型过拟合了。 - `checkpointing_steps`: 每隔这么多步,我们都会保存一下模型的中间结果检查点。这里我们设置成 5000,也就是每训练 5000 步就保存一下检查点。 - `validation_steps`: 每隔这么多步,`validation_image` 和 `validation_prompt` 就会跑一下,来验证训练过程。 - `report_to`: 向哪里报告训练情况。这里我们使用 Weights and Biases 这个平台,它可以给出 [这样美观的训练报告]()。 - `push_to_hub`: 将最终结果推到 Hugging Face Hub. 
但是将 `train_batch_size` 从 `4` 减小到 `1` 可能还不足以使模型能够在低配置 GPU 上运行,这里针对不同 GPU 的 VRAM 提供一些其它配置信息: ### 适配 16GB 显存的 GPU ```shell pip install bitsandbytes --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam ``` 这里 batch size 设为 1,同时使用 4 步的梯度累计等同于你使用原始的 batch size 为 4 的情况。除此之外,我们开启了对梯度保存检查点,以及 8 bit 的 Adam 优化器训练,以此更多地节省显存。 ### 适配 12GB 显存的 GPU ```shell --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam --set_grads_to_none ``` ### 适配 8GB 显存的 GPU 请参考 [我们的教程](https://github.com/huggingface/diffusers/tree/main/examples/controlnet#training-on-an-8-gb-gpu) ## 4. 总结 训练 ControlNet 的过程非常有趣。我们已经成功地训练了一个可以模仿真实人脸姿态的模型。然而这个模型更多是生成 3D 风格的人脸图片而不是真实人脸图片,这是由于我们使用了合成人脸的数据执行训练。当然这也让生成的模型有了独特的魅力。 试试我们的 [Hugging Face Space](https://huggingface.co/spaces/pcuenq/uncanny-faces): <iframe src="https://pcuenq-uncanny-faces.hf.space" frameborder="0" width="100%" height="1150" style="border:0" ></iframe> 下一步,为了生成真实的人脸图片,同时还不使用真实人脸数据集,我们可以用 Stable Diffusion Image2Image 跑一遍所有的 `FaceSynthetics` 图片,把看起来很 3D 的人脸转换成真实人脸图片,然后再训练 ControlNet。 请继续关注我们,接下来我们将举办 ControlNet 训练赛事。请在 [Twitter](https://twitter.com/huggingface) 关注 Hugging Face,或者加入我们的 [Discord](http://hf.co/join/discord) 以便接收最新消息!
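附: 训练完成后,可以用 `diffusers` 直接加载训练得到的 ControlNet 权重进行推理。下面是一个最小化草图: 其中 `model_out` 对应上文训练命令中的 `--output_dir`,条件图片沿用了验证时使用的人脸关键点分割图,文件名、提示语和推理步数均仅作演示。

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# "model_out" 是上文训练命令里保存 ControlNet 权重的输出目录
controlnet = ControlNetModel.from_pretrained("model_out", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

# 条件图片应为 SPIGA 人脸关键点分割图,这里的文件名仅作演示
conditioning = load_image("./face_landmarks1.jpeg")
image = pipe(
    "High-quality close-up dslr photo of man wearing a hat with trees in the background",
    image=conditioning,
    num_inference_steps=30,
).images[0]
image.save("controlnet_output.png")
```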
--- title: "SetFit: 高效的无提示少样本学习" thumbnail: /blog/assets/103_setfit/intel_hf_logo.png authors: - user: Unso - user: lewtun - user: luketheduke - user: danielkorat - user: orenpereg - user: moshew translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # SetFit: 高效的无提示少样本学习 <p align="center"> <img src="../assets/103_setfit/setfit_curves.png" width=500> </p> <p align="center"> <em>与标准微调相比,SetFit 能更高效地利用训练样本,同时对噪声也更健壮。</em> </p> 如何处理少标签或无标签的训练数据是每个数据科学家的梦魇 😱。最近几年来,基于预训练语言模型的少样本 (few-shot) 学习出现并成为解决这类问题的颇有前途的方案。 因此,我们非常高兴地向大家介绍 SetFit: 一个基于 [Sentence Transformers](https://sbert.net/) 的高效的少样本微调 (fine-tune) 框架,该工作由 HuggingFace 和我们的研究伙伴 [Intel Labs](https://www.intel.com/content/www/us/en/research/overview.html) 以及 [UKP Lab](https://www.informatik.tu-darmstadt.de/ukp/ukp_home/index.en.jsp) 合作完成。SetFit 仅需使用很少的标注数据就能达到较高的准确率。举个例子,在客户评论情感数据集 (Customer Reviews (CR) sentiment dataset) 上,每类仅使用 8 个标注样本,SetFit 的准确率就和在 3 千个标注样本的训练全集上微调 RoBERTa Large 相当了 🤯! 与其他少样本学习方案相比,SetFit 有很多独有的特点: <p>🗣 <strong>无需提示词或语言器 (verbalisers)</strong>: 目前的少样本微调技术都需要手工设计的提示或语言器,用于将训练样本转换成适合目标语言模型的格式。SetFit 通过直接从少量标注训练样本中生成丰富的嵌入,完全省去了提示。</p> <p>🏎 <strong>快速训练</strong>: SetFit 不需要使用像 T0 或 GPT-3 这样的大规模语言模型就能达到高准确率。因此,典型情况下,它的训练和推理会快一个数量级或以上。</p> <p>🌎 <strong>支持多语言</strong>: SetFit 可与 Hub 上的任一 Sentence Tranformer 一起使用,这意味着如果你想让它支持多语言文本分类,你只要简单地微调一个多语言的 checkpoint 就好了。</p> 如果你想知道更多细节,可以在下方链接获取我们的 [论文](https://arxiv.org/abs/2209.11055)、[数据](https://huggingface.co/SetFit) 及 [代码](https://github.com/huggingface/setfit)。在本文中,我们主要解释 SetFit 是如何工作的以及如何使用 SetFit 训练一个你自己的模型。让我们开始吧! ## SetFit 如何工作? 在设计 SetFit 时,我们始终牢记高效、简单两个原则。SetFit 主要包含两个阶段:首先在少量标注样例 (典型值是每类 8 个或 16 个样例) 上微调一个 Sentence Transformer 模型。然后,用微调得到的 Sentence Tranformer 的模型生成文本的嵌入 (embedding) ,并用这些嵌入训练一个分类头 (classification head) 。 <p align="center"> <img src="../assets/103_setfit/setfit_diagram_process.png" width=700> </p> <p align="center"> <em>SetFit 的两阶段训练过程</em> </p> SetFit 利用 Sentence Transformer 的能力去生成基于句对 (paired sentences) 的稠密嵌入。在第一步微调阶段,它使用对比训练 (contrastive training) 来最大化利用有限的标注数据。首先,通过选择类内 (in-class) 和类外 (out-class) 句子来构造正句对和负句对,然后在这些句对 (或三元组 (triplets) ) 上训练 Sentence Transformer 模型并生成每个样本的稠密向量。第二步,根据每个样本的嵌入向量和各自的类标签,训练分类头。推理时,未见过的样本通过微调后的 Sentence Transformer 并生成嵌入,生成的嵌入随后被送入分类头并输出类标签的预测。 只需要把基础 Sentence Transformer 模型换成多语言版的,SetFit 就可以无缝地在多语言环境下运行。在我们的 [实验](https://arxiv.org/abs/2209.11055) 中,SetFit 在德语、日语、中文、法语以及西班牙语中,在单语言和跨语言的条件下,都取得了不错的分类性能。 ## 测试 SetFit 尽管与现存的少样本模型相比,SetFit 的模型要小得多,但在各种各样的测试基准上,SetFit 还是表现出了与当前最先进的方法相当或更好的性能。在 [RAFT](https://huggingface.co/spaces/ought/raft-leaderboard) 这个少样本分类测试基准上,参数量为 335M 的 SetFit Roberta (使用 [`all-roberta-large-v1` 模型](https://huggingface.co/sentence-transformers/all-roberta-large-v1)) 性能超过了 PET 和 GPT-3。它的排名仅在人类平均性能以及 11B 参数的 T-few 之后,而 T-few 模型的参数量是 SetFit Roberta 的 30 倍。SetFit 还在 11 个 RAFT 任务中的 7 个任务上表现好于人类基线。 | Rank | Method | Accuracy | Model Size | | :------: | ------ | :------: | :------: | | 2 | T-Few | 75.8 | 11B | | 4 | Human Baseline | 73.5 | N/A | | 6 | SetFit (Roberta Large) | 71.3 | 355M | | 9 | PET | 69.6 | 235M | | 11 | SetFit (MP-Net) | 66.9 | 110M | | 12 | GPT-3 | 62.7 | 175 B | <p align="center"> <em>RAFT 排行榜上表现突出的方法 (截至 2022 年 9 月)</em> </p> 在其他的数据集上,SeiFit 在各种各样的任务中也展示出了鲁棒的性能。如下图所示,每类仅需 8 个样本,其典型性能就超越了 PERFECT、ADAPET 以及微调后的原始 transformer 模型。SetFit 还取得了与 T-Few 3B 相当的结果,尽管它无需提示且模型小了 27 倍。 <p align="center"> <img src="../assets/103_setfit/three-tasks.png" width=700> </p> <p align="center"> <em>在 3 个分类数据集上比较 SetFit 与其他方法的性能。</em> </p> ## 快速训练与推理 <p align="center"> <img 
src="../assets/103_setfit/bars.png" width=400> </p> <p align="center">在每类 8 个标注样本的条件下,比较 T-Few 3B 和 SetFit (MPNet) 的训练成本和平均性能。</p> 因为 SetFit 可以用相对较小的模型取得高准确率,所以它训练起来可以非常快,而且成本也低不少。举个例子,在每类 8 个标注样本的数据集上使用 NVIDIA V100 训练 SetFit 只需要 30 秒,共花费 0.025 美金;相比较而言,相同的实验下,训练 T-Few 3B 需要一张 NVIDIA A100,时间上要 11 分钟,需花费 0.7 美金,成本高 28 倍以上。事实上,SetFit 不仅可以运行在那种你在 Google Colab 找到的 GPU 单卡上,甚至在 CPU 上你也仅需几分钟即可以训练一个模型。如上图所示,SetFit 的加速与模型大小相当,因此 [推理](https://arxiv.org/abs/2209.11055) 时,我们也可以获得相似的性能提升,进一步地,对 SetFit 模型进行蒸馏可以获得 123 倍的加速 🤯。 ## 训练你自己的模型 为了利于社区用户使用 SetFit,我们创建了一个小型 `setfit` [库](https://github.com/huggingface/setfit),这样你仅需几行代码就可以训练自己的模型了。 第一件事就是运行如下命令安装库: ```sh pip install setfit ``` 接着,我们导入 `SetFitModel` 和 `SetFitTrainer`,它们是流水线化 SetFit 训练过程的两个核心类: ```python from datasets import load_dataset from sentence_transformers.losses import CosineSimilarityLoss from setfit import SetFitModel, SetFitTrainer ``` 现在,我们开始从 HuggingFace Hub 上下载一个文本分类数据集。我们使用 [SentEval-CR](https://huggingface.co/datasets/SetFit/SentEval-CR) 数据集,它是一个客户评论数据集。 ```python dataset = load_dataset("SetFit/SentEval-CR") ``` 为了模拟仅有几个标注样例的真实场景,我们从数据集中每类采样 8 个样本: ```python # Select N examples per class (8 in this case) train_ds = dataset["train"].shuffle(seed=42).select(range(8 * 2)) test_ds = dataset["test"] ``` 既然我们有数据集了,下一步是从 Hub 里加载一个预训练 Sentence Transformer 模型,并用它去实例化 `SetFitTrainer`。这里我们使用 [paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) 模型,我们发现该模型在多个数据集下都能得出很好的结果: ```python # Load SetFit model from Hub model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2") # Create trainer trainer = SetFitTrainer( model=model, train_dataset=train_ds, eval_dataset=test_ds, loss_class=CosineSimilarityLoss, batch_size=16, num_iterations=20, # Number of text pairs to generate for contrastive learning num_epochs=1 # Number of epochs to use for contrastive learning ) ``` 最后一步是训练和评估模型: ```python # Train and evaluate! trainer.train() metrics = trainer.evaluate() ``` 就这样,你已经训练了你的第一个 SetFit 模型!记得把你训练后的模型上传到 Hub 里 🤗。 ```python # Push model to the Hub # Make sure you're logged in with huggingface-cli login first trainer.push_to_hub("my-awesome-setfit-model") ``` 虽然在上面的例子中我们只展示了如何用一个特定类型的模型走完全程,但其实我们可以针对不同的性能和任务,切换使用任意的 [Sentence Transformer](https://huggingface.co/models?library=sentence-transformers&sort=downloads) 模型。举个例子,使用多语言 Sentence Transformer 可以将少样本分类扩展至多语言的场景。 ## 下一步 我们已经向大家展示了 SetFit 是用于少样本分类任务的有效方法。在接下来的几个月里,我们会继续探索将该方法扩展至自然语言推理和词分类任务并观察其效果。同时,我们也会很高兴看到业界从业者如何应用 SetFit 到他们自己的应用场景。如果你有任何问题或者反馈,请在我们的 [GitHub 仓库](https://github.com/huggingface/setfit) 上提出问题 🤗。 少样本学习快乐!
--- title: "Docmatix - 超大文档视觉问答数据集" thumbnail: /blog/assets/183_docmatix/thumbnail_new.png authors: - user: andito - user: HugoLaurencon translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # Docmatix - 超大文档视觉问答数据集 本文,我们将发布 [Docmatix - 一个超大的文档视觉问答 (DocVQA) 数据集](https://huggingface.co/datasets/HuggingFaceM4/Docmatix),比之前的数据集大 100 倍。当使用 Docmatix 微调 Florence-2 时,消融实验显示 DocVQA 任务的性能提高了 20%。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/docmatix_example.png" alt="数据集样本实例" style="width: 90%; height: auto;"><br> <em>Docmatix 数据集样本示例</em> </p> 缘起于 [丹鼎 (The Cauldron)](https://huggingface.co/datasets/HuggingFaceM4/the_cauldron) 的开发,丹鼎包含了 50 个数据集,旨在用于视觉语言模型 (VLM) 的微调,我们的 [Idefics2](https://huggingface.co/blog/idefics2) 就是由此训得。在丹鼎的开发过程中,我们发现缺乏大规模文档视觉问答 (DocVQA) 数据集。Idefics2 依赖的视觉问答数据集主要是 DocVQA,其中仅包含 1 万张图像以及 3 万 9 千对问答 (Q/A)。基于其以及其他数据集微调出的开源模型在性能上与闭源模型差距很大。 为了解决这一问题,我们很高兴推出 Docmatix,这是一个 DocVQA 数据集,包含 240 万张图像以及源自 130 万个 PDF 文档的 950 万对问答。与之前的数据集相比,规模扩大了 **240 倍**。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/docmatix_dataset_comp.png" alt="Docmatix 和其它 DocVQA 数据集的对比" style="width: 90%; height: auto;"><br> <em>Docmatix 和其它 DocVQA 数据集的对比</em> </p> 你可以通过下面的页面自由探索数据集并查阅 Docmatix 中包含的文档类型以及问答对。 <iframe src="https://huggingface.co/datasets/HuggingFaceM4/Docmatix/embed/viewer/default/train" frameborder="0" width="100%" height="560px" ></iframe> Docmatix 是基于 [PDFA - 一个包含 210 万个 PDF 的 OCR 数据集](https://huggingface.co/datasets/pixparse/pdfa-eng-wds) 生成的。我们从 PDFA 中转录出文本,然后用 [Phi-3-small](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) 模型生成 Q/A 对。为了确保数据集的质量,我们对模型生成的回答进行了过滤,丢弃了 15% 被识别为幻觉的 Q/A 对。另外,我们还使用正则表达式来检测代码并删除了包含关键字 “unanswerable” 的答案。Docmatix 数据集中的每一行对应于一个 PDF 文件,我们将 PDF 转换为分辨率为 150 dpi 的图像,并将处理后的图像上传至 Hugging Face Hub 以便于访问。所有样本的原始 PDF 都可以溯源至 PDFA 数据集,以最大程度提供透明度和可靠性。但考虑到将这么多 PDF 转换为图像会消耗不少资源,为方便数据集的用户起见,数据集中的样本用的是处理后的图像。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/docmatix_processing.png" alt="生成 Docmatix 的数据处理流水线" style="width: 90%; height: auto;"><br> <em>生成 Docmatix 的数据处理流水线</em> </p> 我们先处理了一小批数据集,并对其进行多次消融研究以对提示进行优化。我们的目标是每页生成大约 4 对问答。太多的话,它们之间会有很大的重叠,太少的话,则说明当前页的内容中细节较少。此外,我们的目标是让生成的答案与人类回答相似,避免过短或过长的答案。我们还比较重视问题的多样性,以确保尽量减少重复问题。有趣的是,当我们引导 [Phi-3 模型](https://huggingface.co/docs/transformers/main/en/model_doc/phi3) 根据文档中的具体信息提出问题时 (例如,“某甲的头衔是什么?”),问题几乎没有重复。下图展示了我们得到的一些关键统计分析数据: <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/docmatix_prompt_analysis.png" alt="从提示的维度分析 Docmatix" style="width: 90%; height: auto;"><br> <em>从提示的维度分析 Docmatix</em> </p> 为了评估 Docmatix 的质量,我们使用 Florence-2 模型进行了消融实验。我们训练了两个版本的模型以进行比较。第一个版本在 DocVQA 数据集上训练数个 epoch。第二个版本先在 Docmatix 上训练 1 个 epoch (仅使用 20% 的图像、4% 的 Q/A 对),然后再在 DocVQA 上训练 1 个 epoch,以确保模型的输出格式符合 DocVQA 评估的要求。结果很明显: 先对 Docmatix 进行微调可带来近 20% 的相对指标提升。此外,所得的 0.7B Florence-2 模型的性能仅比基于混合训练集训练的 8B Idefics2 模型差 5%,要知道从模型尺寸上来看 8B 可以比 0.7B 大得远不止 5%。 <div align="center"> | 数据集 | DocVQA 上的 ANSL 值 | 模型尺寸 | |--------------------------------------|----------------|----------------| | 在 DocVQA 上微调的 Florence 2 | 60.1 | 700M | | 在 Docmatix 上微调的 Florence 2 | 71.4 | 700M | | Idefics2 | 74.0 | 8B | </div> <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/4.36.1/gradio.js"></script> <gradio-app theme_mode="light" 
src="https://HuggingFaceM4-Docmatix-Florence-2.hf.space"></gradio-app> ## 总结 本文介绍了 Docmatix,一个用于 DocVQA 的超大数据集。我们的结果表明,使用 Docmatix 在微调 Florence-2 时,我们可以将 DocVQA 性能提高 20%。该数据集有助用户弥合开源 VLM 相对于闭源 VLM 的性能差距。我们鼓励开源社区利用 Docmatix 去训练新的的 DocVQA 模型,创造新的 SOTA!我们迫不及待地想在 🤗 Hub 上看到你的模型! ## 有用的资源 - [Docmatix 微调 Florence-2 所得模型的演示](https://huggingface.co/spaces/HuggingFaceM4/Docmatix-Florence-2) - [微调 Florence-2 - 微软的尖端视觉语言模型](https://huggingface.co/blog/zh/finetune-florence2) - [Florence-2 微调的 Github 代码库](https://github.com/andimarafioti/florence2-finetuning) - [视觉语言模型详解](https://huggingface.co/blog/zh/vlms) 我们要感谢 merve 和 leo 对本文的审阅并提供了缩略图。
7
0
hf_public_repos/blog
hf_public_repos/blog/zh/whisper-speculative-decoding.md
--- title: "使用推测解码使 Whisper 实现 2 倍的推理加速" thumbnail: /blog/assets/whisper-speculative-decoding/thumbnail.png authors: - user: sanchit-gandhi translators: - user: yaoqih - user: zhongdongy proofreader: true --- # 使用推测解码使 Whisper 实现 2 倍的推理加速 <a target="_blank" href="https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/speculative_decoding.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> Open AI 推出的 [Whisper](https://openai.com/research/whisper) 是一个通用语音转录模型,在各种基准和音频条件下都取得了非常棒的结果。最新的 [large-v3](https://huggingface.co/openai/whisper-large-v3) 模型登顶了 [OpenASR 排行榜](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard),被评为最佳的开源英语语音转录模型。该模型在 Common Voice 15 数据集的 58 种语言中也展现出了强大的多语言性能,在 42 种语言上的单词错误率 (WER) 低于 30%。 尽管转录准确度非常优秀,但推理速度非常缓慢。即使利用 [flash attention](https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2) 、半精度和 [分块](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline.chunk_length_s) 等优化推理技术,1 小时长度的音频在 16GB T4 GPU 上也需要超过 6 分钟的转录时间。 在本文中,我们将演示如何运用推测解码将 Whisper 的推理时间缩减 **2 倍**,同时在数学上确保完全取得与原模型 **相同的输出**。因此,这种方法可以完美地替换现有的 Whisper 流水线,因为它可以在不降低准确性的情况下免费获得 2 倍的加速。想要看附带有更简洁解释的全部代码,请参阅配套的 [Google Colab](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/speculative_decoding.ipynb)。 ## 推测解码 推测解码由 Yaniv Leviathan 等人在 [Fast Inference from Transformers via Speculative Decoding](https://arxiv.org/abs/2211.17192) 中提出。其思想是,一个更快的 **辅助模型** 通常会生成和更大的 **主模型** 相同的 token。 首先,辅助模型会通过自回归生成 $N$ 个 _候选 token_ 序列: $\hat{\boldsymbol{y}}_{1:N}$。在下图中,辅助模型生成了一个包含 5 个候选 token 的序列: `The quick brown sock jumps` 。 <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_1.mp4" ></video> </figure> 尽管这些候选 token 可以快速生成,但它们可能与主模型预测的 token 不同。因此,在第二步中,候选 token 被传入主模型以进行“验证”。主模型将候选 token 作为输入,并执行 **单次前馈传播**。主模型的输出是每个步骤中“正确”token 的序列 $ \boldsymbol{y}_{1:N}$。 <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_2.mp4" ></video> </figure> 在上图中,我们看到主模型预测的前三个 token 与辅助模型的 token 一致: `<span style="color:green">` The quick brown 但是,辅助模型的第四个候选 token: “ `<span style="color:red">` sock”与主模型的正确 token: “ `<span style="color:green">` fox”不一致。 我们知道,所有候选 token 一直到第一个不匹配之前都是正确的 ( `<span style="color:green">` The quick brown),因为这些与主模型的预测一致。但是,在第一个不匹配之后,候选 token 开始偏离主模型实际预测的 token。因此,我们可以用主模型的正确 token ( `<span style="color:green">` fox) 替换第一个不正确的候选 token ( `<span style="color:red">` sock),并放弃之后所有预测的 token,因为这些已经逐渐偏离主模型的预测。经过校正的序列 `The quick brown fox` 现在成为辅助模型的新输入: <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_3.mp4" ></video> </figure> 然后,辅助模型再次通过自回归推理,生成一组新的 $N$ 个候选 token,这些 token 再次通过主模型的单次前馈传播进行验证。 <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_4.mp4" ></video> </figure> 
由于我们在生成的时候使用的快速的辅助模型进行自回归,并且缓慢的主模型仅用于验证前馈传播,解码过程将大大加快。此外,经过主模型前馈传播验证后可以确保与仅使用主模型时获得完全相同的输出。这使得推测解码可以完美地替换现有的 Whisper 流水线,因为我们可以确定会取得相同质量的输出。 为了最大限度地减少延迟,辅助模型应该比主模型快得多,同时尽可能频繁地预测相同的 token 分布。实际上,这两个属性之间需要权衡: 模型越快,其准确度越低。然而,由于所有预测 token 中的 70-80% 往往是“较易”的 token,此权衡倾向于选择一个更快的模型,而不是一个更准确的模型。因此,辅助模型应该至少比主模型快 3 倍 (越快越好),同时在示例中正确预测所有较“易”token。剩余的 20-30% 更“难”的 token 可以由更大的主模型进行验证。 选择辅助模型的唯一约束是它必须与主模型使用相同的词汇表。也就是说,辅助模型必须使用与主模型完全一对一相同的分词器。因此,如果我们想对诸如 [large-v2](https://huggingface.co/openai/whisper-large-v2) (多语言) 的 Whisper 多语言版本使用推测解码,我们需要选择诸如 [tiny](https://huggingface.co/openai/tiny) 的 Whisper 多语言版本作为辅助模型。而如果我们想对诸如 [medium.en](https://huggingface.co/openai/whisper-medium.en) 的 Whisper 英文版本使用推测解码,我们需要选择诸如 [tiny.en](https://huggingface.co/openai/tiny.en) 的 Whisper 英文版本作为辅助模型。目前,[large-v3](https://huggingface.co/openai/whisper-large-v3) 是唯一一个扩展了词汇量的 Whisper 检查点,因此与以前的 Whisper 检查点不兼容。 现在我们已经了解了推测解码背后的原理,我们准备实际实现它。在 [🤗 Transformers](https://huggingface.co/docs/transformers/index) 库中,推测解码被实现为“辅助生成 (Assisted Generation)”推理策略。欲了解更多实现细节,建议读者阅读 Joao Gante 关于 [辅助生成](https://huggingface.co/blog/assisted-generation) 的精彩博文。 ## 英文语音转录 ### 基准实现 我们首先使用 Whisper [large-v2](https://huggingface.co/openai/whisper-large-v2) 进行基准测试,以获得推理速度的基准数值。我们可以通过便捷的 [`AutoModelForSpeechSeq2Seq`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForSpeechSeq2Seq) 和 [`AutoProcessor`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoProcessor) 类加载主模型及其对应的处理器。我们将以 `float16` 精度加载模型,并通过传递 [`low_cpu_mem_usage=True`](https://huggingface.co/docs/transformers/main_classes/model#large-model-loading) 确保加载时间尽可能少。此外,我们要确保模型以 [safetensors](https://huggingface.co/docs/diffusers/main/en/using-diffusers/using_safetensors) 格式加载,方法是传递 [`use_safetensors=True`](https://huggingface.co/docs/transformers/main_classes/model#transformers.PreTrainedModel.from_pretrained.use_safetensors)。最后,我们将传递参数 `attn_implementation="sdpa"` ,以通过 PyTorch 的 [SDPA 注意力内核](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) 进行 Flash 注意力加速。 ```python import torch from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "openai/whisper-large-v2" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) ``` 让我们加载将用于基准测试的英语语音转录数据集。我们将加载 [LibriSpeech ASR](https://huggingface.co/datasets/librispeech_asr) 中验证数据集的 clean 分组中的 73 个样本组成的小型数据集。这大约有 9MB 的数据,因此非常轻量且可以快速下载到设备上。 ```python from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ``` 对于基准测试,我们只想测量生成时间,所以让我们编写一个简短的辅助函数来测量此步骤运行的时间。下面的函数将同时返回解码的 token 和运行模型所需的时间: ```python import time def generate_with_time(model, inputs, **kwargs): start_time = time.time() outputs = model.generate(**inputs, **kwargs) generation_time = time.time() - start_time return outputs, generation_time ``` 现在我们可以迭代语音数据集中的音频样本,并统计整体生成时间: ```python from tqdm import tqdm all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = generate_with_time(model, inputs) all_time 
+= gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["text"])) print(all_time) ``` **Output:** ``` 100%|██████████| 73/73 [01:37<00:00, 1.33s/it] 72.99542546272278 ``` 很好!我们看到转录 73 个样本花了 73 秒。让我们检查一下预测的 WER: ```python from evaluate import load wer = load("wer") print(wer.compute(predictions=predictions, references=references)) ``` **Output:** ``` 0.03507271171941831 ``` 我们的最终基准数值为 73 秒,WER 为 3.5%。 ### 推测解码 现在让我们加载推测解码的辅助模型。在此示例中,我们将使用 Whisper 蒸馏后的版本 [distil-large-v2](https://huggingface.co/distil-whisper/distil-large-v2)。蒸馏模型只使用了 Whisper 中 32 个解码器层中的 2 个编码器。因此,它比 Whisper 快 6 倍,同时在分布测试集上的 WER 性能相比于蒸馏前仅下降了 1%。这使其成为理想的辅助模型,因为它在转录准确性和生成速度方面都非常优秀${}^1$。 --- ${}^1$ 我们即将发布 Distil-Whisper 的改进版本,在 token 分布中具有更佳的对齐性,这将进一步提高推测解码性能。关注 [Distil-Whisper 存储库](https://github.com/huggingface/distil-whisper) 来追踪最新的更新信息。 --- 由于 Distil-Whisper 使用与 Whisper 模型完全相同的编码器,我们可以在主模型和辅助模型之间共享编码器。然后,我们只需要从 Distil-Whisper 加载 2 层解码器作为“仅解码器”模型。我们可以通过便捷的 [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) 自动类实现这一点。在实践中,相比于仅使用主模型,这仅增加了 8%的 VRAM 占用量。 ```python from transformers import AutoModelForCausalLM assistant_model_id = "distil-whisper/distil-large-v2" assistant_model = AutoModelForCausalLM.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) assistant_model.to(device) ``` 我们可以为推测解码的基准测试定义一个新的函数。与前面的函数唯一的区别是,我们在对 `.generate` 的调用中传递辅助模型: ```python def assisted_generate_with_time(model, inputs, **kwargs): start_time = time.time() outputs = model.generate(**inputs, assistant_model=assistant_model, **kwargs) generation_time = time.time() - start_time return outputs, generation_time ``` 让我们使用 Distil-Whisper 作为 Whisper 的助手运行推测解码的基准测试: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = assisted_generate_with_time(model, inputs) all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["text"])) print(all_time) ``` **Outputs:** ``` 100%|██████████| 73/73 [00:38<00:00, 1.88it/s] 32.69683289527893 ``` 使用推测解码,推理时间仅为 33 秒,比之前快 2.2 倍!让我们验证一下 WER 是否相同: ```python print(wer.compute(predictions=predictions, references=references)) ``` **Outputs:** ``` 0.03507271171941831 ``` 太完美了!再次达到 3.5%的 WER,因为我们的输出与仅使用主模型的时候完全相同。 推测解码也可以与基础的 🤗 Transformers [pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) API 一起用于推理。下面,我们使用模型和处理器实例化管道,然后使用它来转录测试数据集中的第一个样本。这可以扩展为转录任意长度的音频样本,包括进行批处理: ```python from transformers import pipeline pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=4, generate_kwargs={"assistant_model": assistant_model}, torch_dtype=torch_dtype, device=device, ) sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` **Outputs:** ``` Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel. 
``` 使用 Whisper 和 Distil-Whisper 运行推测解码的端到端代码示例可在 [Distil-Whisper 模型卡](https://huggingface.co/distil-whisper/distil-large-v2#speculative-decoding) 中找到。它将本文中涵盖的推理阶段组合成一个代码示例。 ## 多语言语音转录 Distil-Whisper 是英语语音转录的最佳辅助模型,因为它与原始 Whisper 模型的 WER 误差率仅相差 1%,而对短长语音样本的推理速度提高了 6 倍。然而,官方的 Distil-Whisper 检查点仅支持英语,这意味着它们无法用于多语言语音转录。 要使用推测解码进行多语言语音转录,您可以使用 [官方 Whisper 多语言检查点](https://huggingface.co/openai/whisper-large-v2#model-details) 之一,或者 Whisper 的微调版本。在撰写本文时,Hugging Face Hub 上已有超过 5000 个微调过的 Whisper 检查点,支持超过 100 种语言。这些为选择表现出色的辅助模型提供了极好的起点。在此示例中,我们将使用最小的官方多语言检查点 Whisper [tiny](https://huggingface.co/openai/whisper-tiny)。您可以使用任意一个您的语言中微调过的不同检查点! 让我们为新的辅助模型 Whisper tiny 加载权重。由于 Whisper tiny 的编码器与 large-v2 不同,这次我们将使用 `AutoModelForSpeechSeq2Seq` 类同时加载编码器和解码器: ```python assistant_model_id = "openai/whisper-tiny" assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) assistant_model.to(device); ``` 我们的基准数据集,将从 [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) 数据集的荷兰语 (“nl”) 部分中加载 73 个样本: ```python dataset = load_dataset("sanchit-gandhi/voxpopuli_dummy", "nl", split="validation") ``` 非常好!现在我们可以像前面一样重新运行我们的 Whisper large-v2 模型的基准测试。我们所做的唯一更改是在 generate 函数中传递语言和任务参数,以确保执行语音转录 (而不是语音翻译)。推测解码完全兼容语音转录和翻译任务。只需如下所示设置任务参数即可: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = generate_with_time(model, inputs, language="nl", task="transcribe") all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["normalized_text"])) wer_result = wer.compute(predictions=predictions, references=references) print("Time:", all_time) print("WER:", wer_result) ``` **Outputs:** ``` 100%|██████████| 73/73 [02:05<00:00, 1.72s/it] Time: 116.50992178916931 WER: 0.127190136275146 ``` 没错!我们的基准时间为 117 秒,WER 为 12.8%。让我们使用推测解码重新运行生成过程: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = assisted_generate_with_time(model, inputs, language="nl", task="transcribe") all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["normalized_text"])) wer_result = wer.compute(predictions=predictions, references=references) print("Time:", all_time) print("WER:", wer_result) ``` **Outputs:** ``` 100%|██████████| 73/73 [01:08<00:00, 1.06it/s] Time: 62.10229682922363 WER: 0.127190136275146 ``` Nice!我们达到了 12.8% 的 WER,但这次的推理时间只有 62 秒,表示速度提高了 1.9 倍。考虑到加载辅助模型的低开销和确保获得完全相同输出的数学证明,推测解码为现有的 Whisper 管道提供了完美的即插即用的替代方案。 ## 高效推测解码的策略 在本最终部分,我们将介绍两种策略,以确保使用推测解码时获得可能最快的推理时间。 #### 辅助模型 我们的目标是选择一个至少比主模型快 3 倍 **并且** 正确转录至少 70-80% 的预测 token (通常是示例中的“更简单”token) 的辅助模型。如果您想要转录某种特定语言,一种有效的策略是训练两个不同大小的 Whisper 模型,并将其中一个用作另一个的辅助模型: - 首先,微调 Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) 以用作主模型 - 其次,在同一数据集上蒸馏 Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) 以用作快速的辅助模型 微调和蒸馏都可以提高主模型和辅助模型在您选择的语言上的 WER 性能,同时最大化 token 分布的对齐。有关 Whisper 微调的完整指南,请参阅 
[此处](https://huggingface.co/blog/fine-tune-whisper),有关蒸馏的指南请参阅 [此处](https://github.com/huggingface/distil-whisper/tree/main/training)。#### 批次大小 值得注意的是,使用推测解码获得的最大速度提升出现在批次大小为 1 时。对于批处理推测解码,批次内所有序列在同一位置的候选 token 都必须与验证 token 相匹配,这些候选 token 才会被接受。如果批次中某个序列在给定位置的 token 不一致,则该位置及其之后的所有候选 token 都将被丢弃。因此,推测解码更倾向于较小的批次大小。在实践中,我们发现推测解码可以提供速度提升,直到批次大小达到 4 为止。当批次大小超过 4 时,推测解码的推理速度比仅用主模型还要慢。有关完整结果,请参阅 [Distil-Whisper 论文](https://arxiv.org/pdf/2311.00430.pdf) 的第 D.3 节。 ## 结论 在本博文中,我们介绍了推测解码的推理策略,以及如何将其应用于语音转录的 Whisper 模型。我们展示了如何实现 2 倍的速度提升,同时数学上确保获得与仅使用原始模型相同的输出。我们鼓励您尝试将推测解码用作现有 Whisper 管道的即插即用替代方案,因为使用额外的辅助模型的开销很小,并且可以保证获得相同的转录结果。 ## 致谢 本博客由 [Sanchit Gandhi](https://huggingface.co/sanchit-gandhi) 撰写。非常感谢 [Patrick von Platen](https://huggingface.co/patrickvonplaten) 和 [Pedro Cuenca](https://huggingface.co/pcuenq) 的建设性意见,以及 [Joao Gante](https://huggingface.co/joaogante) 在 🤗 Transformers 中实现辅助生成的贡献。
8
0
hf_public_repos/blog
hf_public_repos/blog/zh/assisted-generation.md
--- title: "辅助生成:低延迟文本生成的新方向" thumbnail: /blog/assets/assisted-generation/thumbnail.png authors: - user: joaogante translators: - user: gxy-gxy - user: zhongdongy proofreader: true --- # 辅助生成: 低延迟文本生成的新方向 大型语言模型如今风靡一时,许多公司投入大量资源来扩展它们规模并解锁新功能。然而,作为注意力持续时间不断缩短的人类,我们并不喜欢大模型缓慢的响应时间。由于延迟对于良好的用户体验至关重要,人们通常使用较小的模型来完成任务,尽管它们的质量较低 (例如 [代码补全任务](https://ai.googleblog.com/2022/07/ml-enhanced-code-completion-improves.html))。 为什么文本生成这么慢?是什么阻止你在不破产的情况下部署低延迟大型语言模型?在这篇博文中,我们将重新审视自回归文本生成的瓶颈,并介绍一种新的解码方法来解决延迟问题。你会发现,通过使用我们的新的辅助生成方法,你可以将硬件中的延迟降低多达 10 倍! ## 理解文本生成延迟 文本生成的核心很容易理解。让我们看看核心部分 (即 ML 模型),它的输入包含一个文本序列,其中包括到目前为止生成的文本,以及其他特定于模型的组件 (例如 Whisper 还有一个音频输入)。该模型接受输入并进行前向传递: 输入被喂入模型并一层一层顺序传递,直到预测出下一个 token 的非标准化对数概率 (也称为 logits)。一个 token 可能包含整个词、子词,或者是单个字符,这取决于具体模型。如果你想深入了解文本生成的原理,[GPT-2 插图](https://jalammar.github.io/illustrated-gpt2/) 是一个很好的参考。 <!-- [GIF 1 -- FWD PASS] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov" ></video> </figure> 模型的前向传递提供了下一个 token 的概率,你可以自由操作 (例如,将不需要的单词或序列的概率设置为 0)。文本生成的步骤就是从这些概率中选择下一个 token。常见的策略包括选择最有可能的 token (贪心解码),或从它们的分布中抽样 (多项式抽样)。在选择了下一个 token 之后,我们将模型前向传递与下一个 token 迭代地连接起来,继续生成文本。这个解释只是解码方法的冰山一角; 请参阅我们 [关于文本生成的博客](https://huggingface.co/blog/zh/how-to-generate) 以进行深入探索。 <!-- [GIF 2 -- TEXT GENERATION] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov" ></video> </figure> 从上面的描述中可以看出,文本生成的延迟瓶颈很明显: 运行大型模型的前向传递很慢,你可能需要依次执行数百次迭代。但让我们深入探讨一下: 为什么前向传递速度慢?前向传递通常以矩阵乘法为主,通过查阅相应的 [维基百科](https://en.wikipedia.org/wiki/Matrix_multiplication_algorithm#Communication-avoiding_and_distributed_algorithms),你可以看出内存带宽是此操作的限制 (例如,从 GPU RAM 到 GPU 计算核心)。换句话说, _前向传递的瓶颈来自将模型权重加载到设备的计算核心中,而不是来自执行计算本身_。 目前,你可以探索三个主要途径来充分理解文本生成,所有这些途径都用于解决模型前向传递的性能问题。首先,对于特定硬件的模型优化。例如,如果你的设备可能与 [Flash Attention](https://github.com/HazyResearch/flash-attention) 兼容,你可以使用它通可以过重新排序操作或 [INT8 量化](https://huggingface.co/blog/zh/hf-bitsandbytes-integration) 来加速注意力层,其减少了模型权重的大小。 其次,如果你有并发文本生成需求,你可以对输入进行批处理,从而实现较小的延迟损失并大幅增加吞吐量。你可以将模型对于多个输入并行计算,这意味着你将在大致相同的内存带宽负担情况下获得了更多 token。批处理的问题在于你需要额外的设备内存 (或在某处卸载内存)。你可以看到像 [FlexGen](https://github.com/FMInference/FlexGen) 这样的项目以延迟为代价来优化吞吐量。 ```python # Example showcasing the impact of batched generation. 
Measurement device: RTX3090 from transformers import AutoModelForCausalLM, AutoTokenizer import time tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2").to("cuda") inputs = tokenizer(["Hello world"], return_tensors="pt").to("cuda") def print_tokens_per_second(batch_size): new_tokens = 100 cumulative_time = 0 # warmup model.generate( **inputs, do_sample=True, max_new_tokens=new_tokens, num_return_sequences=batch_size ) for _ in range(10): start = time.time() model.generate( **inputs, do_sample=True, max_new_tokens=new_tokens, num_return_sequences=batch_size ) cumulative_time += time.time() - start print(f"Tokens per second: {new_tokens * batch_size * 10 / cumulative_time:.1f}") print_tokens_per_second(1) # Tokens per second: 418.3 print_tokens_per_second(64) # Tokens per second: 16266.2 (~39x more tokens per second) ``` 最后,如果你有多个可用设备,你可以使用 [Tensor 并行](https://huggingface.co/docs/transformers/main/en/perf_train_gpu_many#tensor-parallelism) 分配工作负载并获得更低的延迟。使用 Tensor 并行,你可以将内存带宽负担分摊到多个设备上,但除了在多个设备运行计算的成本之外,你还需要考虑设备间的通信瓶颈。该方法的收益在很大程度上取决于模型大小: 对于可以轻松在单个消费级设备上运行的模型,通常效果并不显著。根据这篇 [DeepSpeed 博客](https://www.microsoft.com/en-us/research/blog/deepspeed-accelerating-large-scale-model-inference-and-training-via-system-optimizations-and-compression/),你会发现你可以将大小为 17B 的模型分布在 4 个 GPU 上,从而将延迟减少 1.5 倍 (图 7)。 这三种类型的改进可以串联使用,从而产生 [高通量解决方案](https://github.com/huggingface/text-generation-inference)。然而,在应用特定于硬件的优化后,降低延迟的方法有限——并且现有的方法很昂贵。让我们接下来解决这个问题! ## 重新回顾语言模型解码器的正向传播 上文我们讲到,每个模型前向传递都会产生下一个 token 的概率,但这实际上是一个不完整的描述。在文本生成期间,典型的迭代包括模型接收最新生成的 token 作为输入,加上所有其他先前输入的缓存内部计算,再返回下一个 token 得概率。缓存用于避免冗余计算,从而实现更快的前向传递,但它不是强制性的 (并且可以设置部分使用)。禁用缓存时,输入包含到目前为止生成的整个 token 序列,输出包含 _所有位置_的下一个 token 对应的概率分布!如果输入由前 N 个 token 组成,则第 N 个位置的输出对应于其下一个 token 的概率分布,并且该概率分布忽略了序列中的所有后续 token。在贪心解码的特殊情况下,如果你将生成的序列作为输入传递并将 argmax 运算符应用于生成的概率,你将获得生成的序列。 ```python from transformers import AutoModelForCausalLM, AutoTokenizer tok = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2") inputs = tok(["The"], return_tensors="pt") generated = model.generate(**inputs, do_sample=False, max_new_tokens=10) forward_confirmation = model(generated).logits.argmax(-1) # We exclude the opposing tips from each sequence: the forward pass returns # the logits for the next token, so it is shifted by one position. 
print(generated[:-1].tolist() == forward_confirmation[1:].tolist()) # True ``` 这意味着你可以将模型前向传递用于不同的目的: 除了提供一些 token 来预测下一个标记外,你还可以将序列传递给模型并检查模型是否会生成相同的序列 (或部分相同序列)。 <!-- [GIF 3 -- FWD CONFIRMATION] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_3_1080p.mov" ></video> </figure> 让我们想象,你可以访问一个神奇的无延迟的预测辅助模型,该模型针对任何给定输入生成与你的模型相同的序列。顺便说一句,这个模型不能直接用,只能辅助你的生成程序。使用上述属性,你可以使用此辅助模型获取候选输出 token,然后使用你的模型进行前向传递以确认它们的正确性。在这个乌托邦式的场景中,文本生成的延迟将从 `O(n)` 减少到 `O(1)`,其中生成的 token 数量为 `n`。对于需要多次迭代生成的过程,我们谈论的是其数量级。 向现实迈出一步,我们假设辅助模型失去了它的预测属性。根据你的模型,现在它是一个无延迟模型,但它会弄错一些候选 token。由于任务的自回归性质,一旦辅助模型得到一个错误的 token,所有后续候选 token 都必须无效。但是,你可以使用模型更正错误 token 并反复重复此过程后再次查询辅助模型。即使辅助模型失败了几个 token,文本生成的延迟也会比原始形式小得多。 显然,世界上没有无延迟的辅助模型。然而,找到一个近似于模型的文本生成输出的其它模型相对容易,例如经过类似训练的相同架构的较小版本模型通常符合此需求。当模型大小的差异变得显著时,使用较小的模型作为辅助模型的成本在跳过几个前向传递后就显得无关紧要了!现在,你了解了 _ 辅助生成 _ 的核心。 ## 使用辅助模型的贪心解码 辅助生成是一种平衡行为。你希望辅助模型快速生成候选序列,同时尽可能准确。如果辅助模型的质量很差,你将承担使用辅助模型的成本,而收益却很少甚至没有。另一方面,优化候选序列的质量可能意味着使用更慢的辅助模型,从而导致网络减速。虽然我们无法为你自动选择辅助模型,但我们包含了一个额外的要求和一个启发式方法,以确保模型与辅助模型一起花费的时间保持在可控范围内。 首先,我们要求辅助模型必须具有与你的模型完全相同的分词器。如果没有此要求,则必须添加昂贵的 token 解码和重新编码步骤。此外,这些额外的步骤必须在 CPU 上进行,这反过来可能增加了设备间数据传输。能够快速地使用辅助模型对于辅助生成的好处是至关重要的。 最后,启发式。至此,你可能已经注意到电影盗梦空间和辅助生成之间的相似之处——毕竟你是在文本生成中运行文本生成。每个候选 token 有一个辅助模型前向传播,我们知道前向传播是昂贵的。虽然你无法提前知道辅助模型将获得的 token 数量,但你可以跟踪此信息并使用它来限制向辅助模型请求的候选 token 数量——输出的某些部分比其它一些部分更容易被预计。 总结一下,这是我们最初实现的辅助生成的循环 ([代码](https://github.com/huggingface/transformers/blob/849367ccf741d8c58aa88ccfe1d52d8636eaf2b7/src/transformers/generation/utils.py#L4064)): 1. 使用贪心解码与辅助模型生成一定数量的`候选 token`。当第一次调用辅助生成时,生成的`候选 token` 的数量被初始化为 `5`。 2. 使用我们的模型,对`候选 token `进行前向计算,获得每个 token 对应的概率。 3. 使用 token 选择方法 (使用`.argmax()` 进行贪心搜索或使用 `.multinomial()` 用于采样方法) 来从概率中选取 `next_tokens`。 4. 比较步骤 3 中选择的 `next_tokens` 和 `候选 token` 中相同的 token 数量。请注意,我们需要从左到右进行比较, 在第一次不匹配后,后续所有 `候选 token`都无效。 5. 使用步骤 4 得到的匹配数量将`候选 token` 分割。也就是,将输入 tokens 加上刚刚验证得到的正确的 tokens。 6. 调整下一次迭代中生成的`候选 token` 的数量 —— 使用启发式方法,如果步骤 3 中所有 token 都匹配,则`候选 token` 的长度增加 `2`,否则减少 `1`。 <!-- [GIF 4 -- ASSISTED GENERATION] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_4_1080p.mov" ></video> </figure> 我们在 🤗 Transformers 中设计了 API,因此使用该方法对你来说是无痛的。你需要做的就是将辅助模型作为 `assistant_model` 参数传入从而获得延迟收益!我们暂时限制了辅助生成的批量大小为 `1`。 ```python from transformers import AutoModelForCausalLM, AutoTokenizer import torch prompt = "Alice and Bob" checkpoint = "EleutherAI/pythia-1.4b-deduped" assistant_checkpoint = "EleutherAI/pythia-160m-deduped" device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained(checkpoint) inputs = tokenizer(prompt, return_tensors="pt").to(device) model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device) assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint).to(device) outputs = model.generate(**inputs, assistant_model=assistant_model) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) # ['Alice and Bob are sitting in a bar. 
Alice is drinking a beer and Bob is drinking a'] ``` 额外的内部复杂性是否值得?让我们看一下贪心解码情况下的延迟数 (采样结果在下一节)。考虑批量大小为 1,这些结果是直接从 🤗 Transformers 中提取的,没有任何额外的优化,因此你应该能够在你的设置中复现它们。 <!-- [SPACE WITH GREEDY DECODING PERFORMANCE NUMBERS] --> <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.28.2/gradio.js" ></script> <gradio-app theme_mode="light" space="joaogante/assisted_generation_benchmarks"></gradio-app> 通过观察收集到的数据,我们发现辅助生成可以在不同的设置中显著减少延迟,但这不是灵丹妙药——你应该在应用之前对其进行系统的评估以清晰使用该方法的代价。对于辅助生成方法,我们可以得出结论: 1. 🤏 需要访问至少比你的模型小一个数量级的辅助模型 (差异越大越好) ; 2. 🚀 在存在 INT8 的情况下获得高达 3 倍的加速,否则能够达到 2 倍的加速; 3. 🤯 如果你正在使用不适合你的模型的 GPU 并且依赖于内存卸载的模型,你可以看到高达 10 倍的加速; 4. 📄 在输入驱动任务中大放异彩,例如自动语音识别或摘要。 ## 辅助生成的采样方法 贪心解码适用于以输入为基础的任务 (自动语音识别、翻译、摘要……) 或事实知识寻求。对于需要大量创造力的开放式任务,例如使用语言模型作为聊天机器人的大多数任务,应该改用采样方法。虽然辅助生成方法是为贪心解码而设计的,但这并不意味着你不能使用多项式采样进行辅助生成! 从 `next token` 的概率分布中抽取样本将导致我们的基于贪心的辅助生产更频繁地失败,从而降低其延迟优势。但是,我们可以使用采样中的温度系数来控制下一个标记的概率分布有多尖锐。在一种极端情况下,当温度接近 0 时,采样将近似于贪心解码,有利于最有可能的 token。在另一个极端,当温度设置为远大于 1 的值时,采样将是混乱的,从均匀分布中抽取。因此,低温对你的辅助模型更有利,能够保留辅助生成的大部分延迟优势,如下所示。 <!-- [TEMPERATURE RESULTS, SHOW THAT LATENCY INCREASES STEADILY WITH TEMP] --> <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/temperature.png"/> </div> 不妨亲眼看一看,感受一下辅助生成的魅力? <!-- [DEMO] --> <gradio-app theme_mode="light" space="joaogante/assisted_generation_demo"></gradio-app> ## 未来发展方向 辅助生成表明当前文本生成策略已经到了可优化的阶段。我们意识到它目前的难点不在于计算量的问题,因此可以应用简单的启发式方法来充分利用可用的内存带宽,缓解瓶颈。我们相信,进一步优化辅助模型将使我们获得更大的延迟降低——例如,如果我们请求辅助模型生成多个连续候选 token,我们可能能够跳过更多的前向传递。自然地,使用高质量的小模型作为辅助模型对于实现和扩大收益至关重要。 该方法最初在我们的 🤗 Transformers 库下发布,用于 `.generate()` 函数,我们预期将其纳入整个 Hugging Face 宇宙。它的实现也是完全开源的。因此,如果你正在进行文本生成而没有使用我们的工具,你可以随时将其作为参考。 最后,辅助生成重新提出了文本生成中的一个关键问题: 模型中所有新 token 都是给定模型以自回归方式计算的结果,同质地前向传递每一个 token。这篇博文提出了这样的想法: 生成的大部分序列也可以由小尺寸的模型同样生成。为此,我们需要新的模型架构和解码方法——我们很高兴看到未来会带来什么! ## 相关工作 在这篇博文最初发布后,我注意到其他作品也探索了相同的核心原则 (使用前向传递来验证更长的连续性)。特别地,请看以下作品: - [分块并行解码](https://proceedings.neurips.cc/paper/2018/file/c4127b9194fe8562c64dc0f5bf2c93bc-Paper.pdf), 来自 Google Brain - [推测性采样](https://arxiv.org/abs/2302.01318), 来自 DeepMind ## Citation ```bibtex @misc {gante2023assisted, author = { {Joao Gante} }, title = { Assisted Generation: a new direction toward low-latency text generation }, year = 2023, url = { https://huggingface.co/blog/assisted-generation }, doi = { 10.57967/hf/0638 }, publisher = { Hugging Face Blog } } ``` ## 致谢 我要感谢 Sylvain Gugger、Nicolas Patry 和 Lewis Tunstall 分享了许多宝贵的建议来改进这篇博文。最后,感谢 Chunte Lee 设计了精美的封面,你可以在我们的网页上看到。
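作为附注,下面用一个最小示意实现回顾前文循环中第 6 步的候选数量启发式(仅为说明性草图,并非 🤗 Transformers 的实际源码;循环中的匹配数为虚构的演示数据):

```python
def adjust_num_candidates(num_candidates: int, num_matches: int) -> int:
    """若上一轮生成的候选 token 全部被主模型接受,则下一轮多生成 2 个候选,否则减少 1 个。"""
    if num_matches == num_candidates:
        return num_candidates + 2
    return max(1, num_candidates - 1)

num_candidates = 5                       # 初始候选数量
for accepted in [5, 7, 3, 2]:            # 每轮被主模型接受的 token 数(演示数据)
    num_candidates = adjust_num_candidates(num_candidates, min(accepted, num_candidates))
    print(num_candidates)                # 依次输出 7, 9, 8, 7
```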
9
0
hf_public_repos
hf_public_repos/blog/ai-residency.md
--- title: "Announcing the 🤗 AI Research Residency Program" thumbnail: /blog/assets/57_ai_residency/residency-thumbnail.jpg authors: - user: douwekiela --- # Announcing the 🤗 AI Research Residency Program 🎉 🎉 🎉 The 🤗 Research Residency Program is a 9-month opportunity to launch or advance your career in machine learning research 🚀. The goal of the residency is to help you grow into an impactful AI researcher. Residents will work alongside Researchers from our Science Team. Together, you will pick a research problem and then develop new machine learning techniques to solve it in an open & collaborative way, with the hope of ultimately publishing your work and making it visible to a wide audience. Applicants from all backgrounds are welcome! Ideally, you have some research experience and are excited about our mission to democratize responsible machine learning. The progress of our field has the potential to exacerbate existing disparities in ways that disproportionately hurt the most marginalized people in society — including people of color, people from working-class backgrounds, women, and LGBTQ+ people. These communities must be centered in the work we do as a research community. So we strongly encourage proposals from people whose personal experience reflects these identities.. We encourage applications relating to AI that demonstrate a clear and positive societal impact. ## How to Apply Since the focus of your work will be on developing Machine Learning techniques, your application should show evidence of programming skills and of prerequisite courses, like calculus or linear algebra, or links to an open-source project that demonstrates programming and mathematical ability. More importantly, your application needs to present interest in effecting positive change through AI in any number of creative ways. This can stem from a topic that is of particular interest to you and your proposal would capture concrete ways in which machine learning can contribute. Thinking through the entire pipeline, from understanding where ML tools are needed to gathering data and deploying the resulting approach, can help make your project more impactful. We are actively working to build a culture that values diversity, equity, and inclusivity. We are intentionally building a workplace where people feel respected and supported—regardless of who you are or where you come from. We believe this is foundational to building a great company and community. Hugging Face is an equal opportunity employer and we do not discriminate on the basis of race, religion, color, national origin, gender, sexual orientation, age, marital status, veteran status, or disability status. [Submit your application here](https://apply.workable.com/huggingface/j/1B77519961). ## FAQs * **Can I complete the program part-time?**<br>No. The Residency is only offered as a full-time position. * **I have been out of school for several years. Can I apply?**<br>Yes. We will consider applications from various backgrounds. * **Can I be enrolled as a student at a university or work for another employer during the residency?**<br>No, the residency can’t be completed simultaneously with any other obligations. * **Will I receive benefits during the Residency?**<br>Yes, residents are eligible for most benefits, including medical (depending on location). * **Will I be required to relocate for this residency?**<br>Absolutely not! We are a distributed team and you are welcome to work from wherever you are currently located. 
* **Is there a deadline?**<br>Applications close on April 3rd, 2022!
0
0
hf_public_repos
hf_public_repos/blog/whisper-speculative-decoding.md
--- title: "Speculative Decoding for 2x Faster Whisper Inference" thumbnail: /blog/assets/whisper-speculative-decoding/thumbnail.png authors: - user: sanchit-gandhi --- # Speculative Decoding for 2x Faster Whisper Inference <a target="_blank" href="https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/speculative_decoding.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> Open AI's [Whisper](https://openai.com/research/whisper) is a general purpose speech transcription model that achieves state-of-the-art results across a range of different benchmarks and audio conditions. The latest [large-v3](https://huggingface.co/openai/whisper-large-v3) model tops the [OpenASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard), ranking as the best open-source speech transcription model for English. The model also demonstrates strong multilingual performance, achieving less than 30% word error rate (WER) on 42 of the 58 languages tested in the Common Voice 15 dataset. While the transcription accuracy is exceptional, the inference time is very slow. A 1 hour audio clip takes upwards of 6 minutes to transcribe on a 16GB T4 GPU, even after leveraging inference optimisations like [flash attention](https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2), half-precision, and [chunking](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline.chunk_length_s). In this blog post, we demonstrate how Speculative Decoding can be employed to reduce the inference time of Whisper by a **factor of 2**, while mathematically ensuring exactly the **same outputs** are achieved from the model. As a result, this method provides a perfect drop-in replacement for existing Whisper pipelines, since it provides free 2x speed-up while maintaining the same accuracy. For a more streamlined version of the blog post with fewer explanations but all the code, see the accompanying [Google Colab](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/speculative_decoding.ipynb). ## Speculative Decoding Speculative Decoding was proposed in [Fast Inference from Transformers via Speculative Decoding](https://arxiv.org/abs/2211.17192) by Yaniv Leviathan et. al. from Google. It works on the premise that a faster, **assistant model** very often generates the same tokens as a larger **main model**. First, the assistant model auto-regressively generates a sequence of \\( N \\) *candidate tokens*, \\( \hat{\boldsymbol{y}}_{1:N} \\). In the diagram below, the assistant model generates a sequence of 5 candidate tokens: `The quick brown sock jumps`. <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_1.mp4" ></video> </figure> While these candidate tokens are generated quickly, they may differ from those predicted by the main model. Therefore, in the second step, the candidate tokens are passed to the main model to be "verified". The main model takes the candidate tokens as input and performs a **single forward pass**. The outputs of the main model are the "correct" token for each step in the token sequence \\( \boldsymbol{y}_{1:N} \\). 
<figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_2.mp4" ></video> </figure> In the diagram above, we see that the first three tokens predicted by the main model agree with those from the assistant model: <span style="color:green">The quick brown</span>. However, the fourth candidate token from the assistant model, <span style="color:red">sock</span>, mismatches with the correct token from the main model, <span style="color:green">fox</span>. We know that all candidate tokens up to the first mismatch are correct (<span style="color:green">The quick brown</span>), since these agree with the predictions from the main model. However, after the first mismatch, the candidate tokens diverge from the actual tokens predicted by the main model. Therefore, we can replace the first incorrect candidate token (<span style="color:red">sock</span>) with the correct token from the main model (<span style="color:green">fox</span>), and discard all predicted tokens that come after this, since these have diverged. The corrected sequence, `The quick brown fox`, now forms the new input to the assistant model: <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_3.mp4" ></video> </figure> The inference process then repeats, the assistant model generating a new set of \\( N \\) candidate tokens, which are verified in a single forward pass by the main model. <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_4.mp4" ></video> </figure> Since we auto-regressively generate using the fast, assistant model, and only perform verification forward passes with the slow, main model, the decoding process is sped-up substantially. Furthermore, the verification forward passes performed by the main model ensures that **exactly the same outputs** are achieved as if we were using the main model standalone. This makes speculative decoding a perfect drop-in for existing Whisper pipelines, since one can be certain that the same quality will be attained. To get the biggest improvement in latency, the assistant model should be significantly faster than the main model, while predicting the same token distribution as often as possible. In practice, these two attributes form a trade-off: the faster a model is, the less accurate it is. However, since 70-80% of all predicted tokens tend to be "easier" tokens, this trade-off is heavily biased towards selecting a faster model, rather than a more accurate one. Thus, the assistant model should be at least 3x faster than the main model (the more the better), while predicting all the "easy" tokens in the examples correctly. The remaining 20-30% of more "difficult" tokens can then be verified by the larger, main model. The only constraint for selecting an assistant model is that it must share the same vocabulary as the main model. That is to say, the assistant model must use one-to-one the same tokenizer as the main model. Therefore, if we want to use speculative decoding with a multilingual variant of Whisper, e.g. 
[large-v2](https://huggingface.co/openai/whisper-large-v2) (multilingual), we need to select a multilingual variant of Whisper as the assistant model, e.g. [tiny](https://huggingface.co/openai/tiny). Whereas, if we want to use speculative decoding with and English-only version of Whisper, e.g. [medium.en](https://huggingface.co/openai/whisper-medium.en), we need an English-only of version as the assistant model, e.g. [tiny.en](https://huggingface.co/openai/tiny.en). At the current time, Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) is an exception, since it is the only Whisper checkpoint with an expanded vocabulary size, and thus is not compatible with previous Whisper checkpoints. Now that we know the background behind speculative decoding, we're ready to dive into the practical implementation. In the [🤗 Transformers](https://huggingface.co/docs/transformers/index) library, speculative decoding is implemented as the "assisted generation" inference strategy. For more details about the implementation, the reader is advised to read Joao Gante's excellent blog post on [Assisted Generation](https://huggingface.co/blog/assisted-generation). ## English Speech Transcription ### Baseline Implementation We start by benchmarking Whisper [large-v2](https://huggingface.co/openai/whisper-large-v2) to get our baseline number for inference speed. We can load the main model and it's corresponding processor via the convenient [`AutoModelForSpeechSeq2Seq`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForSpeechSeq2Seq) and [`AutoProcessor`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoProcessor) classes. We'll load the model in `float16` precision and make sure that loading time takes as little time as possible by passing [`low_cpu_mem_usage=True`](https://huggingface.co/docs/transformers/main_classes/model#large-model-loading). In addition, we want to make sure that the model is loaded in [safetensors](https://huggingface.co/docs/diffusers/main/en/using-diffusers/using_safetensors) format by passing [`use_safetensors=True`](https://huggingface.co/docs/transformers/main_classes/model#transformers.PreTrainedModel.from_pretrained.use_safetensors). Finally, we'll pass the argument `attn_implementation="sdpa"` to benefit from Flash Attention speed-ups through PyTorch's [SDPA attention kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html): ```python import torch from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "openai/whisper-large-v2" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) ``` Let's load the English speech transcription dataset that we will use for benchmarking. We'll load a small dataset consisting of 73 samples from the [LibriSpeech ASR](https://huggingface.co/datasets/librispeech_asr) validation-clean dataset. 
This amounts to ~9MB of data, so it's very lightweight and quick to download on device: ```python from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ``` For the benchmark, we only want to measure the generation time, so let's write a short helper function that measures this step. The following function will return both the decoded tokens and the time it took to run the model: ```python import time def generate_with_time(model, inputs, **kwargs): start_time = time.time() outputs = model.generate(**inputs, **kwargs) generation_time = time.time() - start_time return outputs, generation_time ``` We can now iterate over the audio samples in our dataset and sum up the overall generation time: ```python from tqdm import tqdm all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = generate_with_time(model, inputs) all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["text"])) print(all_time) ``` **Output:** ``` 100%|██████████| 73/73 [01:37<00:00, 1.33s/it] 72.99542546272278 ``` Alright! We see that transcribing the 73 samples took 73 seconds. Let's check the WER of the predictions: ```python from evaluate import load wer = load("wer") print(wer.compute(predictions=predictions, references=references)) ``` **Output:** ``` 0.03507271171941831 ``` Our final baseline number is 73 seconds for a WER of 3.5%. ### Speculative Decoding Now let's load the assistant model for speculative decoding. In this example, we'll use a distilled variant of Whisper, [distil-large-v2](https://huggingface.co/distil-whisper/distil-large-v2). The distilled model copies the entire encoder from Whisper, but only 2 of the 32 decoder layers. As such, it runs 6x faster than Whisper, while performing to within 1% WER on out-of-distribution test sets. This makes it the perfect choice as an assistant model, since it has both high transcription accuracy and fast generation \\({}^1\\). Since Distil-Whisper uses exactly the same encoder as the Whisper model, we can share the encoder across the main and assistant models. We then only have to load the 2-layer decoder from Distil-Whisper as a "decoder-only" model. We can do this through the convenient [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) auto class. In practice, this results in only an 8% increase to VRAM over using the main model alone. ```python from transformers import AutoModelForCausalLM assistant_model_id = "distil-whisper/distil-large-v2" assistant_model = AutoModelForCausalLM.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) assistant_model.to(device) ``` ------------------------------------------------------------------------ \\({}^1\\) We intend to release an improved variant of Distil-Whisper with a stronger alignment in the token distribution that will improve speculative decoding performance further. Follow the [Distil-Whisper repository](https://github.com/huggingface/distil-whisper) for updates. 
------------------------------------------------------------------------ We can define a modified function for our speculative decoding benchmark. The only difference from the previous function is that we pass the assistant model to our call to `.generate`: ```python def assisted_generate_with_time(model, inputs, **kwargs): start_time = time.time() outputs = model.generate(**inputs, assistant_model=assistant_model, **kwargs) generation_time = time.time() - start_time return outputs, generation_time ``` Let's run the benchmark with speculative decoding, using Distil-Whisper as the assistant to Whisper: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = assisted_generate_with_time(model, inputs) all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["text"])) print(all_time) ``` **Outputs:** ``` 100%|██████████| 73/73 [00:38<00:00, 1.88it/s] 32.69683289527893 ``` With speculative decoding, the inference time was just 33 seconds, 2.2x faster than before! Let's verify we have the same WER: ```python print(wer.compute(predictions=predictions, references=references)) ``` **Outputs:** ``` 0.03507271171941831 ``` Perfect! 3.5% WER again, as we have identical outputs to using the main model standalone. Speculative decoding can also be used with the easy 🤗 Transformers [pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) API for inference. Below, we instantiate the pipeline using the model and processor, and then use it to transcribe the first sample from the toy dataset. This can be extended to transcribe audio samples of arbitrary length, including with the use of batching: ```python from transformers import pipeline pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=4, generate_kwargs={"assistant_model": assistant_model}, torch_dtype=torch_dtype, device=device, ) sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` **Outputs:** ``` Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel. ``` An end-to-end code snippet for running speculative decoding with Whisper and Distil-Whisper can be found on the [Distil-Whisper model card](https://huggingface.co/distil-whisper/distil-large-v2#speculative-decoding). It combines the stages of inference covered in this notebook into a single code example. ## Multilingual Speech Transcription Distil-Whisper is the perfect assistant model for English speech transcription, since it performs to within 1% WER of the original Whisper model, while being 6x faster over short and long-form audio samples. However, the official Distil-Whisper checkpoints are English only, meaning they cannot be used for multilingual speech transcription. To use speculative decoding for multilingual speech transcription, one could either use one of the [official multilingual Whisper checkpoints](https://huggingface.co/openai/whisper-large-v2#model-details), or a fine-tuned variant of Whisper. 
At the time of writing, there are over 5,000 [fine-tuned Whisper checkpoints](https://huggingface.co/models?other=whisper) on the Hugging Face Hub in over 100 languages. These provide an excellent starting point for selecting assistant Whisper checkpoints that perform very well on a single language. In this example, we'll use the smallest official multilingual checkpoint, Whisper [tiny](https://huggingface.co/openai/whisper-tiny). Feel free to experiment with different checkpoints fine-tuned in your language! Let's load the weights for our new assistant model, Whisper tiny. Since the encoder in Whisper tiny differs from that in large-v2, this time we'll load both the encoder and decoder using the `AutoModelForSpeechSeq2Seq` class: ```python assistant_model_id = "openai/whisper-tiny" assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) assistant_model.to(device); ``` For our benchmarking dataset, we'll load 73 samples from the Dutch ("nl") split of the [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) dataset: ```python dataset = load_dataset("sanchit-gandhi/voxpopuli_dummy", "nl", split="validation") ``` Great! We can now re-run our benchmark for our baseline Whisper large-v2 model as before. The only change we make is that we pass the language and task arguments to our generate function, in order to ensure we perform speech transcription (not speech translation). Speculative decoding is fully compatible with both the speech transcription and translation tasks. Simply set the task argument as required below: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = generate_with_time(model, inputs, language="nl", task="transcribe") all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["normalized_text"])) wer_result = wer.compute(predictions=predictions, references=references) print("Time:", all_time) print("WER:", wer_result) ``` **Outputs:** ``` 100%|██████████| 73/73 [02:05<00:00, 1.72s/it] Time: 116.50992178916931 WER: 0.127190136275146 ``` Right! We have our baseline time of 117 seconds and a WER of 12.8%. Let's re-run the generation process using speculative decoding: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = assisted_generate_with_time(model, inputs, language="nl", task="transcribe") all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["normalized_text"])) wer_result = wer.compute(predictions=predictions, references=references) print("Time:", all_time) print("WER:", wer_result) ``` **Outputs:** ``` 100%|██████████| 73/73 [01:08<00:00, 1.06it/s] Time: 62.10229682922363 WER: 0.127190136275146 ``` Again, we achieve 12.8% WER, but this time in just 62 seconds of inference time, representing a speed-up of 1.9x. 
Given the low overhead of loading the assistant model and the mathematical property that exactly the same outputs are achieved, speculative decoding offers the perfect drop-in replacement to existing Whisper pipelines. ## Strategies for Efficient Speculative Decoding In this final section, we cover two strategies for ensuring the fastest possible inference time with speculative decoding. #### Assistant Model Our objective is to select an assistant model that is at least 3x faster than the main model **and** transcribes at least 70-80% of the predicted tokens correctly, typically the "easier" tokens in the examples. If you have a particular language in which you want to transcribe, an effective strategy is to train two Whisper models of different sizes, and use one as the assistant to the other: * First, fine-tune Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) to act as your main model * Second, distil Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) on the same dataset to act as a fast assistant model Fine-tuning and distillation can improve the WER performance of both the main and assistant models on your chosen language, while maximising the alignment in the token distributions. A complete guide to Whisper fine-tuning can be found [here](https://huggingface.co/blog/fine-tune-whisper), and distillation [here](https://github.com/huggingface/distil-whisper/tree/main/training). #### Batch Size It is worth noting that the largest speed gains with speculative decoding come with a batch size of 1. For batched speculative decoding, all candidate tokens **across the batch** must match the validation tokens in order for the tokens to be accepted. If a token in the batch at a given position does not agree, all candidate tokens that proceed the position are discarded. Consequently, speculative decoding favours lower batch sizes. In practice, we find that speculative decoding provides a speed-up until a batch size of 4. Above batch size 4, speculative decoding returns slower inference than the main model alone. For full results, refer to Section D.3 of the [Distil-Whisper paper](https://arxiv.org/pdf/2311.00430.pdf). ## Conclusion In this blog post, we covered the inference strategy of speculative decoding, as applied to the Whisper model for speech transcription. We demonstrated how 2x speed-ups can be achieved, while mathematically ensuring the same outputs as using the original model alone. We encourage you to try speculative decoding as a drop-in replacement for existing Whisper pipelines, given the low overhead of using the additional assistant model and the guarantee of the same transcription results. ## Acknowledgements Blog post by [Sanchit Gandhi](https://huggingface.co/sanchit-gandhi). Many thanks to [Patrick von Platen](https://huggingface.co/patrickvonplaten) and [Pedro Cuenca](https://huggingface.co/pcuenq) for their constructive comments, and to [Joao Gante](https://huggingface.co/joaogante) for the assisted generation implementation in 🤗 Transformers.
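As a closing footnote to the batch size discussion above, here is a toy sketch of the batched acceptance rule (an illustrative assumption for explanation only, not the actual 🤗 Transformers implementation; the tensors are made-up demo data):

```python
import torch

def batched_accept_length(candidate_ids: torch.Tensor, verified_ids: torch.Tensor) -> int:
    """Return how many candidate positions are accepted for the whole batch.
    A position is only kept if the candidate matches the verified token for
    *every* sequence in the batch, so one disagreeing sequence truncates the
    accepted prefix for everyone."""
    matches = (candidate_ids == verified_ids).all(dim=0)  # per-position agreement across the batch
    mismatch = (~matches).nonzero()
    return candidate_ids.shape[1] if mismatch.numel() == 0 else int(mismatch[0])

candidate_ids = torch.tensor([[5, 8, 3, 9],
                              [5, 8, 7, 9]])   # batch of 2 sequences, 4 candidate tokens each
verified_ids = torch.tensor([[5, 8, 3, 9],
                             [5, 8, 3, 9]])    # tokens confirmed by the main model
print(batched_accept_length(candidate_ids, verified_ids))  # 2 -> only the shared prefix survives
```

Because a single disagreeing sequence shortens the accepted prefix for the entire batch, the expected number of tokens accepted per verification step shrinks as the batch grows, which is consistent with the speed-up fading above batch size 4.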
1
0
hf_public_repos
hf_public_repos/blog/sagemaker-distributed-training-seq2seq.md
--- title: 'Distributed Training: Train BART/T5 for Summarization using 🤗 Transformers and Amazon SageMaker' thumbnail: /blog/assets/19_sagemaker_distributed_training_seq2seq/thumbnail.png authors: - user: philschmid --- # Distributed Training: Train BART/T5 for Summarization using 🤗 Transformers and Amazon SageMaker <a target="_blank" href="https://github.com/huggingface/notebooks/blob/master/sagemaker/08_distributed_summarization_bart_t5/sagemaker-notebook.ipynb"> <img src="https://badgen.net/badge/Github/Open/black?icon=github" alt="Open on Github"/> </a> In case you missed it: on March 25th [we announced a collaboration with Amazon SageMaker](https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face) to make it easier to create State-of-the-Art Machine Learning models, and ship cutting-edge NLP features faster. Together with the SageMaker team, we built 🤗 Transformers optimized [Deep Learning Containers](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to accelerate training of Transformers-based models. Thanks AWS friends!🤗 🚀 With the new HuggingFace estimator in the [SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/), you can start training with a single line of code. ![thumbnail](assets/19_sagemaker_distributed_training_seq2seq/thumbnail.png) The [announcement blog post](https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face) provides all the information you need to know about the integration, including a "Getting Started" example and links to documentation, examples, and features. listed again here: - [🤗 Transformers Documentation: Amazon SageMaker](https://huggingface.co/transformers/sagemaker.html) - [Example Notebooks](https://github.com/huggingface/notebooks/tree/master/sagemaker) - [Amazon SageMaker documentation for Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html) - [Python SDK SageMaker documentation for Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/index.html) - [Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) If you're not familiar with Amazon SageMaker: *"Amazon SageMaker is a fully managed service that provides every developer and data scientist with the ability to build, train, and deploy machine learning (ML) models quickly. SageMaker removes the heavy lifting from each step of the machine learning process to make it easier to develop high quality models." [[REF](https://aws.amazon.com/sagemaker/faqs/)]* --- ## Tutorial We will use the new [Hugging Face DLCs](https://github.com/aws/deep-learning-containers/tree/master/huggingface) and [Amazon SageMaker extension](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html#huggingface-estimator) to train a distributed Seq2Seq-transformer model on the `summarization` task using the `transformers` and `datasets` libraries, and then upload the model to [huggingface.co](http://huggingface.co) and test it. As [distributed training strategy](https://huggingface.co/transformers/sagemaker.html#distributed-training-data-parallel) we are going to use [SageMaker Data Parallelism](https://aws.amazon.com/blogs/aws/managed-data-parallelism-in-amazon-sagemaker-simplifies-training-on-large-datasets/), which has been built into the [Trainer](https://huggingface.co/transformers/main_classes/trainer.html) API. 
To use data-parallelism we only have to define the `distribution` parameter in our `HuggingFace` estimator. ```python # configuration for running training on smdistributed Data Parallel distribution = {'smdistributed':{'dataparallel':{ 'enabled': True }}} ``` In this tutorial, we will use an Amazon SageMaker Notebook Instance for running our training job. You can learn [here how to set up a Notebook Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi.html). **What are we going to do:** - Set up a development environment and install sagemaker - Choose 🤗 Transformers `examples/` script - Configure distributed training and hyperparameters - Create a `HuggingFace` estimator and start training - Upload the fine-tuned model to [huggingface.co](http://huggingface.co) - Test inference #### Model and Dataset We are going to fine-tune [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on the [samsum](https://huggingface.co/datasets/samsum) dataset. *"BART is sequence-to-sequence model trained with denoising as pretraining objective."* [[REF](https://github.com/pytorch/fairseq/blob/master/examples/bart/README.md)] The `samsum` dataset contains about 16k messenger-like conversations with summaries. ```json {"id": "13818513", "summary": "Amanda baked cookies and will bring Jerry some tomorrow.", "dialogue": "Amanda: I baked cookies. Do you want some?\r\nJerry: Sure!\r\nAmanda: I'll bring you tomorrow :-)"} ``` --- ### Set up a development environment and install sagemaker After our SageMaker Notebook Instance is running we can select either Jupyer Notebook or JupyterLab and create a new Notebook with the `conda_pytorch_p36 kernel`. _**Note:** The use of Jupyter is optional: We could also launch SageMaker Training jobs from anywhere we have an SDK installed, connectivity to the cloud and appropriate permissions, such as a Laptop, another IDE or a task scheduler like Airflow or AWS Step Functions._ After that we can install the required dependencies ```bash !pip install transformers "datasets[s3]" sagemaker --upgrade ``` [install](https://github.com/git-lfs/git-lfs/wiki/Installation) `git-lfs` for model upload. ```bash !curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh | sudo bash !sudo yum install git-lfs -y !git lfs install ``` To run training on SageMaker we need to create a sagemaker Session and provide an IAM role with the right permission. This IAM role will be later attached to the `TrainingJob` enabling it to download data, e.g. from Amazon S3. ```python import sagemaker sess = sagemaker.Session() role = sagemaker.get_execution_role() print(f"IAM role arn used for running training: {role}") print(f"S3 bucket used for storing artifacts: {sess.default_bucket()}") ``` --- ## Choose 🤗 Transformers `examples/` script The [🤗 Transformers repository](https://github.com/huggingface/transformers/tree/master/examples) contains several `examples/`scripts for fine-tuning models on tasks from `language-modeling` to `token-classification`. In our case, we are using the `run_summarization.py` from the `seq2seq/` examples. ***Note**: you can use this tutorial as-is to train your model on a different examples script.* Since the `HuggingFace` Estimator has git support built-in, we can specify a [training script stored in a GitHub repository](https://sagemaker.readthedocs.io/en/stable/overview.html#use-scripts-stored-in-a-git-repository) as `entry_point` and `source_dir`. 
We are going to use the `transformers 4.4.2` DLC, which means we need to configure `v4.4.2` as the branch to pull the compatible example scripts.

```python
#git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.4.2'} # v4.4.2 refers to the `transformers_version` you use in the estimator.
# used due to a missing package in v4.4.2
git_config = {'repo': 'https://github.com/philschmid/transformers.git','branch': 'master'} # v4.4.2 refers to the `transformers_version` you use in the estimator.
```

---

### Configure distributed training and hyperparameters

Next, we will define our `hyperparameters` and configure our distributed training strategy. As hyperparameters, we can define any of the [Seq2SeqTrainingArguments](https://huggingface.co/transformers/main_classes/trainer.html#seq2seqtrainingarguments) and the ones defined in [run_summarization.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/seq2seq#sequence-to-sequence-training-and-evaluation).

```python
# hyperparameters, which are passed into the training job
hyperparameters={
    'per_device_train_batch_size': 4,
    'per_device_eval_batch_size': 4,
    'model_name_or_path': 'facebook/bart-large-cnn',
    'dataset_name': 'samsum',
    'do_train': True,
    'do_predict': True,
    'predict_with_generate': True,
    'output_dir': '/opt/ml/model',
    'num_train_epochs': 3,
    'learning_rate': 5e-5,
    'seed': 7,
    'fp16': True,
}

# configuration for running training on smdistributed Data Parallel
distribution = {'smdistributed': {'dataparallel': {'enabled': True}}}
```

Since we are using [SageMaker Data Parallelism](https://aws.amazon.com/blogs/aws/managed-data-parallelism-in-amazon-sagemaker-simplifies-training-on-large-datasets/), our `total_batch_size` will be `per_device_train_batch_size` * `n_gpus`.

---

### Create a `HuggingFace` estimator and start training

The last step before training is creating a `HuggingFace` estimator. The Estimator handles the end-to-end Amazon SageMaker training. We define which fine-tuning script should be used as `entry_point`, which `instance_type` should be used, and which `hyperparameters` are passed in.

```python
from sagemaker.huggingface import HuggingFace

# create the Estimator
huggingface_estimator = HuggingFace(
    entry_point='run_summarization.py', # script
    source_dir='./examples/seq2seq',    # relative path to example
    git_config=git_config,
    instance_type='ml.p3dn.24xlarge',
    instance_count=2,
    transformers_version='4.4.2',
    pytorch_version='1.6.0',
    py_version='py36',
    role=role,
    hyperparameters = hyperparameters,
    distribution = distribution
)
```

As `instance_type` we are using `ml.p3dn.24xlarge`, which contains 8x NVIDIA V100 GPUs, with an `instance_count` of 2. This means we are going to run training on 16 GPUs with a `total_batch_size` of 16*4=64. We are going to train a 400 million parameter model with a `total_batch_size` of 64, which is just wow.

To start our training we call the `.fit()` method.

```python
# starting the training job
huggingface_estimator.fit()
```

```bash
2021-04-01 13:00:35 Starting - Starting the training job...
2021-04-01 13:01:03 Starting - Launching requested ML instancesProfilerReport-1617282031: InProgress
2021-04-01 13:02:23 Starting - Preparing the instances for training......
2021-04-01 13:03:25 Downloading - Downloading input data...
2021-04-01 13:04:04 Training - Downloading the training image...............
2021-04-01 13:06:33 Training - Training image download completed. Training in progress
....
....
2021-04-01 13:16:47 Uploading - Uploading generated training model
2021-04-01 13:27:49 Completed - Training job completed
Training seconds: 2882
Billable seconds: 2882
```

The training seconds are 2882 because they are multiplied by the number of instances. If we calculate 2882/2=1441, we get the duration from "Downloading the training image" to "Training job completed". Converted to real money, our training on 16 NVIDIA Tesla V100 GPUs for a state-of-the-art summarization model comes down to ~$28.

---

### Upload the fine-tuned model to [huggingface.co](http://huggingface.co)

Since our model achieved a pretty good score, we are going to upload it to [huggingface.co](http://huggingface.co), create a `model_card` and test it with the Hosted Inference widget. To upload a model you need to [create an account here](https://huggingface.co/join).

We can download our model from Amazon S3 and unzip it using the following snippet.

```python
import os
import tarfile
from sagemaker.s3 import S3Downloader

local_path = 'my_bart_model'

os.makedirs(local_path, exist_ok = True)

# download model from S3
S3Downloader.download(
    s3_uri=huggingface_estimator.model_data, # s3 uri where the trained model is located
    local_path=local_path,                   # local path where *.tar.gz will be saved
    sagemaker_session=sess                   # sagemaker session used for training the model
)

# unzip model
tar = tarfile.open(f"{local_path}/model.tar.gz", "r:gz")
tar.extractall(path=local_path)
tar.close()
os.remove(f"{local_path}/model.tar.gz")
```

Before we upload our model to [huggingface.co](http://huggingface.co) we need to create a `model_card`. The `model_card` describes the model, includes hyperparameters and results, and specifies which dataset was used for training. To create a `model_card` we create a `README.md` in our `local_path`.

```python
import json

# read eval and test results
with open(f"{local_path}/eval_results.json") as f:
    eval_results_raw = json.load(f)
    eval_results={}
    eval_results["eval_rouge1"] = eval_results_raw["eval_rouge1"]
    eval_results["eval_rouge2"] = eval_results_raw["eval_rouge2"]
    eval_results["eval_rougeL"] = eval_results_raw["eval_rougeL"]
    eval_results["eval_rougeLsum"] = eval_results_raw["eval_rougeLsum"]

with open(f"{local_path}/test_results.json") as f:
    test_results_raw = json.load(f)
    test_results={}
    test_results["test_rouge1"] = test_results_raw["test_rouge1"]
    test_results["test_rouge2"] = test_results_raw["test_rouge2"]
    test_results["test_rougeL"] = test_results_raw["test_rougeL"]
    test_results["test_rougeLsum"] = test_results_raw["test_rougeLsum"]
```

After we extract all the metrics we want to include, we are going to create our `README.md`.
In addition to the automatically generated results table, we add the metrics manually to the `metadata` of our model card under `model-index`.

```python
import json

MODEL_CARD_TEMPLATE = """
---
language: en
tags:
- sagemaker
- bart
- summarization
license: apache-2.0
datasets:
- samsum
model-index:
- name: {model_name}
  results:
  - task:
      name: Abstractive Text Summarization
      type: abstractive-text-summarization
    dataset:
      name: "SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization"
      type: samsum
    metrics:
    - name: Validation ROUGE-1
      type: rouge-1
      value: 42.621
    - name: Validation ROUGE-2
      type: rouge-2
      value: 21.9825
    - name: Validation ROUGE-L
      type: rouge-l
      value: 33.034
    - name: Test ROUGE-1
      type: rouge-1
      value: 41.3174
    - name: Test ROUGE-2
      type: rouge-2
      value: 20.8716
    - name: Test ROUGE-L
      type: rouge-l
      value: 32.1337
widget:
- text: |
    Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker?
    Philipp: Sure you can use the new Hugging Face Deep Learning Container.
    Jeff: ok.
    Jeff: and how can I get started?
    Jeff: where can I find documentation?
    Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face
---

## `{model_name}`

This model was trained using Amazon SageMaker and the new Hugging Face Deep Learning container.

For more information look at:
- [🤗 Transformers Documentation: Amazon SageMaker](https://huggingface.co/transformers/sagemaker.html)
- [Example Notebooks](https://github.com/huggingface/notebooks/tree/master/sagemaker)
- [Amazon SageMaker documentation for Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html)
- [Python SDK SageMaker documentation for Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/index.html)
- [Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers)

## Hyperparameters

{hyperparameters}

## Usage

from transformers import pipeline
summarizer = pipeline("summarization", model="philschmid/{model_name}")

conversation = '''Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker?
Philipp: Sure you can use the new Hugging Face Deep Learning Container.
Jeff: ok.
Jeff: and how can I get started?
Jeff: where can I find documentation?
Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face
'''
summarizer(conversation)

## Results

| key | value |
| --- | ----- |
{eval_table}
{test_table}
"""

# Generate model card (todo: add more data from Trainer)
model_card = MODEL_CARD_TEMPLATE.format(
    model_name=f"{hyperparameters['model_name_or_path'].split('/')[1]}-{hyperparameters['dataset_name']}",
    hyperparameters=json.dumps(hyperparameters, indent=4, sort_keys=True),
    eval_table="\n".join(f"| {k} | {v} |" for k, v in eval_results.items()),
    test_table="\n".join(f"| {k} | {v} |" for k, v in test_results.items()),
)

with open(f"{local_path}/README.md", "w") as f:
    f.write(model_card)
```

After we have our unzipped model and model card located in `my_bart_model`, we can either use the `huggingface_hub` SDK to create a repository and upload it to [huggingface.co](https://huggingface.co) – or just go to https://huggingface.co/new, create a new repository there, and upload the files manually.
```python from getpass import getpass from huggingface_hub import HfApi, Repository hf_username = "philschmid" # your username on huggingface.co hf_email = "[email protected]" # email used for commit repository_name = f"{hyperparameters['model_name_or_path'].split('/')[1]}-{hyperparameters['dataset_name']}" # repository name on huggingface.co password = getpass("Enter your password:") # creates a prompt for entering password # get hf token token = HfApi().login(username=hf_username, password=password) # create repository repo_url = HfApi().create_repo(token=token, name=repository_name, exist_ok=True) # create a Repository instance model_repo = Repository(use_auth_token=token, clone_from=repo_url, local_dir=local_path, git_user=hf_username, git_email=hf_email) # push model to the hub model_repo.push_to_hub() ``` --- ### Test inference After we uploaded our model we can access it at `https://huggingface.co/{hf_username}/{repository_name}` ```python print(f"https://huggingface.co/{hf_username}/{repository_name}") ``` And use the "Hosted Inference API" widget to test it. [https://huggingface.co/philschmid/bart-large-cnn-samsum](https://huggingface.co/philschmid/bart-large-cnn-samsum) ![inference](assets/19_sagemaker_distributed_training_seq2seq/inference-test.png)
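If you want to double-check the model outside of the widget, a quick local test with the 🤗 Transformers `pipeline` could look like the snippet below (a minimal sketch, assuming the repository name created above, e.g. `philschmid/bart-large-cnn-samsum`):

```python
from transformers import pipeline

# load the fine-tuned summarization model straight from the Hub
summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")

conversation = """Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker?
Philipp: Sure you can use the new Hugging Face Deep Learning Container.
Jeff: ok.
Jeff: and how can I get started?
Jeff: where can I find documentation?
Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face
"""

print(summarizer(conversation)[0]["summary_text"])
```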
---
title: Introducing the Hugging Face LLM Inference Container for Amazon SageMaker
thumbnail: /blog/assets/145_sagemaker-huggingface-llm/thumbnail.jpg
authors:
- user: philschmid
---

# Introducing the Hugging Face LLM Inference Container for Amazon SageMaker

This is an example of how to deploy open-source LLMs, like [BLOOM](https://huggingface.co/bigscience/bloom), to Amazon SageMaker for inference using the new Hugging Face LLM Inference Container. We will deploy the 12B [Pythia Open Assistant Model](https://huggingface.co/OpenAssistant/pythia-12b-sft-v8-7k-steps), an open-source Chat LLM trained with the Open Assistant dataset.

The example covers:
1. [Setup development environment](#1-setup-development-environment)
2. [Retrieve the new Hugging Face LLM DLC](#2-retrieve-the-new-hugging-face-llm-dlc)
3. [Deploy Open Assistant 12B to Amazon SageMaker](#3-deploy-deploy-open-assistant-12b-to-amazon-sagemaker)
4. [Run inference and chat with our model](#4-run-inference-and-chat-with-our-model)
5. [Create Gradio Chatbot backed by Amazon SageMaker](#5-create-gradio-chatbot-backed-by-amazon-sagemaker)

You can also find the code for the example in the [notebooks repository](https://github.com/huggingface/notebooks/blob/main/sagemaker/27_deploy_large_language_models/sagemaker-notebook.ipynb).

## What is the Hugging Face LLM Inference DLC?

The Hugging Face LLM DLC is a new purpose-built Inference Container to easily deploy LLMs in a secure and managed environment. The DLC is powered by [Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference), an open-source, purpose-built solution for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation using Tensor Parallelism and dynamic batching for the most popular open-source LLMs, including StarCoder, BLOOM, GPT-NeoX, Llama, and T5. Text Generation Inference is already used by customers such as IBM, Grammarly, and the Open-Assistant initiative, and it implements optimizations for all supported model architectures, including:

- Tensor Parallelism and custom cuda kernels
- Optimized transformers code for inference using [flash-attention](https://github.com/HazyResearch/flash-attention) on the most popular architectures
- Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
- [Continuous batching of incoming requests](https://github.com/huggingface/text-generation-inference/tree/main/router) for increased total throughput
- Accelerated weight loading (start-up time) with [safetensors](https://github.com/huggingface/safetensors)
- Logits warpers (temperature scaling, topk, repetition penalty ...)
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) - Stop sequences, Log probabilities - Token streaming using Server-Sent Events (SSE) Officially supported model architectures are currently: - [BLOOM](https://huggingface.co/bigscience/bloom) / [BLOOMZ](https://huggingface.co/bigscience/bloomz) - [MT0-XXL](https://huggingface.co/bigscience/mt0-xxl) - [Galactica](https://huggingface.co/facebook/galactica-120b) - [SantaCoder](https://huggingface.co/bigcode/santacoder) - [GPT-Neox 20B](https://huggingface.co/EleutherAI/gpt-neox-20b) (joi, pythia, lotus, rosey, chip, RedPajama, open assistant) - [FLAN-T5-XXL](https://huggingface.co/google/flan-t5-xxl) (T5-11B) - [Llama](https://github.com/facebookresearch/llama) (vicuna, alpaca, koala) - [Starcoder](https://huggingface.co/bigcode/starcoder) / [SantaCoder](https://huggingface.co/bigcode/santacoder) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b) / [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b) With the new Hugging Face LLM Inference DLCs on Amazon SageMaker, AWS customers can benefit from the same technologies that power highly concurrent, low latency LLM experiences like [HuggingChat](https://hf.co/chat), [OpenAssistant](https://open-assistant.io/), and Inference API for LLM models on the Hugging Face Hub. Let's get started! ## 1. Setup development environment We are going to use the `sagemaker` python SDK to deploy BLOOM to Amazon SageMaker. We need to make sure to have an AWS account configured and the `sagemaker` python SDK installed. ```python !pip install "sagemaker==2.175.0" --upgrade --quiet ``` If you are going to use Sagemaker in a local environment, you need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it. ```python import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker session region: {sess.boto_region_name}") ``` ## 2. Retrieve the new Hugging Face LLM DLC Compared to deploying regular Hugging Face models, we first need to retrieve the container uri and provide it to our `HuggingFaceModel` model class with a `image_uri` pointing to the image. To retrieve the new Hugging Face LLM DLC in Amazon SageMaker, we can use the `get_huggingface_llm_image_uri` method provided by the `sagemaker` SDK. This method allows us to retrieve the URI for the desired Hugging Face LLM DLC based on the specified `backend`, `session`, `region`, and `version`. You can find the available versions [here](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-text-generation-inference-containers) ```python from sagemaker.huggingface import get_huggingface_llm_image_uri # retrieve the llm image uri llm_image = get_huggingface_llm_image_uri( "huggingface", version="1.0.3" ) # print ecr image uri print(f"llm image uri: {llm_image}") ``` ## 3. 
Deploy Open Assistant 12B to Amazon SageMaker

_Note: Quotas for Amazon SageMaker can vary between accounts. If you receive an error indicating you've exceeded your quota, you can increase them through the [Service Quotas console](https://console.aws.amazon.com/servicequotas/home/services/sagemaker/quotas)._

To deploy the [Open Assistant Model](https://huggingface.co/OpenAssistant/pythia-12b-sft-v8-7k-steps) to Amazon SageMaker, we create a `HuggingFaceModel` model class and define our endpoint configuration including the `hf_model_id`, `instance_type`, etc. We will use a `g5.12xlarge` instance type, which has 4 NVIDIA A10G GPUs and 96GB of GPU memory.

_Note: We could also optimize the deployment for cost and use the `g5.2xlarge` instance type and enable int-8 quantization._

```python
import json
from sagemaker.huggingface import HuggingFaceModel

# sagemaker config
instance_type = "ml.g5.12xlarge"
number_of_gpu = 4
health_check_timeout = 300

# Define Model and Endpoint configuration parameter
config = {
  'HF_MODEL_ID': "OpenAssistant/pythia-12b-sft-v8-7k-steps", # model_id from hf.co/models
  'SM_NUM_GPUS': json.dumps(number_of_gpu), # Number of GPU used per replica
  'MAX_INPUT_LENGTH': json.dumps(1024),     # Max length of input text
  'MAX_TOTAL_TOKENS': json.dumps(2048),     # Max length of the generation (including input text)
  # 'HF_MODEL_QUANTIZE': "bitsandbytes",    # comment in to quantize
}

# create HuggingFaceModel with the image uri
llm_model = HuggingFaceModel(
  role=role,
  image_uri=llm_image,
  env=config
)
```

After we have created the `HuggingFaceModel` we can deploy it to Amazon SageMaker using the `deploy` method. We will deploy the model with the `ml.g5.12xlarge` instance type. TGI will automatically distribute and shard the model across all GPUs.

```python
# Deploy model to an endpoint
# https://sagemaker.readthedocs.io/en/stable/api/inference/model.html#sagemaker.model.Model.deploy
llm = llm_model.deploy(
  initial_instance_count=1,
  instance_type=instance_type,
  # volume_size=400, # If using an instance with local SSD storage, volume_size must be None, e.g. p4 but not p3
  container_startup_health_check_timeout=health_check_timeout, # 10 minutes to be able to load the model
)
```

SageMaker will now create our endpoint and deploy the model to it. This can take 5-10 minutes.

## 4. Run inference and chat with our model

After our endpoint is deployed, we can run inference on it using the `predict` method from the `predictor`. We can use different parameters to control the generation, defining them in the `parameters` attribute of the payload. As of today, TGI supports the following parameters:

- `temperature`: Controls randomness in the model. Lower values will make the model more deterministic and higher values will make the model more random. Default value is 1.0.
- `max_new_tokens`: The maximum number of tokens to generate. Default value is 20, max value is 512.
- `repetition_penalty`: Controls the likelihood of repetition. Default value is `null`.
- `seed`: The seed to use for random generation. Default value is `null`.
- `stop`: A list of tokens to stop the generation. The generation will stop when one of the tokens is generated.
- `top_k`: The number of highest probability vocabulary tokens to keep for top-k-filtering. Default value is `null`, which disables top-k-filtering.
- `top_p`: The cumulative probability of the highest probability vocabulary tokens to keep for nucleus sampling. Default value is `null`.
- `do_sample`: Whether or not to use sampling; use greedy decoding otherwise. Default value is `false`.
- `best_of`: Generate `best_of` sequences and return the one with the highest token logprobs. Default value is `null`.
- `details`: Whether or not to return details about the generation. Default value is `false`.
- `return_full_text`: Whether or not to return the full text or only the generated part. Default value is `false`.
- `truncate`: Whether or not to truncate the input to the maximum length of the model. Default value is `true`.
- `typical_p`: The typical probability of a token. Default value is `null`.
- `watermark`: The watermark to use for the generation. Default value is `false`.

You can find the OpenAPI specification of TGI in the [Swagger documentation](https://huggingface.github.io/text-generation-inference/).

The `OpenAssistant/pythia-12b-sft-v8-7k-steps` is a conversational chat model, meaning we can chat with it using the following prompt:

```
<|prompter|>[Instruction]<|endoftext|>
<|assistant|>
```

Let's give it a first try and ask about some cool ideas to do in the summer:

```python
chat = llm.predict({
  "inputs": """<|prompter|>What are some cool ideas to do in the summer?<|endoftext|><|assistant|>"""
})

print(chat[0]["generated_text"])
#  <|prompter|>What are some cool ideas to do in the summer?<|endoftext|><|assistant|>There are many fun and exciting things you can do in the summer. Here are some ideas:
```

Now we will show how to use generation parameters in the `parameters` attribute of the payload. In addition to setting custom `temperature`, `top_p`, etc., we also stop generation after the turn of the `bot`.

```python
# define payload
prompt="""<|prompter|>How can i stay more active during winter? Give me 3 tips.<|endoftext|><|assistant|>"""

# hyperparameters for llm
payload = {
  "inputs": prompt,
  "parameters": {
    "do_sample": True,
    "top_p": 0.7,
    "temperature": 0.7,
    "top_k": 50,
    "max_new_tokens": 256,
    "repetition_penalty": 1.03,
    "stop": ["<|endoftext|>"]
  }
}

# send request to endpoint
response = llm.predict(payload)

# print(response[0]["generated_text"][:-len("<human>:")])
print(response[0]["generated_text"])
```

## 5. Create Gradio Chatbot backed by Amazon SageMaker

We can also create a gradio application to chat with our model. Gradio is a Python library that allows you to quickly create customizable UI components around your machine learning models. You can find more about gradio [here](https://gradio.app/).
```python !pip install gradio --upgrade ``` ```python import gradio as gr # hyperparameters for llm parameters = { "do_sample": True, "top_p": 0.7, "temperature": 0.7, "top_k": 50, "max_new_tokens": 256, "repetition_penalty": 1.03, "stop": ["<|endoftext|>"] } with gr.Blocks() as demo: gr.Markdown("## Chat with Amazon SageMaker") with gr.Column(): chatbot = gr.Chatbot() with gr.Row(): with gr.Column(): message = gr.Textbox(label="Chat Message Box", placeholder="Chat Message Box", show_label=False) with gr.Column(): with gr.Row(): submit = gr.Button("Submit") clear = gr.Button("Clear") def respond(message, chat_history): # convert chat history to prompt converted_chat_history = "" if len(chat_history) > 0: for c in chat_history: converted_chat_history += f"<|prompter|>{c[0]}<|endoftext|><|assistant|>{c[1]}<|endoftext|>" prompt = f"{converted_chat_history}<|prompter|>{message}<|endoftext|><|assistant|>" # send request to endpoint llm_response = llm.predict({"inputs": prompt, "parameters": parameters}) # remove prompt from response parsed_response = llm_response[0]["generated_text"][len(prompt):] chat_history.append((message, parsed_response)) return "", chat_history submit.click(respond, [message, chatbot], [message, chatbot], queue=False) clear.click(lambda: None, None, chatbot, queue=False) demo.launch(share=True) ``` ![Gradio Chat application](assets/145_sagemaker-huggingface-llm/gradio.png "Gradio Chat application") Awesome! 🚀 We have successfully deployed Open Assistant Model to Amazon SageMaker and run inference on it. Additionally, we have built a quick gradio application to chat with our model. Now, it's time for you to try it out yourself and build Generation AI applications with the new Hugging Face LLM DLC on Amazon SageMaker. To clean up, we can delete the model and endpoint. ```python llm.delete_model() llm.delete_endpoint() ``` ## Conclusion The new Hugging Face LLM Inference DLC enables customers to easily and securely deploy open-source LLMs on Amazon SageMaker. The easy-to-use API and deployment process allows customers to build scalable AI chatbots and virtual assistants with state-of-the-art models like Open Assistant. Overall, this new DLC is going to empower developers and businesses to leverage the latest advances in natural language generation. --- Thanks for reading! If you have any questions, feel free to contact me on [Twitter](https://twitter.com/_philschmid) or [LinkedIn](https://www.linkedin.com/in/philipp-schmid-a6a2bb196/).
--- title: "Federated Learning using Hugging Face and Flower" thumbnail: /blog/assets/fl-with-flower/thumbnail.png authors: - user: charlesbvll guest: true --- # Federated Learning using Hugging Face and Flower <a target="_blank" href="https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/fl-with-flower.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> This tutorial will show how to leverage Hugging Face to federate the training of language models over multiple clients using [Flower](https://flower.ai/). More specifically, we will fine-tune a pre-trained Transformer model (distilBERT) for sequence classification over a dataset of IMDB ratings. The end goal is to detect if a movie rating is positive or negative. A notebook is also available [here](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/fl-with-flower.ipynb) but instead of running on multiple separate clients it utilizes the simulation functionality of Flower (using `flwr['simulation']`) in order to emulate a federated setting inside Google Colab (this also means that instead of calling `start_server` we will call `start_simulation`, and that a few other modifications are needed). ## Dependencies To follow along this tutorial you will need to install the following packages: `datasets`, `evaluate`, `flwr`, `torch`, and `transformers`. This can be done using `pip`: ```sh pip install datasets evaluate flwr torch transformers ``` ## Standard Hugging Face workflow ### Handling the data To fetch the IMDB dataset, we will use Hugging Face's `datasets` library. We then need to tokenize the data and create `PyTorch` dataloaders, this is all done in the `load_data` function: ```python import random import torch from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoTokenizer, DataCollatorWithPadding DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") CHECKPOINT = "distilbert-base-uncased" def load_data(): """Load IMDB data (training and eval)""" raw_datasets = load_dataset("imdb") raw_datasets = raw_datasets.shuffle(seed=42) # remove unnecessary data split del raw_datasets["unsupervised"] tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) def tokenize_function(examples): return tokenizer(examples["text"], truncation=True) # We will take a small sample in order to reduce the compute time, this is optional train_population = random.sample(range(len(raw_datasets["train"])), 100) test_population = random.sample(range(len(raw_datasets["test"])), 100) tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) tokenized_datasets["train"] = tokenized_datasets["train"].select(train_population) tokenized_datasets["test"] = tokenized_datasets["test"].select(test_population) tokenized_datasets = tokenized_datasets.remove_columns("text") tokenized_datasets = tokenized_datasets.rename_column("label", "labels") data_collator = DataCollatorWithPadding(tokenizer=tokenizer) trainloader = DataLoader( tokenized_datasets["train"], shuffle=True, batch_size=32, collate_fn=data_collator, ) testloader = DataLoader( tokenized_datasets["test"], batch_size=32, collate_fn=data_collator ) return trainloader, testloader trainloader, testloader = load_data() ``` ### Training and testing the model Once we have a way of creating our trainloader and testloader, we can take care of the training and testing. 
This is very similar to any `PyTorch` training or testing loop: ```python from evaluate import load as load_metric from transformers import AdamW def train(net, trainloader, epochs): optimizer = AdamW(net.parameters(), lr=5e-5) net.train() for _ in range(epochs): for batch in trainloader: batch = {k: v.to(DEVICE) for k, v in batch.items()} outputs = net(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() def test(net, testloader): metric = load_metric("accuracy") loss = 0 net.eval() for batch in testloader: batch = {k: v.to(DEVICE) for k, v in batch.items()} with torch.no_grad(): outputs = net(**batch) logits = outputs.logits loss += outputs.loss.item() predictions = torch.argmax(logits, dim=-1) metric.add_batch(predictions=predictions, references=batch["labels"]) loss /= len(testloader.dataset) accuracy = metric.compute()["accuracy"] return loss, accuracy ``` ### Creating the model itself To create the model itself, we will just load the pre-trained distillBERT model using Hugging Face’s `AutoModelForSequenceClassification` : ```python from transformers import AutoModelForSequenceClassification net = AutoModelForSequenceClassification.from_pretrained( CHECKPOINT, num_labels=2 ).to(DEVICE) ``` ## Federating the example The idea behind Federated Learning is to train a model between multiple clients and a server without having to share any data. This is done by letting each client train the model locally on its data and send its parameters back to the server, which then aggregates all the clients’ parameters together using a predefined strategy. This process is made very simple by using the [Flower](https://github.com/adap/flower) framework. If you want a more complete overview, be sure to check out this guide: [What is Federated Learning?](https://flower.ai/docs/framework/tutorial-series-what-is-federated-learning.html) ### Creating the IMDBClient To federate our example to multiple clients, we first need to write our Flower client class (inheriting from `flwr.client.NumPyClient`). This is very easy, as our model is a standard `PyTorch` model: ```python from collections import OrderedDict import flwr as fl class IMDBClient(fl.client.NumPyClient): def get_parameters(self, config): return [val.cpu().numpy() for _, val in net.state_dict().items()] def set_parameters(self, parameters): params_dict = zip(net.state_dict().keys(), parameters) state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) net.load_state_dict(state_dict, strict=True) def fit(self, parameters, config): self.set_parameters(parameters) print("Training Started...") train(net, trainloader, epochs=1) print("Training Finished.") return self.get_parameters(config={}), len(trainloader), {} def evaluate(self, parameters, config): self.set_parameters(parameters) loss, accuracy = test(net, testloader) return float(loss), len(testloader), {"accuracy": float(accuracy)} ``` The `get_parameters` function lets the server get the client's parameters. Inversely, the `set_parameters` function allows the server to send its parameters to the client. Finally, the `fit` function trains the model locally for the client, and the `evaluate` function tests the model locally and returns the relevant metrics. We can now start client instances using: ```python fl.client.start_numpy_client( server_address="127.0.0.1:8080", client=IMDBClient(), ) ``` ### Starting the server Now that we have a way to instantiate clients, we need to create our server in order to aggregate the results. 
Using Flower, this can be done very easily by first choosing a strategy (here, we are using `FedAvg`, which will define the global weights as the average of all the clients' weights at each round) and then using the `flwr.server.start_server` function:

```python
def weighted_average(metrics):
    accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics]
    examples = [num_examples for num_examples, _ in metrics]
    return {"accuracy": sum(accuracies) / sum(examples)}

# Define strategy
strategy = fl.server.strategy.FedAvg(
    fraction_fit=1.0,
    fraction_evaluate=1.0,
    evaluate_metrics_aggregation_fn=weighted_average,
)

# Start server
fl.server.start_server(
    server_address="0.0.0.0:8080",
    config=fl.server.ServerConfig(num_rounds=3),
    strategy=strategy,
)
```

The `weighted_average` function is there to provide a way to aggregate the metrics distributed amongst the clients (basically, this allows us to display a nice average accuracy for every round). Note that the client's `evaluate` method only returns `accuracy` in its metrics dictionary, which is why that is the only metric we aggregate here; the evaluation loss is returned separately by `evaluate` and aggregated by the strategy itself.

## Putting everything together

If you want to check out everything put together, you should check out the code example we wrote for the Flower repo: [https://github.com/adap/flower/tree/main/examples/quickstart-huggingface](https://github.com/adap/flower/tree/main/examples/quickstart-huggingface).

Of course, this is a very basic example, and a lot can be added or modified; it was just to showcase how simply we can federate a Hugging Face workflow using Flower.

Note that in this example we used `PyTorch`, but we could have very well used `TensorFlow`.
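As mentioned in the introduction, the companion notebook relies on Flower's simulation functionality instead of separate client processes. A minimal sketch of that variant is shown below; it assumes `flwr` is installed with the `simulation` extra, and exact signatures may differ slightly between Flower versions:

```python
import flwr as fl

def client_fn(cid: str):
    # Each virtual client reuses the model and dataloaders defined above.
    # In a more realistic setup, you would partition the data based on the client id (cid).
    return IMDBClient()

# Emulate a 2-client federation inside a single process instead of starting a server
fl.simulation.start_simulation(
    client_fn=client_fn,
    num_clients=2,
    config=fl.server.ServerConfig(num_rounds=3),
    strategy=strategy,  # the FedAvg strategy defined above
)
```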
---
title: "Train a Sentence Embedding Model with 1B Training Pairs"
authors:
- user: asi
  guest: true
---

# Train a Sentence Embedding Model with 1 Billion Training Pairs

**Sentence embedding** is a method that maps sentences to vectors of real numbers. Ideally, these vectors would capture the semantics of a sentence and be highly generic. Such representations could then be used for many downstream applications such as clustering, text mining, or question answering.

We developed state-of-the-art sentence embedding models as part of the project ["Train the Best Sentence Embedding Model Ever with 1B Training Pairs"](https://discuss.huggingface.co/t/train-the-best-sentence-embedding-model-ever-with-1b-training-pairs/7354). This project took place during the [Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104), organized by Hugging Face. We benefited from efficient hardware infrastructure to run the project: 7 TPU v3-8s, as well as guidance from Google's Flax, JAX, and Cloud team members about efficient deep learning frameworks!

## Training methodology

### Model

Unlike words, we cannot define a finite set of sentences. Sentence embedding methods, therefore, compose inner words to compute the final representation. For example, the SentenceBERT model ([Reimers and Gurevych, 2019](https://aclanthology.org/D19-1410.pdf)) uses a Transformer, the cornerstone of many NLP applications, followed by a pooling operation over the contextualized word vectors (cf. figure below).

![snippet](assets/32_1b_sentence_embeddings/model.png)

### Multiple Negatives Ranking Loss

The parameters from the composition module are usually learned using a self-supervised objective. For the project, we used a contrastive training method illustrated in the figure below. We build a dataset with sentence pairs \\( (a_i, p_i) \\) such that sentences from the pair have a close meaning. For example, we consider pairs such as (query, answer-passage), (question, duplicate question), (paper title, cited paper title). Our model is then trained to map pairs \\( (a_i , p_i) \\) to close vectors while assigning unmatched pairs \\( (a_i , p_j), i \neq j \\) to distant vectors in the embedding space. This training method is also called training with in-batch negatives, InfoNCE, or NTXentLoss.

![snippet](assets/32_1b_sentence_embeddings/contrastive_1.png)

Formally, given a batch of training samples, the model optimises the following [loss function](https://github.com/UKPLab/sentence-transformers/blob/master/sentence_transformers/losses/MultipleNegativesRankingLoss.py):

$$-\frac{1}{n}\sum_{i=1}^n\log\frac{\exp(sim(a_i, p_i))}{\sum_j \exp(sim(a_i, p_j))}$$

An illustrative example can be seen below. The model first embeds each sentence from every pair in the batch. Then, we compute a similarity matrix between every possible pair \\( (a_i, p_j) \\). We then compare the similarity matrix with the ground truth, which indicates the original pairs. Finally, we perform the comparison using the cross entropy loss.

Intuitively, the model should assign high similarity to the sentences « How many people live in Berlin? » and « Around 3.5 million people live in Berlin » and low similarity to other negative answers such as « The capital of France is Paris », as detailed in the figure below.

![snippet](assets/32_1b_sentence_embeddings/contrastive_2.png)

In the loss equation, `sim` indicates a similarity function between \\( (a, p) \\).
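To make the objective concrete, here is a small PyTorch sketch of this in-batch-negatives loss. It is an illustrative re-implementation rather than the exact training code; the `scale` argument corresponds to the scaled similarity discussed next:

```python
import torch
import torch.nn.functional as F

def multiple_negatives_ranking_loss(a_emb, p_emb, scale=20.0):
    """a_emb, p_emb: (batch_size, dim) embeddings of the pairs (a_i, p_i)."""
    # cosine similarity between every anchor a_i and every candidate p_j
    a_norm = F.normalize(a_emb, dim=-1)
    p_norm = F.normalize(p_emb, dim=-1)
    scores = scale * (a_norm @ p_norm.T)  # (batch_size, batch_size) similarity matrix
    # the matching pair (a_i, p_i) lies on the diagonal of the similarity matrix
    labels = torch.arange(scores.size(0), device=scores.device)
    # cross entropy = -mean log softmax of the positive pairs
    return F.cross_entropy(scores, labels)
```

Using normalized embeddings in this sketch corresponds to the cosine-similarity variant; dropping the normalization gives the dot-product variant discussed below.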
The similarity function could be either the Cosine-Similarity or the Dot-Product operator. Both methods have their pros and cons, summarized below ([Thakur et al., 2021](https://arxiv.org/abs/2104.08663), [Bachrach et al., 2014](https://dl.acm.org/doi/10.1145/2645710.2645741)):

| Cosine-similarity | Dot-product |
|---------------------|-------------|
| A vector has the highest similarity to itself since \\( cos(a, a)=1 \\). | Other vectors can have higher dot-products: \\( dot(a, a) < dot (a, b) \\). |
| With normalised vectors it is equal to the dot product. The maximum vector length equals 1. | It might be slower with certain approximate nearest neighbour methods since the maximum vector length is not known. |
| With normalised vectors, it is proportional to Euclidean distance. It works with k-means clustering. | It does not work with k-means clustering. |

In practice, we used a scaled similarity because score differences tend to be too small, and applied a scaling factor \\( C \\) such that \\( sim_{scaled}(a, b) = C * sim(a, b) \\), with typically \\( C = 20 \\) ([Henderson et al., 2020](https://doi.org/10.18653/v1/2020.findings-emnlp.196), [Radford et al., 2021](http://proceedings.mlr.press/v139/radford21a.html)).

### Improving Quality with Better Batches

In our method, we build batches of sample pairs \\( (a_i , p_i) \\). We consider all other samples from the batch, \\( (a_i , p_j), i \neq j \\), as negative sample pairs. The batch composition is therefore a key training aspect. Given the literature in the domain, we mainly focused on three main aspects of the batch.

#### 1. Size matters

In contrastive learning, a larger batch size is synonymous with better performance. As shown in the figure extracted from Qu et al. ([2021](https://doi.org/10.18653/v1/2021.naacl-main.466)), a larger batch size improves the results.

![snippet](assets/32_1b_sentence_embeddings/batch-size.png)

#### 2. Hard Negatives

In the same figure, we observe that including hard negatives also improves performance. Hard negatives are samples \\( p_j \\) which are hard to distinguish from \\( p_i \\). In our example, it could be the pairs « What is the capital of France? » and « What is the capital of the US? », which have a close semantic content and require precisely understanding the full sentence to be answered correctly. On the contrary, the samples « What is the capital of France? » and « How many Star Wars movies are there? » are less difficult to distinguish since they do not refer to the same topic.

#### 3. Cross dataset batches

We concatenated multiple datasets to train our models. We built large batches and gathered samples from the same dataset within a batch to limit the topic distribution and favor hard negatives. However, we also mix at least two datasets in the batch to learn a global structure between topics and not only a local structure within a topic.

## Training infrastructure and data

As mentioned earlier, the quantity of data and the batch size directly impact the model performance. As part of the project, we benefited from efficient hardware infrastructure. We trained our models on [TPUs](https://cloud.google.com/tpu), which are compute units developed by Google and super efficient for matrix multiplications. TPUs have some [hardware specificities](https://huggingface.co/docs/accelerate/quicktour.html#training-on-tpu) which might require some specific code implementation.

Additionally, we trained models on a large corpus, as we concatenated multiple datasets up to 1 billion sentence pairs!
All datasets used are detailed for each model in the [model card](https://huggingface.co/flax-sentence-embeddings/all_datasets_v3_MiniLM-L12).

## Conclusion

You can find all models and datasets we created during the challenge in our [HuggingFace repository](https://huggingface.co/flax-sentence-embeddings). We trained 20 general-purpose Sentence Transformers models such as MiniLM ([Wang et al., 2020](https://proceedings.neurips.cc/paper/2020/hash/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html)), RoBERTa ([Liu et al., 2019](https://arxiv.org/abs/1907.11692)), DistilBERT ([Sanh et al., 2020](http://arxiv.org/abs/1910.01108)) and MPNet ([Song et al., 2020](https://proceedings.neurips.cc/paper/2020/hash/c3a690be93aa602ee2dc0ccab5b7b67e-Abstract.html)). Our models achieve SOTA on multiple general-purpose Sentence Similarity evaluation tasks. We also shared [8 datasets](https://huggingface.co/flax-sentence-embeddings) specialized for Question Answering, Sentence-Similarity, and Gender Evaluation.

General sentence embeddings might be used for many applications. We built a [Spaces demo](https://huggingface.co/spaces/flax-sentence-embeddings/sentence-embeddings) to showcase several applications:

* The **sentence similarity** module compares the similarity of the main text with other texts of your choice. In the background, the demo extracts the embedding for each text and computes the similarity between the source sentence and the others using cosine similarity.
* **Asymmetric QA** compares how likely candidate answers of your choice are to answer a given query.
* **Search / Cluster** returns nearby answers from a query. For example, if you input « python », it will retrieve the closest sentences using dot-product distance.
* **Gender Bias Evaluation** reports the *inherent gender bias* in the training set via random sampling of the sentences. Given an anchor text that does not mention the gender of a target occupation and two propositions with gendered pronouns, we check whether models assign a higher similarity to a given proposition and thereby evaluate their propensity to favor a specific gender.

The [Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104) has been an intense and highly rewarding experience! The guidance and presence of Google's Flax, JAX, and Cloud team members and of the Hugging Face team helped us all learn a lot. We hope all projects had as much fun as we did in ours. Whenever you have questions or suggestions, don't hesitate to contact us!
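If you want to try the released models right away, the sentence similarity use case described above can be reproduced with the `sentence-transformers` library roughly as follows (a sketch, assuming the checkpoint linked above loads directly with `SentenceTransformer`):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("flax-sentence-embeddings/all_datasets_v3_MiniLM-L12")

sentences = [
    "How many people live in Berlin?",
    "Around 3.5 million people live in Berlin.",
    "The capital of France is Paris.",
]
embeddings = model.encode(sentences, convert_to_tensor=True)

# cosine similarity between the question and the two candidate answers
print(util.cos_sim(embeddings[0], embeddings[1:]))
```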
---
title: "Introducing the AMD 5th Gen EPYC™ CPU"
thumbnail: /blog/assets/optimum_amd/amd_hf_logo_fixed.png
authors:
- user: mohitsha
- user: mfuntowicz
---

# Introducing the AMD 5th Gen EPYC™ CPU

AMD has just unveiled its 5th generation of server-grade EPYC CPUs based on the Zen5 architecture - also known as `Turin`. It provides a significant boost in performance, especially with a higher core count, reaching up to `192` cores and `384` threads.

From Large Language Models (LLMs) to RAG scenarios, Hugging Face users can leverage this new generation of servers to enhance their performance capabilities:
1. Reduce the target latency of their deployments.
2. Increase the maximum throughput.
3. Lower the operational costs.

During the last few weeks, we have been working with AMD to validate that the Hugging Face ecosystem is fully supported on this new CPU generation and delivers the expected performance across different tasks. Also, we have been cooking up some exciting new ways to leverage `torch.compile` on AMD CPUs through the `AMD ZenDNN PyTorch plugin (zentorch)` to further speed up the kinds of workloads we discuss below.

While we were able to get early access to this work to test Hugging Face models and libraries and share performance numbers with you, we expect AMD to make it available to the community soon - stay tuned!

## AMD Turin vs AMD Genoa Performance - A 2X speedup

In this section, we present the results from our benchmarking of the two AMD EPYC CPUs: Turin (128 cores) and Genoa (96 cores). For these benchmarks, we utilized the **ZenDNN** plug-in for PyTorch (zentorch), which provides inference optimizations tailored for deep learning workloads on AMD EPYC CPUs. This plug-in integrates seamlessly with the torch.compile graph compilation flow, enabling multiple passes of graph-level optimizations on the torch.fx graph to achieve further performance acceleration.

To ensure optimal performance, we used the `bfloat16` data type and employed `ZenDNN 5.0`. We configured multi-instance setups that enable the parallel execution of multiple [Meta LLaMA 3.1 8B](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) model instances spread across all the cores. Each model instance is allocated 32 physical cores per socket, allowing us to leverage the full processing power of the servers for efficient data handling and computational speed.

We ran the benchmarks using two different batch sizes (16 and 32) across five distinct use cases:
- Summarization (1024 input tokens / 128 output tokens)
- Chatbot (128 input tokens / 128 output tokens)
- Translation (1024 input tokens / 1024 output tokens)
- Essay Writing (128 input tokens / 1024 output tokens)
- Live Captioning (16 input tokens / 16 output tokens)

These configurations not only facilitate a comprehensive analysis of how each server performs under varying workloads, but also simulate real-world applications of LLMs. Specifically, we plot the decode throughput (excluding the first token) for each use case to illustrate performance differences.

### Results for Llama 3.1 8B Instruct

![Turin vs Genoa](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/hf-amd-turin/zentorch_bs_16_32_turin_vs_genoa.png)

_Throughput results for Meta Llama 3.1 8B, comparing AMD Turin against AMD Genoa.
AMD Turin consistently outperforms the AMD Genoa CPUs, achieving approximately 2X higher throughput in most configurations._ ## Conclusion As demonstrated, the AMD EPYC Turin CPU offers a significant boost in performance for AI use cases compared to its predecessor, the AMD Genoa. To enhance reproducibility and streamline the benchmarking process, we utilized [optimum-benchmark](https://github.com/huggingface/optimum-benchmark), which provides a unified framework for efficient benchmarking across various setups. This enabled us to effectively benchmark using the `zentorch` backend for `torch.compile`. Furthermore, we have developed an optimized `Dockerfile` that will be released soon, along with the benchmarking code. This will facilitate easy deployment and reproduction of our results, ensuring that others can effectively leverage our findings. You can find more information at [AMD Zen Deep Neural Network (ZenDNN)](https://www.amd.com/en/developer/zendnn.html) ## Useful Resources - ZenTF: ​ https://github.com/amd/ZenDNN-tensorflow-plugin​ - ZenTorch: ​https://github.com/amd/ZenDNN-pytorch-plugin ​ - ZenDNN ONNXRuntime:  https://github.com/amd/ZenDNN-onnxruntime
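For readers who want to experiment once the plugin is released, wiring `zentorch` into a standard 🤗 Transformers generation loop is expected to look roughly like the sketch below. This is illustrative only: the package import, the `"zentorch"` backend string, and the supported PyTorch versions are defined by AMD's zentorch releases, and access to the Llama 3.1 weights requires accepting the license on the Hub.

```python
import torch
import zentorch  # AMD ZenDNN PyTorch plugin; importing it is assumed to register the "zentorch" backend
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

# compile the model with the zentorch backend to enable the ZenDNN graph-level optimizations
model = torch.compile(model, backend="zentorch")

inputs = tokenizer("Summarize: AMD EPYC Turin brings up to 192 cores per socket ...", return_tensors="pt")
with torch.inference_mode():
    outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```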
--- title: "Assisted Generation: a new direction toward low-latency text generation" thumbnail: /blog/assets/assisted-generation/thumbnail.png authors: - user: joaogante --- # Assisted Generation: a new direction toward low-latency text generation Large language models are all the rage these days, with many companies investing significant resources to scale them up and unlock new capabilities. However, as humans with ever-decreasing attention spans, we also dislike their slow response times. Latency is critical for a good user experience, and smaller models are often used despite their lower quality (e.g. in [code completion](https://ai.googleblog.com/2022/07/ml-enhanced-code-completion-improves.html)). Why is text generation so slow? What’s preventing you from deploying low-latency large language models without going bankrupt? In this blog post, we will revisit the bottlenecks for autoregressive text generation and introduce a new decoding method to tackle the latency problem. You’ll see that by using our new method, assisted generation, you can reduce latency up to 10x in commodity hardware! ## Understanding text generation latency The core of modern text generation is straightforward to understand. Let’s look at the central piece, the ML model. Its input contains a text sequence, which includes the text generated so far, and potentially other model-specific components (for instance, Whisper also has an audio input). The model takes the input and runs a forward pass: the input is fed to the model and passed sequentially along its layers until the unnormalized log probabilities for the next token are predicted (also known as logits). A token may consist of entire words, sub-words, or even individual characters, depending on the model. The [illustrated GPT-2](https://jalammar.github.io/illustrated-gpt2/) is a great reference if you’d like to dive deeper into this part of text generation. <!-- [GIF 1 -- FWD PASS] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov" ></video> </figure> A model forward pass gets you the logits for the next token, which you can freely manipulate (e.g. set the probability of undesirable words or sequences to 0). The following step in text generation is to select the next token from these logits. Common strategies include picking the most likely token, known as greedy decoding, or sampling from their distribution, also called multinomial sampling. Chaining model forward passes with next token selection iteratively gets you text generation. This explanation is the tip of the iceberg when it comes to decoding methods; please refer to [our blog post on text generation](https://huggingface.co/blog/how-to-generate) for an in-depth exploration. <!-- [GIF 2 -- TEXT GENERATION] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov" ></video> </figure> From the description above, the latency bottleneck in text generation is clear: running a model forward pass for large models is slow, and you may need to do hundreds of them in a sequence. But let’s dive deeper: why are forward passes slow? 
Forward passes are typically dominated by matrix multiplications and, after a quick visit to the [corresponding wikipedia section](https://en.wikipedia.org/wiki/Matrix_multiplication_algorithm#Communication-avoiding_and_distributed_algorithms), you can tell that memory bandwidth is the limitation in this operation (e.g. from the GPU RAM to the GPU compute cores). In other words, *the bottleneck in the forward pass comes from loading the model layer weights into the computation cores of your device, not from performing the computations themselves*. At the moment, you have three main avenues you can explore to get the most out of text generation, all tackling the performance of the model forward pass. First, you have the hardware-specific model optimizations. For instance, your device may be compatible with [Flash Attention](https://github.com/HazyResearch/flash-attention), which speeds up the attention layer through a reorder of the operations, or [INT8 quantization](https://huggingface.co/blog/hf-bitsandbytes-integration), which reduces the size of the model weights. Second, when you know you’ll get concurrent text generation requests, you can batch the inputs and massively increase the throughput with a small latency penalty. The model layer weights loaded into the device are now used on several input rows in parallel, which means that you’ll get more tokens out for approximately the same memory bandwidth burden. The catch with batching is that you need additional device memory (or to offload the memory somewhere) – at the end of this spectrum, you can see projects like [FlexGen](https://github.com/FMInference/FlexGen) which optimize throughput at the expense of latency. ```python # Example showcasing the impact of batched generation. Measurement device: RTX3090 from transformers import AutoModelForCausalLM, AutoTokenizer import time tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2").to("cuda") inputs = tokenizer(["Hello world"], return_tensors="pt").to("cuda") def print_tokens_per_second(batch_size): new_tokens = 100 cumulative_time = 0 # warmup model.generate( **inputs, do_sample=True, max_new_tokens=new_tokens, num_return_sequences=batch_size ) for _ in range(10): start = time.time() model.generate( **inputs, do_sample=True, max_new_tokens=new_tokens, num_return_sequences=batch_size ) cumulative_time += time.time() - start print(f"Tokens per second: {new_tokens * batch_size * 10 / cumulative_time:.1f}") print_tokens_per_second(1) # Tokens per second: 418.3 print_tokens_per_second(64) # Tokens per second: 16266.2 (~39x more tokens per second) ``` Finally, if you have multiple devices available to you, you can distribute the workload using [Tensor Parallelism](https://huggingface.co/docs/transformers/main/en/perf_train_gpu_many#tensor-parallelism) and obtain lower latency. With Tensor Parallelism, you split the memory bandwidth burden across multiple devices, but you now have to consider inter-device communication bottlenecks in addition to the monetary cost of running multiple devices. The benefits depend largely on the model size: models that easily fit on a single consumer device see very limited benefits. Taking the results from this [DeepSpeed blog post](https://www.microsoft.com/en-us/research/blog/deepspeed-accelerating-large-scale-model-inference-and-training-via-system-optimizations-and-compression/), you see that you can spread a 17B parameter model across 4 GPUs to reduce the latency by 1.5x (Figure 7). 
These three types of improvements can be used in tandem, resulting in [high throughput solutions](https://github.com/huggingface/text-generation-inference). However, after applying hardware-specific optimizations, there are limited options to reduce latency – and the existing options are expensive. Let’s fix that! ## Language decoder forward pass, revisited You’ve read above that each model forward pass yields the logits for the next token, but that’s actually an incomplete description. During text generation, the typical iteration consists in the model receiving as input the latest generated token, plus cached internal computations for all other previous inputs, returning the next token logits. Caching is used to avoid redundant computations, resulting in faster forward passes, but it’s not mandatory (and can be used partially). When caching is disabled, the input contains the entire sequence of tokens generated so far and the output contains the logits corresponding to the next token for *all positions* in the sequence! The logits at position N correspond to the distribution for the next token if the input consisted of the first N tokens, ignoring all subsequent tokens in the sequence. In the particular case of greedy decoding, if you pass the generated sequence as input and apply the argmax operator to the resulting logits, you will obtain the generated sequence back. ```python from transformers import AutoModelForCausalLM, AutoTokenizer tok = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2") inputs = tok(["The"], return_tensors="pt") generated = model.generate(**inputs, do_sample=False, max_new_tokens=10) forward_confirmation = model(generated).logits.argmax(-1) # We exclude the opposing tips from each sequence: the forward pass returns # the logits for the next token, so it is shifted by one position. print(generated[0, 1:].tolist() == forward_confirmation[0, :-1].tolist()) # True ``` This means that you can use a model forward pass for a different purpose: in addition to feeding some tokens to predict the next one, you can also pass a sequence to the model and double-check whether the model would generate that same sequence (or part of it). <!-- [GIF 3 -- FWD CONFIRMATION] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_3_1080p.mov" ></video> </figure> Let’s consider for a second that you have access to a magical latency-free oracle model that generates the same sequence as your model, for any given input. For argument’s sake, it can’t be used directly, it’s limited to being an assistant to your generation procedure. Using the property described above, you could use this assistant model to get candidate output tokens followed by a forward pass with your model to confirm that they are indeed correct. In this utopian scenario, the latency of text generation would be reduced from `O(n)` to `O(1)`, with `n` being the number of generated tokens. For long generations, we're talking about several orders of magnitude. Walking a step towards reality, let's assume the assistant model has lost its oracle properties. Now it’s a latency-free model that gets some of the candidate tokens wrong, according to your model. 
Due to the autoregressive nature of the task, as soon as the assistant gets a token wrong, all subsequent candidates must be invalidated. However, that does not prevent you from querying the assistant again, after correcting the wrong token with your model, and repeating this process iteratively. Even if the assistant fails on a few tokens, text generation would have an order of magnitude less latency than in its original form.

Obviously, there are no latency-free assistant models. Nevertheless, it is relatively easy to find a model that approximates some other model’s text generation outputs – smaller versions of the same architecture trained similarly often fit this property. Moreover, when the difference in model sizes becomes significant, the cost of using the smaller model as an assistant becomes an afterthought after factoring in the benefits of skipping a few forward passes! You now understand the core of _assisted generation_.

## Greedy decoding with assisted generation

Assisted generation is a balancing act. You want the assistant to quickly generate a candidate sequence while being as accurate as possible. If the assistant has poor quality, you get the cost of using the assistant model with little to no benefits. On the other hand, optimizing the quality of the candidate sequences may imply the use of slow assistants, resulting in a net slowdown. While we can't automate the selection of the assistant model for you, we’ve included an additional requirement and a heuristic to ensure the time spent with the assistant stays in check.

First, the requirement – the assistant must have the exact same tokenizer as your model. If this requirement were not in place, expensive token decoding and re-encoding steps would have to be added. Furthermore, these additional steps would have to happen on the CPU, which in turn may need slow inter-device data transfers. Fast usage of the assistant is critical for the benefits of assisted generation to show up.

Finally, the heuristic. By this point, you have probably noticed the similarities between the movie Inception and assisted generation – you are, after all, running text generation inside text generation. There will be one assistant model forward pass per candidate token, and we know that forward passes are expensive. While you can’t know in advance the number of tokens that the assistant model will get right, you can keep track of this information and use it to limit the number of candidate tokens requested from the assistant – some sections of the output are easier to anticipate than others.

Wrapping it all up, here’s our original implementation of the assisted generation loop ([code](https://github.com/huggingface/transformers/blob/849367ccf741d8c58aa88ccfe1d52d8636eaf2b7/src/transformers/generation/utils.py#L4064)):
1. Use greedy decoding to generate a certain number of candidate tokens with the assistant model, producing `candidates`. The number of produced candidate tokens is initialized to `5` the first time assisted generation is called.
2. Using our model, do a forward pass with `candidates`, obtaining `logits`.
3. Use the token selection method (`.argmax()` for greedy search or `.multinomial()` for sampling) to get the `next_tokens` from `logits`.
4. Compare `next_tokens` to `candidates` and get the number of matching tokens. Remember that this comparison has to be done with left-to-right causality: after the first mismatch, all candidates are invalidated.
5.
Use the number of matches to slice things up and discard variables related to unconfirmed candidate tokens. In essence, in `next_tokens`, keep the matching tokens plus the first divergent token (which our model generates from a valid candidate subsequence). 6. Adjust the number of candidate tokens to be produced in the next iteration — our original heuristic increases it by `2` if ALL tokens match and decreases it by `1` otherwise. <!-- [GIF 4 -- ASSISTED GENERATION] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_4_1080p.mov" ></video> </figure> We’ve designed the API in 🤗 Transformers such that this process is hassle-free for you. All you need to do is to pass the assistant model under the new `assistant_model` keyword argument and reap the latency gains! At the time of the release of this blog post, assisted generation is limited to a batch size of `1`. ```python from transformers import AutoModelForCausalLM, AutoTokenizer import torch prompt = "Alice and Bob" checkpoint = "EleutherAI/pythia-1.4b-deduped" assistant_checkpoint = "EleutherAI/pythia-160m-deduped" device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained(checkpoint) inputs = tokenizer(prompt, return_tensors="pt").to(device) model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device) assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint).to(device) outputs = model.generate(**inputs, assistant_model=assistant_model) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) # ['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a'] ``` Is the additional internal complexity worth it? Let’s have a look at the latency numbers for the greedy decoding case (results for sampling are in the next section), considering a batch size of `1`. These results were pulled directly out of 🤗 Transformers without any additional optimizations, so you should be able to reproduce them in your setup. <!-- [SPACE WITH GREEDY DECODING PERFORMANCE NUMBERS] --> <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.28.2/gradio.js" ></script> <gradio-app theme_mode="light" space="joaogante/assisted_generation_benchmarks"></gradio-app> Glancing at the collected numbers, we see that assisted generation can deliver significant latency reductions in diverse settings, but it is not a silver bullet – you should benchmark it before applying it to your use case. We can conclude that assisted generation: 1. 🤏 Requires access to an assistant model that is at least an order of magnitude smaller than your model (the bigger the difference, the better); 2. 🚀 Gets up to 3x speedups in the presence of INT8 and up to 2x otherwise, when the model fits in the GPU memory; 3. 🤯 If you’re playing with models that do not fit in your GPU and are relying on memory offloading, you can see up to 10x speedups; 4. 📄 Shines in input-grounded tasks, like automatic speech recognition or summarization. ## Sample with assisted generation Greedy decoding is suited for input-grounded tasks (automatic speech recognition, translation, summarization, ...) or factual knowledge-seeking. Open-ended tasks requiring large levels of creativity, such as most uses of a language model as a chatbot, should use sampling instead. 
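As a quick reference, switching `generate` from greedy decoding to multinomial sampling only takes a couple of flags. Here is a minimal sketch reusing the `model`, `tokenizer`, and `inputs` defined in the snippet above:

```python
# Plain multinomial sampling (no assistant), reusing `model`, `tokenizer`,
# and `inputs` from the assisted generation snippet above.
sampled_outputs = model.generate(
    **inputs,
    do_sample=True,      # sample from the probability distribution
    temperature=0.7,     # sharpen (<1.0) or flatten (>1.0) the distribution
    top_p=0.9,           # nucleus sampling: keep the smallest token set with 90% mass
    max_new_tokens=20,
)
print(tokenizer.batch_decode(sampled_outputs, skip_special_tokens=True))
```

The same flags can be combined with `assistant_model`, as discussed next.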
Assisted generation is naturally designed for greedy decoding, but that doesn’t mean that you can’t use assisted generation with multinomial sampling! Drawing samples from a probability distribution for the next token will cause our greedy assistant to fail more often, reducing its latency benefits. However, we can control how sharp the probability distribution for the next tokens is, using the temperature coefficient that’s present in most sampling-based applications. At one extreme, with temperatures close to 0, sampling will approximate greedy decoding, favoring the most likely token. At the other extreme, with the temperature set to values much larger than 1, sampling will be chaotic, drawing from a uniform distribution. Low temperatures are, therefore, more favorable to your assistant model, retaining most of the latency benefits from assisted generation, as we can see below.

<!-- [TEMPERATURE RESULTS, SHOW THAT LATENCY INCREASES STEADILY WITH TEMP] -->
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/temperature.png"/>
</div>

Why don’t you try it for yourself, to get a feel for assisted generation?

<!-- [DEMO] -->
<gradio-app theme_mode="light" space="joaogante/assisted_generation_demo"></gradio-app>

## Future directions

Assisted generation shows that modern text generation strategies are ripe for optimization. Understanding that it is currently a memory-bound problem, not a compute-bound problem, allows us to apply simple heuristics to get the most out of the available memory bandwidth, alleviating the bottleneck. We believe that further refinement of the use of assistant models will get us even bigger latency reductions - for instance, we may be able to skip a few more forward passes if we request the assistant to generate several candidate continuations. Naturally, releasing high-quality small models to be used as assistants will be critical to realizing and amplifying the benefits.

Initially released under our 🤗 Transformers library, to be used with the `.generate()` function, we expect assisted generation to be offered throughout the Hugging Face universe. Its implementation is also completely open-source, so if you’re working on text generation and not using our tools, feel free to use it as a reference.

Finally, assisted generation resurfaces a crucial question in text generation. The field has been evolving with the constraint that all new tokens are the result of a fixed amount of compute, for a given model. One token per homogeneous forward pass, in pure autoregressive fashion. This blog post reinforces the idea that it shouldn’t be the case: large subsections of the generated output can equally well be generated by models that are a fraction of the size. For that, we’ll need new model architectures and decoding methods – we’re excited to see what the future holds!

## Related Work

After the original release of this blog post, it came to my attention that other works have explored the same core principle (use a forward pass to validate longer continuations).
In particular, have a look at the following works:
- [Blockwise Parallel Decoding](https://proceedings.neurips.cc/paper/2018/file/c4127b9194fe8562c64dc0f5bf2c93bc-Paper.pdf), by Google Brain
- [Speculative Sampling](https://arxiv.org/abs/2302.01318), by DeepMind

## Citation

```bibtex
@misc {gante2023assisted,
  author       = { {Joao Gante} },
  title        = { Assisted Generation: a new direction toward low-latency text generation },
  year         = 2023,
  url          = { https://huggingface.co/blog/assisted-generation },
  doi          = { 10.57967/hf/0638 },
  publisher    = { Hugging Face Blog }
}
```

## Acknowledgements

I'd like to thank Sylvain Gugger, Nicolas Patry, and Lewis Tunstall for sharing many valuable suggestions to improve this blog post. Finally, kudos to Chunte Lee for designing the gorgeous cover you can see on our web page.
7
0
hf_public_repos
hf_public_repos/blog/dynamic_speculation_lookahead.md
--- title: "Faster Assisted Generation with Dynamic Speculation" thumbnail: /blog/assets/optimum_intel/intel_thumbnail.png authors: - user: jmamou guest: true org: Intel - user: orenpereg guest: true org: Intel - user: joaogante - user: lewtun - user: danielkorat guest: true org: Intel - user: Nadav-Timor guest: true org: weizmannscience - user: moshew guest: true org: Intel --- ⭐ In this blog post, we’ll explore *dynamic speculative decoding* —a novel method developed by Intel labs and Hugging Face that accelerates text generation by up to 2.7x, depending on the task. This method is the default operational mode for assisted generation starting from [Transformers🤗](https://github.com/huggingface/transformers) release [4.45.0](https://github.com/huggingface/transformers/releases/tag/v4.45.0) ⭐ ## Speculative Decoding [Speculative decoding](https://arxiv.org/abs/2211.17192) is a popular technique to accelerate the inference of large language models, while preserving their accuracy. As shown in the figure below, speculative decoding works by splitting the generative process into two stages. In the first stage, a fast, but less accurate *draft* model (AKA assistant) autoregressively generates a sequence of tokens. In the second stage, a large, but more accurate *target* model conducts parallelized verification over the generated draft tokens. This process allows the target model to produce multiple tokens in a single forward pass and thus accelerate autoregressive decoding. The success of speculative decoding largely hinges on the _speculation lookahead_ (SL), i.e. the number of tokens produced by the draft model in each iteration. In practice, the SL is either a static value or based on heuristics, neither of which is optimal for squeezing out maximium performance during inference. <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/dynamic_speculation_lookahead/spec_dec_diagram.png" width="250"><br> <em>Speculative decoding iteration.</em> </figure> ## Dynamic Speculative Decoding [Transformers🤗](https://github.com/huggingface/transformers) offers two distinct methods to determine the schedule for adjusting the number of draft (assistant) tokens during inference. The straightforward method, based on [Leviathan et al.](https://arxiv.org/pdf/2211.17192), uses a static value of the speculation lookahead and involves generating a constant number of candidate tokens at each speculative iteration. Alternatively, a [heuristic-based approach](https://huggingface.co/blog/assisted-generation) adjusts the number of candidate tokens for the next iteration based on the acceptance rate of the current iteration. If all speculative tokens are correct, the number of candidate tokens increases; otherwise, it decreases. We anticipate that an enhanced optimization strategy for managing the number of generated draft tokens could squeeze out further latency reductions. For testing this thesis we utilize an oracle that determines the optimal speculation lookahead value for each speculative iteration. The oracle employs the draft model to autoregressively generate tokens until a discrepancy arises between the predicted tokens of the draft and target models. This process is repeated for each speculative iteration, ultimately identifying the optimal (maximum) number of draft tokens accepted per iteration. The draft/target token mismatch is identified using the rejection sampling algorithm, introduced by Leviathan et al., with zero temperature. 
This oracle realizes the full potential of speculative decoding by generating the maximum number of valid draft tokens at each step and minimizing the number of calls to both the draft and target models. The left figure below illustrates the oracle and static speculation lookahead values across the speculative iterations of a code generation example from the [MBPP](https://huggingface.co/datasets/google-research-datasets/mbpp) dataset. A high variance in oracle speculation lookahead values (orange bars) is observed. The static speculation lookahead (blue bars), where the number of generated draft tokens is fixed to 5, performs 38 target forward passes and 192 draft forward passes, whereas the oracle speculation lookahead, performs only 27 target forward passes and 129 draft forward passes - a significant reduction. The right figure shows the oracle and static speculation lookahead across the entire [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) dataset. <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/dynamic_speculation_lookahead/oracle_K_2.png" style="width: 400px; height: auto;"><br> <em>Oracle and static speculation lookahead (SL) values on one MBPP example.</em> </p> <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/dynamic_speculation_lookahead/Alpaca.png" style="width: 400px; height: auto;"><br> <em>Average oracle speculation lookahead for the entire Alpaca dataset.</em> </p> Both figures demonstrate significant variability in oracle speculation lookahead values, suggesting that a static speculation lookahead may be suboptimal. In order to get closer to the Oracle and gain extra speedup, we developed a straightforward method to dynamically adjust the speculation lookahead value at each iteration. After generating each draft token, we determine whether the draft model should continue generating the next token or switch to the target model for verification. This decision is based on the assistant model's confidence in its prediction, estimated by the softmax of the logits. If the assistant model's confidence in the current token prediction falls below a predefined threshold, referred to as the `assistant_confidence_threshold`, it halts the token generation process for that iteration, even if the maximum number of speculative tokens `num_assistant_tokens` has not been reached. Once halted, the draft tokens generated during the current iteration are sent to the target model for verification. ## Benchmarking We benchmarked the dynamic approach against the heuristic approach across a range of tasks and model pairings. The dynamic approach showed better performance in all tests. Notably, using the dynamic approach with `Llama3.2-1B` as the assistant for `Llama3.1-8B`, we observe speedups of up to 1.52x, whereas the heuristic approach showed no significant speedups with the same setup. Another observation is that `codegen-6B-mono` yields _slowdown_ using the heuristic approach, whereas the dynamic approach shows speedup. 
| Target model | Draft (Assistant) model | Task | Speedup - heuristic | Speedup - dynamic | |----------------------|---------------------|---------------------------|---------------------------|---------------------------| | `facebook/opt-6.7b` | `facebook/opt-125m` | summarization | 1.82x | **2.71x** | | `facebook/opt-6.7b` | `facebook/opt-125m` | open-ended generation | 1.23x | **1.59x** | | `Salesforce/codegen-6B-mono` | `Salesforce/codegen-350M-mono` | code generation (python) | 0.89x | **1.09x** | | `google/flan-t5-xl` | `google/flan-t5-small` | summarization | 1.18x | **1.31x** | | `meta-llama/Llama-3.1-8B` | `meta-llama/Llama-3.2-1B` | summarization | 1.00x | **1.52x** | | `meta-llama/Llama-3.1-8B` | `meta-llama/Llama-3.2-1B` | open-ended generation | 1.00x | **1.18x** | | `meta-llama/Llama-3.1-8B` | `meta-llama/Llama-3.2-1B` | code generation (python) | 1.09x | **1.15x** | * The results in the table reflect greedy decoding (temperature = 0). Similar trends were observed when using sampling (temperature > 0). * All tests were conducted on an RTX 4090. * Our benchmark is publicly available allowing everyone to evaluate further improvements: https://github.com/gante/huggingface-demos/tree/main/experiments/faster_generation ## Code Dynamic speculation has been integrated into release [4.45.0](https://github.com/huggingface/transformers/releases/tag/v4.45.0) of the Hugging Face Transformers library and now serves as the default operation mode for assisted decoding. To use assisted generation with dynamic speculation, no code changes are required—just execute the code as you normally would: ```python from transformers import AutoModelForCausalLM, AutoTokenizer import torch prompt = "Alice and Bob" checkpoint = "EleutherAI/pythia-1.4b-deduped" assistant_checkpoint = "EleutherAI/pythia-160m-deduped" device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained(checkpoint) inputs = tokenizer(prompt, return_tensors="pt").to(device) model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device) assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint).to(device) outputs = model.generate(**inputs, assistant_model=assistant_model) ``` The default dynamic speculation lookahead parameters reflect optimal values but can be adjusted to improve performance for specific model pairs or datasets by using the following code: ```python # confidence threshold assistant_model.generation_config.assistant_confidence_threshold=0.4 # 'constant' means that num_assistant_tokens stays unchanged during generation assistant_model.generation_config.num_assistant_tokens_schedule='constant' # the maximum number of tokens generated by the assistant model. # after 20 tokens the draft halts even if the confidence is above the threshold assistant_model.generation_config.num_assistant_tokens=20 ``` To revert to the **heuristic** or **constant** (as in [Leviathan et al.](https://arxiv.org/pdf/2211.17192)) approaches, simply set `num_assistant_tokens_schedule` to `'heuristic'` or `'constant'`, respectively, set `assistant_confidence_threshold=0` and `num_assistant_tokens=5` as follows: ```python # Use 'heuristic' or 'constant' or 'dynamic' assistant_model.generation_config.num_assistant_tokens_schedule='heuristic' assistant_model.generation_config.assistant_confidence_threshold=0 assistant_model.generation_config.num_assistant_tokens=5 ``` ## What’s next? 
We introduced a faster strategy for assisted generation called _dynamic speculative decoding_, which outperforms both heuristic-based methods and approaches that draw a constant number of candidate tokens. In an upcoming blog post, we'll show a new method for assisted generation: combining any target model with any assistant model! This will open the door for accelerating countless models on the Hugging Face Hub that do not have small enough assistant variants. For example, `Phi 3`, `Gemma 2`, `CodeLlama` and many more will be eligible for speculative decoding. Stay tuned!

## References

- [Dynamic Speculation Lookahead Accelerates Speculative Decoding of Large Language Models](https://arxiv.org/abs/2405.04304). In this paper, we introduced DISCO, a dynamic speculation lookahead optimization method that utilizes a classifier to decide whether the draft model should proceed with generating the next token or pause and switch to the target model for validation, instead of using a simple threshold on the prediction probability.
- [Assisted Generation: a new direction toward low-latency text generation](https://huggingface.co/blog/assisted-generation)
- [Fast Inference from Transformers via Speculative Decoding](https://arxiv.org/pdf/2211.17192)

## Citation

```bibtex
@article{mamou2024accelerating,
  title={Accelerating Speculative Decoding using Dynamic Speculation Length},
  author={Mamou, Jonathan and Pereg, Oren and Korat, Daniel and Berchansky, Moshe and Timor, Nadav and Wasserblat, Moshe and Schwartz, Roy},
  journal={arXiv preprint arXiv:2405.04304},
  year={2024}
}
```
8
0
hf_public_repos
hf_public_repos/blog/opinion-classification-with-kili.md
--- title: "Opinion Classification with Kili and HuggingFace AutoTrain" thumbnail: /blog/assets/59_opinion-classification-with-kili/thumbnail.png authors: - user: alperiox guest: true --- # Opinion Classification with Kili and HuggingFace AutoTrain ## Introduction Understanding your users’ needs is crucial in any user-related business. But it also requires a lot of hard work and analysis, which is quite expensive. Why not leverage Machine Learning then? With much less coding by using Auto ML. In this article, we will leverage [HuggingFace AutoTrain](https://huggingface.co/autotrain) and [Kili](https://kili-technology.com/) to build an active learning pipeline for text classification. [Kili](https://kili-technology.com/) is a platform that empowers a data-centric approach to Machine Learning through quality training data creation. It provides collaborative data annotation tools and APIs that enable quick iterations between reliable dataset building and model training. Active learning is a process in which you add labeled data to the data set and then retrain a model iteratively. Therefore, it is endless and requires humans to label the data. As a concrete example use case for this article, we will build our pipeline by using user reviews of Medium from the Google Play Store. After that, we are going to categorize the reviews with the pipeline we built. Finally, we will apply sentiment analysis to the classified reviews. Then we will analyze the results, understanding the users’ needs and satisfaction will be much easier. ## AutoTrain with HuggingFace Automated Machine Learning is a term for automating a Machine Learning pipeline. It also includes data cleaning, model selection, and hyper-parameter optimization too. We can use 🤗 transformers for automated hyper-parameter searching. Hyper-parameter optimization is a difficult and time-consuming process. While we can build our pipeline ourselves by using transformers and other powerful APIs, it is also possible to fully automate this with [AutoTrain](https://huggingface.co/autotrain). AutoTrain is built on many powerful APIs like transformers, [datasets](https://github.com/huggingface/datasets) and [inference-api](https://huggingface.co/docs/transformers/main_classes/trainer). Cleaning the data, model selection, and hyper-parameter optimization steps are all fully automated in AutoTrain. One can fully utilize this framework to build production-ready SOTA transformer models for a specific task. Currently, AutoTrain supports binary and multi-label text classification, token classification, extractive question answering, text summarization, and text scoring. It also supports many languages like English, German, French, Spanish, Finnish, Swedish, Hindi, Dutch, and [more](https://huggingface.co/autotrain). If your language is not supported by AutoTrain, it is also possible to use custom models with custom tokenizers. ## Kili [Kili](https://kili-technology.com/) is an end-to-end AI training platform for data-centric businesses. Kili provides optimized labeling features and quality management tools to manage your data. You can quickly annotate the image, video, text, pdf, and voice data while controlling the quality of the dataset. It also has powerful APIs for GraphQL and Python which eases data management a lot. It is available either online or on-premise and it enables modern Machine Learning technics either on computer vision or on NLP and OCR. 
It supports text classification, named entity recognition (NER), relation extraction, and more NLP/OCR tasks. It also supports computer vision tasks like object detection, image transcription, video classification, semantic segmentation, and many more! Kili is a commercial tool but you can also create a free developer account to try Kili’s tools. You can learn more from the [pricing](https://kili-technology.com/pricing/) page. ## Project We will work on an example of review classification, along with sentiment analysis, to get insights about a mobile application. We have extracted around 40 thousand reviews of Medium from the Google Play Store. We will [annotate the review texts](https://kili-technology.com/blog/text-annotation-in-machine-learning-an-overview/) in this dataset step by step. And then we’re going to build a pipeline for review classification. In the modeling, the first model will be prepared with AutoTrain. Then we will also build a model without using AutoTrain. All the code and the dataset can be found on the [GitHub repository](https://github.com/alperiox/review-classification-kili-hf-automl) of the project. ## Dataset Let’s start by taking a look at the raw dataset, ![](assets/59_opinion-classification-with-kili/1.png) There are 10 columns and 40130 samples in this dataset. The only column we need is `content` which is the review of the user. Before starting, we need to define some categories. We have defined 4 categories, - Subscription: Since medium has a subscription option, anything related to users' opinions about subscription features should belong here. - Content: Medium is a sharing platform, there are lots of writings from poetry to advanced artificial intelligence research. Users’ opinions about a variety of topics, the quality of the content should belong here. - Interface: Thoughts about UI, searching articles, recommendation engine, and anything related to the interface should belong here. This also includes payment-related issues. - User Experience: The user’s general thoughts and opinions about the application. Which should be generally abstract without indicating another category. For the labeling part, we need to create a project in Kili’s platform at first. We can use either the web interface of the platform or APIs. Let's see both. **From the web interface:** From the project list page, we create a multi-class text classification project. ![](assets/59_opinion-classification-with-kili/2.png) After that, on the project’s page, you can add your data by clicking the Add assets button. Currently, you can add at most 25000 samples, but you can extend this limit if you contact the Kili sales team. After we create our project, we need to add jobs. We can prepare a labeling interface from the Settings page Although we have defined 4 categories, it is inevitable to come across reviews that should have multiple categories or completely weird ones. I will add two more labels (which are not to use in modeling) to catch these cases too. In our example, we added two more labels (Other, Multi-label). We also added a named entity recognition (NER) job just to specify how we decided on a label while labeling. The final interface is shown below ![](assets/59_opinion-classification-with-kili/3.png) As you can see from the menu at the left, it is also possible to drop a link that describes your labels on the `Instructions` page. We can also add other members to our project from `Members` or add quality measures from the `Quality management` pages. 
More information can be found in the [documentation](https://cloud.kili-technology.com/docs/overview/introduction-to-kili-technology.html). **Now, let’s create our project with Python API:** At first, we need to import needed libraries ([notebooks/kili_project_management.ipynb](https://github.com/alperiox/review-classification-kili-hf-automl/blob/master/notebooks/kili_project_management.ipynb)) ```python import os #we will process the data (which is a csv file) import pandas as pd #API client from kili.client import Kili #Why not use pretty progress bars? from tqdm import tqdm from dotenv import load_dotenv load_dotenv() ``` In order to access the platform, we need to authenticate our client ```python API_KEY = os.getenv('KILI_API_KEY') # initialize and authenticate the Kili client kili = Kili(api_key = API_KEY) ``` Now we can start to prepare our interface, the interface is just a dictionary in Python. We will define our jobs, then fill the labels up. Since all labels also could have children labels, we will pass labels as dictionaries too. ```python labels = ['User experience', 'Subscription', 'Content', 'Other', 'Multi label'] entity_dict = { 'User experience': '#cc4125', 'Subscription': '#4543e6', 'Content': '#3edeb6', } project_name = 'User review dataset for topic classification' project_description = "Medium's app reviews fetched from google play store for topic classification" interface = { 'jobs': { 'JOB_0': { 'mlTask': 'CLASSIFICATION', 'instruction': 'Labels', 'required': 1, 'content': { "categories": {}, "input": "radio", }, }, 'JOB_1': { 'mlTask': "NAMED_ENTITIES_RECOGNITION", 'instruction': 'Entities', 'required': 1, 'content': { 'categories': {}, "input": "radio" }, }, } } # fill the interface json with jobs for label in labels: # converts labels to uppercase and replaces whitespaces with underscores (_) # ex. User experience -> USER_EXPERIENCE # this is the preferred way to fill the interface label_upper = label.strip().upper().replace(' ', '_') # content_dict_0 = interface['jobs']['JOB_0']['content'] categories_0 = content_dict_0['categories'] category = {'name': label, 'children': []} categories_0[label_upper] = category for label, color in entity_dict.items(): label_upper = label.strip().upper().replace(' ', '_') content_dict_1 = interface['jobs']['JOB_1']['content'] categories_1 = content_dict_1['categories'] category = {'name': label, 'children': [], 'color': color} categories_1[label_upper] = category # now we can create our project # this method returns the created project’s id project_id = kili.create_project(json_interface=interface, input_type='TEXT', title=project_name, description=project_description)['id'] ``` We are ready to upload our data to the project. The `append_many_to_dataset` method can be used to import the data into the platform. By using the Python API, we can import the data by batch of 100 maximum. Here is a simple function to upload the data: ```python def import_dataframe(project_id:str, dataset:pd.DataFrame, text_data_column:str, external_id_column:str, subset_size:int=100) -> bool: """ Arguments: Inputs - project_id (str): specifies the project to load the data, this is also returned when we create our project - dataset (pandas DataFrame): Dataset that has proper columns for id and text inputs - text_data_column (str): specifies which column has the text input data - external_id_column (str): specifies which column has the ids - subset_size (int): specifies the number of samples to import at a time. 
Cannot be higher than 100 Outputs: None Returns: True or False regards to process succession """ assert subset_size <= 100, "Kili only allows to upload 100 assets at most at a time onto the app" L = len(dataset) # set 25000 as an upload limit, can be changed if L>25000: print('Kili Projects currently supports maximum 25000 samples as default. Importing first 25000 samples...') L=25000 i = 0 while i+subset_size < L: subset = dataset.iloc[i:i+subset_size] externalIds = subset[external_id_column].astype(str).to_list() contents = subset[text_data_column].astype(str).to_list() kili.append_many_to_dataset(project_id=project_id, content_array=contents, external_id_array=externalIds) i += subset_size return True ``` It simply imports the given `dataset` DataFrame to a project specified by project_id. We can see the arguments from docstring, we just need to pass our dataset along with the corresponding column names. We’ll just use the sample indices we get when we load the data. And then voila, uploading the data is done! ```python dataset_path = '../data/processed/lowercase_cleaned_dataset.csv' df = pd.read_csv(dataset_path).reset_index() # reset index to get the indices import_dataframe(project_id, df, 'content', 'index') ``` It wasn’t difficult to use the Python API, the helper methods we used covered many difficulties. We also used another script to check the new samples when we updated the dataset. Sometimes the model performance drop down after the dataset update. This is due to simple mistakes like mislabeling and introducing bias to the dataset. The script simply authenticates and then moves distinct samples of two given dataset versions to `To Review`. We can change the property of a sample through `update_properties_in_assets` method: ([scripts/move_diff_to_review.py](https://github.com/alperiox/review-classification-kili-hf-automl/blob/master/scripts/move_diff_to_review.py)) ```python # Set up the Kili client and arguments from kili.client import Kili from dotenv import load_dotenv import os import argparse import pandas as pd load_dotenv() parser = argparse.ArgumentParser() parser.add_argument('--first', required=True, type=str, help='Path to first dataframe') parser.add_argument('--second', required=True, type=str, help='Path to second dataframe') args = vars(parser.parse_args()) # set the kili connection up API_KEY = os.getenv('KILI_API_KEY') kili = Kili(API_KEY) # read dataframes df1 = pd.read_csv(args['first']) df2 = pd.read_csv(args['second']) # concating two of them should let us have duplicates of common elements # then we can drop the duplicated elements without keeping any duplicates to get the different elements across the two dataframes diff_df = pd.concat((df1, df2)).drop_duplicates(keep=False) diff_ids = diff_df['id'].to_list() # The changes should be given as an array that # contains the change for every single sample. # That’s why [‘TO_REVIEW’] * len(diff_df) is passed to status_array argument kili.update_properties_in_assets(diff_ids, status_array=['TO_REVIEW'] * len(diff_ids)) print('SET %d ENTRIES TO BE REVIEWED!' % len(diff_df)) ``` ## Labeling Now that we have the source data uploaded, the platform has a built-in labeling interface which is pretty easy to use. Available keyboard shortcuts helped while annotating the data. We used the interface without breaking a sweat, there are automatically defined shortcuts and it simplifies the labeling. 
We can see the shortcuts by clicking the keyboard icon at the upper-right part of the interface; they are also shown by underlined characters in the labeling interface at the right.

![](assets/59_opinion-classification-with-kili/4.png)

Some samples were very weird, so we decided to skip them while labeling. In general, the process was way easier thanks to Kili’s built-in platform.

![](assets/59_opinion-classification-with-kili/5.gif)

## Exporting the Labeled Data

The labeled data is exported with ease by using the Python API. The script below exports the labeled and reviewed samples into a dataframe, then saves it with a given name as a CSV file. ([scripts/prepare_dataset.py](https://github.com/alperiox/review-classification-kili-hf-automl/blob/master/scripts/prepare_dataset.py))

```python
import argparse
import os

import pandas as pd
from dotenv import load_dotenv
from kili.client import Kili

load_dotenv()

parser = argparse.ArgumentParser()
parser.add_argument('--output_name', required=True, type=str, default='dataset.csv')
parser.add_argument('--remove', required=False, type=str)
args = vars(parser.parse_args())

API_KEY = os.getenv('KILI_API_KEY')
dataset_path = '../data/processed/lowercase_cleaned_dataset.csv'
output_path = os.path.join('../data/processed', args['output_name'])

def extract_labels(labels_dict):
    response = labels_dict[-1]  # pick the latest version of the sample
    label_job_dict = response['jsonResponse']['JOB_0']
    categories = label_job_dict['categories']
    # all samples have a label, we can just pick it by its index
    label = categories[0]['name']
    return label

kili = Kili(API_KEY)
print('Authenticated!')

# query will return a list that contains matched elements (projects in this case)
# since we have only one project with this name, we can just pick the first index
project = kili.projects(
    search_query='User review dataset for topic classification')[0]
project_id = project['id']

# we can customize the returned fields
# the fields below are pretty much enough,
# labels.jsonResponse carries the labeling data
returned_fields = [
    'id', 'externalId', 'labels.jsonResponse', 'skipped', 'status'
]
# I read the raw dataset too in order to match the samples with externalId
dataset = pd.read_csv(dataset_path)

# we can fetch the data as a dataframe
df = kili.assets(project_id=project_id,
                 status_in=['LABELED', 'REVIEWED'],
                 fields=returned_fields,
                 format='pandas')
print('Got the samples!')

# we will drop the skipped samples
df_ns = df[~df['skipped']].copy()

# extract the labeled samples
df_ns.loc[:, 'label'] = df_ns['labels'].apply(extract_labels)

# The externalId column is returned as string, let's convert it to integer
# to use as indices
df_ns.loc[:, 'content'] = dataset.loc[df_ns.externalId.astype(int), 'content']

# we can drop the `labels` column now
df_ns = df_ns.drop(columns=['labels'])

# we'll remove the multi-labeled samples
df_ns = df_ns[df_ns['label'] != 'MULTI_LABEL'].copy()

# also remove the samples with label specified in remove argument if it's given
if args['remove']:
    df_ns = df_ns.drop(index=df_ns[df_ns['label'] == args['remove']].index)

print('DATA FETCHING DONE')
print('DATASET HAS %d SAMPLES' % (len(df_ns)))
print('SAVING THE PROCESSED DATASET TO: %s' % os.path.abspath(output_path))
df_ns.to_csv(output_path, index=False)
print('DONE!')
```

Nice! We now have the labeled data as a csv file. Let's create a dataset repository in HuggingFace and upload the data there! It's really simple: just click your profile picture and select the `New Dataset` option.
![](assets/59_opinion-classification-with-kili/19.png)

Then enter the repository name, pick a license if you want, and it's done!

![](assets/59_opinion-classification-with-kili/20.png)

Now we can upload the dataset from `Add file` in the `Files and versions` tab.

![](assets/59_opinion-classification-with-kili/22.png)

The dataset viewer is automatically available after you upload the data, so we can easily check the samples!

![](assets/59_opinion-classification-with-kili/24.png)

It is also possible to [upload the dataset to Hugging Face's dataset hub](https://huggingface.co/docs/datasets/upload_dataset#upload-from-python) by using the `datasets` package.

## Modeling

Let's use active learning. We iteratively label and fine-tune the model. In each iteration, we label 50 samples in the dataset. The number of samples is shown below:

![](assets/59_opinion-classification-with-kili/6.png)

Let’s try out AutoTrain first. Open [AutoTrain](https://ui.autonlp.huggingface.co/), then:

1. Create a project

![](assets/59_opinion-classification-with-kili/7.png)

2. We can select the dataset repository we created before or upload the dataset again. Then we need to choose the split type; I’ll leave it as Auto.

![](assets/59_opinion-classification-with-kili/8.png)

3. Train the models

![](assets/59_opinion-classification-with-kili/9.png)

AutoTrain will try different models and select the best ones, then perform hyper-parameter optimization automatically. The dataset is also processed automatically. The price totally depends on your use case: it can be as low as $10 or considerably more. The training is done after around 20 minutes, and the results are pretty good!

![](assets/59_opinion-classification-with-kili/10.png)

The best model’s accuracy is almost 89%.

![](assets/59_opinion-classification-with-kili/11.png)

Now we can use this [model](https://huggingface.co/alperiox/autonlp-user-review-classification-536415182) to perform the analysis; it only took about 30 minutes to set up the whole thing.

## Modeling without AutoTrain

We will use [Ray Tune](https://docs.ray.io/en/latest/tune/index.html) and Hugging Face’s Trainer API to search hyper-parameters and fine-tune a pre-trained deep learning model. We have selected the [RoBERTa base sentiment classification model](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment), which is trained on tweets, for fine-tuning. We've fine-tuned the model on Google Colaboratory and it can be found in the `notebooks` folder in the [GitHub repository](https://github.com/alperiox/user-review-classification-hf-kili).

Ray Tune is a popular library for hyper-parameter optimization which comes with many SOTA algorithms out of the box. It is also possible to use [Optuna](https://optuna.readthedocs.io/en/stable/index.html) and [SigOpt](https://sigopt.com/). We also used the [Async Successive Halving Algorithm (ASHA)](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html#asha-tune-schedulers-ashascheduler) as the scheduler and [HyperOpt](https://hyperopt.github.io/hyperopt/) as the search algorithm, which is pretty much a starting point. You can use different [schedulers](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html) and [search algorithms](https://docs.ray.io/en/latest/tune/api_docs/suggestion.html).

What will we do?
- Import the necessary libraries (a dozen of them) and prepare a dataset class - Define needed functions and methods to process the data - Load the pre-trained model and tokenizer - Run hyper-parameter search - Use the best results for evaluation Let’s start with importing necessary libraries! (all the code is in [notebooks/modeling.ipynb](https://github.com/alperiox/review-classification-kili-hf-automl/blob/master/notebooks/modeling.ipynb) and [google collaboratory notebook](https://colab.research.google.com/drive/1YL-q3_JTEnOtoQdiDUnwSxLVn9Aqpzs8?usp=sharing)) ```python # general data science/utilization/visualization imports import json import os import random # progress bar from tqdm import tqdm # data manipulation / reading import numpy as np import pandas as pd # visualization import plotly.express as px import matplotlib.pyplot as plt # pre-defined evaluation metrics from sklearn.metrics import (accuracy_score, f1_score, precision_score, recall_score) from sklearn.model_selection import train_test_split # torch imports import torch import torch.nn as nn from torch.utils.data import DataLoader, Dataset, random_split # huggingface imports import transformers from datasets import load_metric from transformers import (AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments) # ray tune imports for hyperparameter optimization from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining from ray.tune.suggest.hyperopt import HyperOptSearch ``` We will set a seed for the libraries we use for reproducibility ```python def seed_all(seed): torch.manual_seed(seed) random.seed(seed) np.random.seed(seed) SEED=42 seed_all(SEED) ``` Now let’s define our dataset class! ```python class TextClassificationDataset(Dataset): def __init__(self, dataframe): self.labels = dataframe.label.to_list() self.inputs = dataframe.content.to_list() self.labels_to_idx = {k:v for k,v in labels_dict.items()} # copy the labels_dict dictionary def __len__(self): return len(self.inputs) def __getitem__(self, idx): if type(idx)==torch.Tensor: idx = list(idx) input_data = self.inputs[idx] target = self.labels[idx] target = self.labels_to_idx[target] return {'text': input_data, 'label':target} ``` We can download the model easily by specifying HuggingFace hub repository. It is also needed to import the tokenizer for the specified model. We have to provide a function to initialize the model during hyper-parameter optimization. The model will be defined there. The metric to optimize is accuracy, we want this value to be as high as possible. Because of that, we need to load the metric, then define a function to get the predictions and calculate the preferred metric. ```python model_name = 'cardiffnlp/twitter-roberta-base-sentiment' # we will perform the search to optimize the model accuracy, # we need to specify and load the accuracy metric as a first step metric = load_metric("accuracy") # since we already entered a model name, we can load the tokenizer # we can also load the model but i'll describe it in the model_init function. tokenizer = AutoTokenizer.from_pretrained(model_name) def model_init(): """ Hyperparameter optimization is performed by newly initialized models, therefore we will need to initialize the model again for every single search run. 
This function initializes and returns the pre-trained model selected with `model_name` """ return AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=4, return_dict=True, ignore_mismatched_sizes=True) # the function to calculate accuracy def compute_metrics(eval_pred): logits, labels = eval_pred predictions = np.argmax(logits, axis=-1) # just pick the indices that has the maximum values return metric.compute(predictions=predictions, references=labels) ``` After defining metric calculation and model initialization function, we can load the data: ```python file_name = "dataset-11.csv" dataset_path = os.path.join('data/processed', file_name) dataset = pd.read_csv(dataset_path) ``` I also defined two dictionaries for mapping labels to indices and indices to labels. ```python idx_to_label = dict(enumerate(dataset.label.unique())) labels_dict = {v:k for k,v in idx_to_label.items()} ``` Now we can define the search algorithm and the scheduler for the hyper-parameter-search. ```python scheduler = ASHAScheduler(metric='objective', mode='max') search_algorithm = HyperOptSearch(metric='objective', mode='max', random_state_seed=SEED) # number of runs for parameter searching n_trials = 40 ``` We also need to tokenize the text data before passing it to the model, we can easily do this by using the loaded tokenizer. Ray Tune works in a black-box setting so I used tokenizer as a default argument for a work-around. Otherwise, an error about tokenizer definition would arise. ```python def tokenize(sample, tokenizer=tokenizer): tokenized_sample = tokenizer(sample['text'], padding=True, truncation=True) tokenized_sample['label'] = sample['label'] return tokenized_sample ``` Another utility function that returns stratified and tokenized Torch dataset splits: ```python def prepare_datasets(dataset_df, test_size=.2, val_size=.2): train_set, test_set = train_test_split(dataset_df, test_size=test_size, stratify=dataset_df.label, random_state=SEED) train_set, val_set = train_test_split(train_set, test_size=val_size, stratify=train_set.label, random_state=SEED) # shuffle the dataframes beforehand train_set = train_set.sample(frac=1, random_state=SEED) val_set = val_set.sample(frac=1, random_state=SEED) test_set = test_set.sample(frac=1, random_state=SEED) # convert dataframes to torch datasets train_dataset = TextClassificationDataset(train_set) val_dataset = TextClassificationDataset(val_set) test_dataset = TextClassificationDataset(test_set) # tokenize the datasets tokenized_train_set = train_dataset.map(tokenize) tokenized_val_set = val_dataset.map(tokenize) tokenized_test_set = test_dataset.map(tokenize) # finally return the processed sets return tokenized_train_set, tokenized_val_set, tokenized_test_set ``` Now we can perform the search! Let’s start by processing the data: ```python tokenized_train_set, tokenized_val_set, tokenized_test_set = prepare_datasets(dataset) training_args = TrainingArguments( 'trial_results', evaluation_strategy="steps", disable_tqdm=True, skip_memory_metrics=True, ) trainer = Trainer( args=training_args, tokenizer=tokenizer, train_dataset=tokenized_train_set, eval_dataset=tokenized_val_set, model_init=model_init, compute_metrics=compute_metrics ) best_run = trainer.hyperparameter_search( direction="maximize", n_trials=n_trials, backend="ray", search_alg=search_algorithm, scheduler=scheduler ) ``` We performed the search with 20 and 40 trials respectively, the results are shown below. The weighted average of F1, Recall, and Precision scores for 20 runs. 
![](assets/59_opinion-classification-with-kili/12.png)

The weighted average of F1, Recall, and Precision scores for 40 runs.

![](assets/59_opinion-classification-with-kili/13.png)

The performance spiked at the third dataset version. At some point in data labeling, I mistakenly introduced too much bias into the dataset. As we can see, its performance became more reasonable as the sample variance increased later on. The final model is saved on Google Drive and can be downloaded from [here](https://drive.google.com/drive/folders/1X_ci2Pwu0-1XbXsaCksQHZF0254TIHiD?usp=sharing); it is also possible to download it via the [download_models.py](https://github.com/alperiox/review-classification-kili-hf-automl/tree/master/scripts) script.

## Final Analysis

We can use the fine-tuned model to conduct the final analysis now. All we have to do is load the data, process it, and get the prediction results from the model. Then we can use a pre-trained model for sentiment analysis and hopefully get insights.

We used Google Colab for the inference ([here](https://colab.research.google.com/drive/1kGYl_YcMmA2gj6HnYFzkcxSDNPlHjYaZ?usp=sharing)) and then exported the results to [result.csv](https://github.com/alperiox/review-classification-kili-hf-automl/tree/master/results). It can be found in `results` in the GitHub repository. We then analyzed the results in another [Google Colaboratory notebook](https://colab.research.google.com/drive/1TOX7tqJ7SGbUDWwA_6D1y-U0aNNXY04Q?usp=sharing) for an interactive experience, so you can also use it easily and interactively.

Let’s check the results now! We can see that the given scores are highly positive. In general, the application is liked by the users.

![](assets/59_opinion-classification-with-kili/14.png)

This also matches the sentiment analysis: most of the reviews are positive and the fewest reviews are classified as negative.

![](assets/59_opinion-classification-with-kili/15.png)

As we can see from above, the model's predictions are reasonable. Positive scores are dominantly higher than the others, just like the sentiment analysis graph shows. When it comes to the categories defined before, the model predicts that most of the reviews are about users' experience (excluding experiences related to other categories):

![](assets/59_opinion-classification-with-kili/16.png)

We can also see the sentiment predictions over the defined categories below:

![](assets/59_opinion-classification-with-kili/17.png)

We won't do a detailed analysis of the reviews; a basic understanding of potential problems will suffice. Therefore, it is enough to draw simple conclusions from the final data:

- It is understandable that most of the reviews about the subscription are negative. Paid content generally is not welcomed in mobile applications.
- There are many negative reviews about the interface. This may be a clue for further analysis. Maybe there is a misconception about features, or a feature doesn't work as users thought.
- People have generally liked the articles and most of them had good experiences.

Important note about the plot: we haven't filtered the reviews by application version. When we look at the results of the latest current version (4.5), it seems that the interface of the application confuses the users or has annoying bugs.

![](assets/59_opinion-classification-with-kili/18.png)

## Conclusion

Now we can use the pre-trained model to try to understand the potential shortcomings of the mobile application.
Then it would be easier to analyze a specific feature.

We used HuggingFace’s powerful APIs and AutoTrain along with Kili’s easy-to-use interface in this example. The modeling with AutoTrain took just 30 minutes: it chose the models and trained them for our use case. AutoTrain is definitely much more efficient, since I spent far more time when developing the model by myself.

All the code, datasets, and scripts can be found on [GitHub](https://github.com/alperiox/review-classification-kili-hf-automl). You can also try the [AutoTrain model](https://huggingface.co/alperiox/autonlp-user-review-classification-536415182).

While we can consider this a valid starting point, we should collect more data and try to build better pipelines. Better pipelines would result in more efficient improvements.
9
0
hf_public_repos/adversarialnlp/adversarialnlp/tests
hf_public_repos/adversarialnlp/adversarialnlp/tests/generators/swag_generator_test.py
# pylint: disable=no-self-use,invalid-name from typing import List import pytest from allennlp.data.fields import TextField from allennlp.common.util import ensure_list from allennlp.common.testing import AllenNlpTestCase from allennlp.data import Instance, Token, Vocabulary from allennlp.data.iterators import BasicIterator from allennlp.data.token_indexers import SingleIdTokenIndexer from allennlp.data.dataset_readers.dataset_reader import _LazyInstances from adversarialnlp.generators.swag.swag_generator import SwagGenerator from adversarialnlp.generators.swag.activitynet_captions import ActivityNetCaptionsDatasetReader from adversarialnlp.tests.utils import FIXTURES_ROOT class GeneratorTest(AllenNlpTestCase): def setUp(self): super(GeneratorTest, self).setUp() self.token_indexers = {"tokens": SingleIdTokenIndexer()} self.vocab = Vocabulary() self.this_index = self.vocab.add_token_to_namespace('this') self.is_index = self.vocab.add_token_to_namespace('is') self.a_index = self.vocab.add_token_to_namespace('a') self.sentence_index = self.vocab.add_token_to_namespace('sentence') self.another_index = self.vocab.add_token_to_namespace('another') self.yet_index = self.vocab.add_token_to_namespace('yet') self.very_index = self.vocab.add_token_to_namespace('very') self.long_index = self.vocab.add_token_to_namespace('long') instances = [ self.create_instance(["this", "is", "a", "sentence"], ["this", "is", "another", "sentence"]), self.create_instance(["yet", "another", "sentence"], ["this", "is", "a", "very", "very", "very", "very", "long", "sentence"]), ] class LazyIterable: def __iter__(self): return (instance for instance in instances) self.instances = instances self.lazy_instances = LazyIterable() def create_instance(self, first_sentence: List[str], second_sentence: List[str]): first_tokens = [Token(t) for t in first_sentence] second_tokens = [Token(t) for t in second_sentence] instance = Instance({'first_sentence': TextField(first_tokens, self.token_indexers), 'second_sentence': TextField(second_tokens, self.token_indexers)}) return instance def assert_instances_are_correct(self, candidate_instances): # First we need to remove padding tokens from the candidates. # pylint: disable=protected-access candidate_instances = [tuple(w for w in instance if w != 0) for instance in candidate_instances] expected_instances = [tuple(instance.fields["first_sentence"]._indexed_tokens["tokens"]) for instance in self.instances] assert set(candidate_instances) == set(expected_instances) class TestSwagGenerator(GeneratorTest): # The Generator should work the same for lazy and non lazy datasets, # so each remaining test runs over both. def test_yield_one_epoch_generation_over_the_data_once(self): for test_instances in (self.instances, self.lazy_instances): generator = SwagGenerator(num_examples=1) test_instances = ActivityNetCaptionsDatasetReader().read(FIXTURES_ROOT / 'activitynet_captions.json') batches = list(generator(test_instances)) # We just want to get the single-token array for the text field in the instance. instances = [tuple(instance.detach().cpu().numpy()) for batch in batches for instance in batch['text']["tokens"]] assert len(instances) == 5 self.assert_instances_are_correct(instances)
0
0
hf_public_repos/adversarialnlp/adversarialnlp/tests
hf_public_repos/adversarialnlp/adversarialnlp/tests/generators/addsent_generator_test.py
# pylint: disable=no-self-use,invalid-name from typing import List import pytest from adversarialnlp.generators.addsent.addsent_generator import AddSentGenerator from adversarialnlp.generators.addsent.squad_reader import squad_reader from adversarialnlp.common.file_utils import FIXTURES_ROOT # class GeneratorTest(AllenNlpTestCase): # def setUp(self): # super(GeneratorTest, self).setUp() # self.token_indexers = {"tokens": SingleIdTokenIndexer()} # self.vocab = Vocabulary() # self.this_index = self.vocab.add_token_to_namespace('this') # self.is_index = self.vocab.add_token_to_namespace('is') # self.a_index = self.vocab.add_token_to_namespace('a') # self.sentence_index = self.vocab.add_token_to_namespace('sentence') # self.another_index = self.vocab.add_token_to_namespace('another') # self.yet_index = self.vocab.add_token_to_namespace('yet') # self.very_index = self.vocab.add_token_to_namespace('very') # self.long_index = self.vocab.add_token_to_namespace('long') # instances = [ # self.create_instance(["this", "is", "a", "sentence"], ["this", "is", "another", "sentence"]), # self.create_instance(["yet", "another", "sentence"], # ["this", "is", "a", "very", "very", "very", "very", "long", "sentence"]), # ] # class LazyIterable: # def __iter__(self): # return (instance for instance in instances) # self.instances = instances # self.lazy_instances = LazyIterable() # def create_instance(self, first_sentence: List[str], second_sentence: List[str]): # first_tokens = [Token(t) for t in first_sentence] # second_tokens = [Token(t) for t in second_sentence] # instance = Instance({'first_sentence': TextField(first_tokens, self.token_indexers), # 'second_sentence': TextField(second_tokens, self.token_indexers)}) # return instance # def assert_instances_are_correct(self, candidate_instances): # # First we need to remove padding tokens from the candidates. # # pylint: disable=protected-access # candidate_instances = [tuple(w for w in instance if w != 0) for instance in candidate_instances] # expected_instances = [tuple(instance.fields["first_sentence"]._indexed_tokens["tokens"]) # for instance in self.instances] # assert set(candidate_instances) == set(expected_instances) class TestSwagGenerator(): # The Generator should work the same for lazy and non lazy datasets, # so each remaining test runs over both. def test_yield_one_epoch_generation_over_the_data_once(self): generator = AddSentGenerator() test_instances = squad_reader(FIXTURES_ROOT / 'squad.json') batches = list(generator(test_instances, num_epochs=1)) # We just want to get the single-token array for the text field in the instance. # instances = [tuple(instance.detach().cpu().numpy()) # for batch in batches # for instance in batch['text']["tokens"]] assert len(batches) == 5 # self.assert_instances_are_correct(instances)
1
0
hf_public_repos/adversarialnlp/adversarialnlp/tests
hf_public_repos/adversarialnlp/adversarialnlp/tests/dataset_readers/activitynet_captions_test.py
# pylint: disable=no-self-use,invalid-name
import pytest

from allennlp.common.util import ensure_list

from adversarialnlp.dataset_readers import ActivityNetCaptionsDatasetReader
from adversarialnlp.tests.utils import FIXTURES_ROOT


class TestActivityNetCaptionsReader():
    @pytest.mark.parametrize("lazy", (True, False))
    def test_read_from_file(self, lazy):
        reader = ActivityNetCaptionsDatasetReader(lazy=lazy)
        instances = reader.read(FIXTURES_ROOT / 'activitynet_captions.json')
        instances = ensure_list(instances)

        instance1 = {"video_id": "v_uqiMw7tQ1Cc",
                     "first_sentence": "A weight lifting tutorial is given .".split(),
                     "second_sentence": "The coach helps the guy in red with the proper body placement and lifting technique .".split()}
        instance2 = {"video_id": "v_bXdq2zI1Ms0",
                     "first_sentence": "A man is seen speaking to the camera and pans out into more men standing behind him .".split(),
                     "second_sentence": "The first man then begins performing martial arts moves while speaking to he camera .".split()}
        instance3 = {"video_id": "v_bXdq2zI1Ms0",
                     "first_sentence": "The first man then begins performing martial arts moves while speaking to he camera .".split(),
                     "second_sentence": "He continues moving around and looking to the camera .".split()}

        assert len(instances) == 3
        for instance, expected_instance in zip(instances, [instance1, instance2, instance3]):
            fields = instance.fields
            assert [t.text for t in fields["first_sentence"].tokens] == expected_instance["first_sentence"]
            assert [t.text for t in fields["second_sentence"].tokens] == expected_instance["second_sentence"]
            assert fields["video_id"].metadata == expected_instance["video_id"]
2
0
hf_public_repos/adversarialnlp
hf_public_repos/adversarialnlp/tutorials/usage.py
from adversarialnlp import Adversarial
from allennlp.data.dataset_readers.reading_comprehension.squad import SquadReader

adversarial = Adversarial(dataset_reader=SquadReader,
                          editor='lstm_lm',
                          num_samples=10)
examples = adversarial.generate()
3
0
hf_public_repos/adversarialnlp
hf_public_repos/adversarialnlp/bin/adversarialnlp
#!/bin/sh
python -m adversarialnlp.run "$@"
4
0
hf_public_repos/adversarialnlp
hf_public_repos/adversarialnlp/docs/generators.rst
.. role:: hidden
    :class: hidden-section

Generators
==========

.. automodule:: adversarialnlp.generators
.. currentmodule:: adversarialnlp.generators

Generator
----------

.. autoclass:: adversarialnlp.generators.Generator

AddSent
----------

.. autoclass:: adversarialnlp.generators.addsent.AddSentGenerator
.. autofunction:: adversarialnlp.generators.addsent.squad_reader

SWAG
----

.. autoclass:: adversarialnlp.generators.swag.SwagGenerator
.. autoclass:: adversarialnlp.generators.swag.ActivityNetCaptionsDatasetReader
5
0
hf_public_repos/adversarialnlp
hf_public_repos/adversarialnlp/docs/readme.rst
.. include:: ../README.md
6
0
hf_public_repos/adversarialnlp
hf_public_repos/adversarialnlp/docs/Makefile
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SPHINXPROJ    = AdversarialNLP
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
7
0
hf_public_repos/adversarialnlp
hf_public_repos/adversarialnlp/docs/conf.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # AdversarialNLP documentation build configuration file, created by # sphinx-quickstart on Wed Oct 24 11:35:14 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'AdversarialNLP' copyright = '2018, Mohit Iyyer, Pasquale Minervini, Victor Sanh, Thomas Wolf, Rowan Zellers' author = 'Mohit Iyyer, Pasquale Minervini, Victor Sanh, Thomas Wolf, Rowan Zellers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'AdversarialNLPdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'AdversarialNLP.tex', 'AdversarialNLP Documentation', 'Mohit Iyyer, Pasquale Minervini, Victor Sanh, Thomas Wolf, Rowan Zellers', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'adversarialnlp', 'AdversarialNLP Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'AdversarialNLP', 'AdversarialNLP Documentation', author, 'AdversarialNLP', 'One line description of project.', 'Miscellaneous'), ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None}
8
0
hf_public_repos/adversarialnlp
hf_public_repos/adversarialnlp/docs/index.rst
.. AdversarialNLP documentation master file, created by
   sphinx-quickstart on Wed Oct 24 11:35:14 2018.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

:github_url: https://github.com/pytorch/pytorch

AdversarialNLP documentation
============================

AdversarialNLP is a generic library for crafting and using Adversarial NLP examples.

.. toctree::
   :maxdepth: 1
   :caption: Contents

   readme
   common
   generators

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
9
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/espnet/prestart.sh
python app/main.py
0
0
hf_public_repos/api-inference-community/docker_images/espnet
hf_public_repos/api-inference-community/docker_images/espnet/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( # AutomaticSpeechRecognitionPipeline, AutomaticSpeechRecognitionPipeline, Pipeline, TextToSpeechPipeline, ) from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeecRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "text-to-speech": TextToSpeechPipeline, "automatic-speech-recognition": AutomaticSpeechRecognitionPipeline, } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
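For local experimentation outside the Docker image, a rough sketch is to set the two environment variables the module reads before importing it and then serve the Starlette app with uvicorn. The port is an assumption, and the model id is taken from the test configuration further below.

import os

# The module reads TASK and MODEL_ID at import time, so set them first.
os.environ["TASK"] = "automatic-speech-recognition"
os.environ["MODEL_ID"] = "espnet/kamo-naoyuki_mini_an4_asr_train_raw_bpe_valid.acc.best"

import uvicorn

from app.main import app

# Serve the app locally; the API then accepts POST requests on "/".
uvicorn.run(app, host="0.0.0.0", port=8000)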
1
0
hf_public_repos/api-inference-community/docker_images/espnet/app
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/text_to_speech.py
from typing import Tuple

import numpy as np
from app.pipelines import Pipeline
from espnet2.bin.tts_inference import Text2Speech


class TextToSpeechPipeline(Pipeline):
    def __init__(self, model_id: str):
        self.model = Text2Speech.from_pretrained(model_id, device="cpu")
        if hasattr(self.model, "fs"):
            self.sampling_rate = self.model.fs
        else:
            # 16000 by default if not specified
            self.sampling_rate = 16000

    def __call__(self, inputs: str) -> Tuple[np.array, int]:
        """
        Args:
            inputs (:obj:`str`):
                The text to generate audio from
        Return:
            A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array,
            and the sampling rate as an int.
        """
        outputs = self.model(inputs)
        speech = outputs["wav"]
        return speech.numpy(), self.sampling_rate
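A minimal sketch of calling this pipeline directly, assuming the ESPnet dependencies are installed; the checkpoint name is taken from the test configuration further below, while the use of `soundfile` and the output file name are illustrative assumptions:

import soundfile as sf

from app.pipelines.text_to_speech import TextToSpeechPipeline

# Synthesize a sentence and write the waveform to disk for a quick check.
tts = TextToSpeechPipeline("espnet/kan-bayashi_ljspeech_fastspeech2")
waveform, sampling_rate = tts("Hello from the ESPnet text to speech pipeline.")
sf.write("output.wav", waveform, sampling_rate)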
2
0
hf_public_repos/api-inference-community/docker_images/espnet/app
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/base.py
from abc import ABC, abstractmethod
from typing import Any


class Pipeline(ABC):
    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    pass
3
0
hf_public_repos/api-inference-community/docker_images/espnet/app
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException  # isort:skip

from app.pipelines.automatic_speech_recognition import (
    AutomaticSpeechRecognitionPipeline,
)
from app.pipelines.text_to_speech import TextToSpeechPipeline
4
0
hf_public_repos/api-inference-community/docker_images/espnet/app
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/automatic_speech_recognition.py
from typing import Dict

import numpy as np
from app.pipelines import Pipeline
from espnet2.bin.asr_inference import Speech2Text


class AutomaticSpeechRecognitionPipeline(Pipeline):
    def __init__(self, model_id: str):
        self.model = Speech2Text.from_pretrained(model_id, device="cpu", beam_size=1)
        self.sampling_rate = 16000

    def __call__(self, inputs: np.array) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at 16kHz.
                Check `app.validation` if a different sample rate is required
                or if it depends on the model.
        Return:
            A :obj:`dict`: The object returned should be like {"text": "XXX"},
            containing the text detected in the input audio.
        """
        outputs = self.model(inputs)
        text, *_ = outputs[0]
        return {"text": text}
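A minimal sketch of exercising this pipeline outside the server, assuming the ESPnet dependencies are installed; the checkpoint name is taken from the test configuration further below, while loading the audio with `soundfile` and the file name are illustrative assumptions:

import soundfile as sf

from app.pipelines.automatic_speech_recognition import AutomaticSpeechRecognitionPipeline

# Transcribe a local 16 kHz mono file for a quick check.
asr = AutomaticSpeechRecognitionPipeline(
    "espnet/kamo-naoyuki_mini_an4_asr_train_raw_bpe_valid.acc.best"
)
waveform, sampling_rate = sf.read("sample1.flac")  # expected to be 16 kHz
print(asr(waveform))  # e.g. {"text": "..."}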
5
0
hf_public_repos/api-inference-community/docker_images/espnet
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_docker_build.py
import os
import subprocess
from unittest import TestCase


class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)


class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
6
0
hf_public_repos/api-inference-community/docker_images/espnet
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_api_automatic_speech_recognition.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "automatic-speech-recognition" not in ALLOWED_TASKS, "automatic-speech-recognition not implemented", ) class AutomaticSpeecRecognitionTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["automatic-speech-recognition"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "automatic-speech-recognition" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def read(self, filename: str) -> bytes: dirname = os.path.dirname(os.path.abspath(__file__)) filename = os.path.join(dirname, "samples", filename) with open(filename, "rb") as f: bpayload = f.read() return bpayload def test_original_audiofile(self): bpayload = self.read("sample1.flac") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"text"}) def test_malformed_audio(self): bpayload = self.read("malformed.flac") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 400, ) self.assertEqual(response.content, b'{"error":"Malformed soundfile"}') def test_dual_channel_audiofile(self): bpayload = self.read("sample1_dual.ogg") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"text"}) def test_webm_audiofile(self): bpayload = self.read("sample1.webm") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"text"})
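The test above posts raw audio bytes to the service root and expects a JSON body with a "text" key. For a manual check against a locally running container, a rough sketch (the host, port, and file name are assumptions) could look like:

import json

import requests

# POST raw audio bytes to the running service.
with open("sample1.flac", "rb") as f:
    response = requests.post("http://localhost:8000/", data=f.read())

print(response.status_code)           # expected 200 for a valid sound file
print(json.loads(response.content))   # expected to contain a "text" key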
7
0
hf_public_repos/api-inference-community/docker_images/espnet
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_api.py
import os
from typing import Dict
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline


# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    "text-to-speech": "espnet/kan-bayashi_ljspeech_fastspeech2",
    "automatic-speech-recognition": "espnet/kamo-naoyuki_mini_an4_asr_train_raw_bpe_valid.acc.best",
}


ALL_TASKS = {
    "automatic-speech-recognition",
    "audio-source-separation",
    "image-classification",
    "question-answering",
    "text-generation",
    "text-to-speech",
}


class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
8
0
hf_public_repos/api-inference-community/docker_images/espnet
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_api_text_to_speech.py
import os from unittest import TestCase, skipIf from api_inference_community.validation import ffmpeg_read from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text-to-speech" not in ALLOWED_TASKS, "text-to-speech not implemented", ) class TextToSpeechTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["text-to-speech"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "text-to-speech" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): with TestClient(self.app) as client: response = client.post("/", json={"inputs": "This is some text"}) self.assertEqual( response.status_code, 200, ) self.assertEqual(response.headers["content-type"], "audio/flac") audio = ffmpeg_read(response.content, 16000) self.assertEqual(len(audio.shape), 1) self.assertGreater(audio.shape[0], 1000) def test_malformed_input(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
9
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/101_train-decision-transformers.ipynb
%%html <video controls autoplay><source src="https://huggingface.co/edbeeching/decision-transformer-gym-halfcheetah-expert/resolve/main/replay.mp4" type="video/mp4"></video>import os import random from dataclasses import dataclass import numpy as np import torch from datasets import load_dataset from transformers import DecisionTransformerConfig, DecisionTransformerModel, Trainer, TrainingArgumentsos.environ["WANDB_DISABLED"] = "true" # we diable weights and biases logging for this tutorial dataset = load_dataset("edbeeching/decision_transformer_gym_replay", "halfcheetah-expert-v2") @dataclass class DecisionTransformerGymDataCollator: return_tensors: str = "pt" max_len: int = 20 #subsets of the episode we use for training state_dim: int = 17 # size of state space act_dim: int = 6 # size of action space max_ep_len: int = 1000 # max episode length in the dataset scale: float = 1000.0 # normalization of rewards/returns state_mean: np.array = None # to store state means state_std: np.array = None # to store state stds p_sample: np.array = None # a distribution to take account trajectory lengths n_traj: int = 0 # to store the number of trajectories in the dataset def __init__(self, dataset) -> None: self.act_dim = len(dataset[0]["actions"][0]) self.state_dim = len(dataset[0]["observations"][0]) self.dataset = dataset # calculate dataset stats for normalization of states states = [] traj_lens = [] for obs in dataset["observations"]: states.extend(obs) traj_lens.append(len(obs)) self.n_traj = len(traj_lens) states = np.vstack(states) self.state_mean, self.state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6 traj_lens = np.array(traj_lens) self.p_sample = traj_lens / sum(traj_lens) def _discount_cumsum(self, x, gamma): discount_cumsum = np.zeros_like(x) discount_cumsum[-1] = x[-1] for t in reversed(range(x.shape[0] - 1)): discount_cumsum[t] = x[t] + gamma * discount_cumsum[t + 1] return discount_cumsum def __call__(self, features): batch_size = len(features) # this is a bit of a hack to be able to sample of a non-uniform distribution batch_inds = np.random.choice( np.arange(self.n_traj), size=batch_size, replace=True, p=self.p_sample, # reweights so we sample according to timesteps ) # a batch of dataset features s, a, r, d, rtg, timesteps, mask = [], [], [], [], [], [], [] for ind in batch_inds: # for feature in features: feature = self.dataset[int(ind)] si = random.randint(0, len(feature["rewards"]) - 1) # get sequences from dataset s.append(np.array(feature["observations"][si : si + self.max_len]).reshape(1, -1, self.state_dim)) a.append(np.array(feature["actions"][si : si + self.max_len]).reshape(1, -1, self.act_dim)) r.append(np.array(feature["rewards"][si : si + self.max_len]).reshape(1, -1, 1)) d.append(np.array(feature["dones"][si : si + self.max_len]).reshape(1, -1)) timesteps.append(np.arange(si, si + s[-1].shape[1]).reshape(1, -1)) timesteps[-1][timesteps[-1] >= self.max_ep_len] = self.max_ep_len - 1 # padding cutoff rtg.append( self._discount_cumsum(np.array(feature["rewards"][si:]), gamma=1.0)[ : s[-1].shape[1] # TODO check the +1 removed here ].reshape(1, -1, 1) ) if rtg[-1].shape[1] < s[-1].shape[1]: print("if true") rtg[-1] = np.concatenate([rtg[-1], np.zeros((1, 1, 1))], axis=1) # padding and state + reward normalization tlen = s[-1].shape[1] s[-1] = np.concatenate([np.zeros((1, self.max_len - tlen, self.state_dim)), s[-1]], axis=1) s[-1] = (s[-1] - self.state_mean) / self.state_std a[-1] = np.concatenate( [np.ones((1, self.max_len - tlen, self.act_dim)) * -10.0, 
a[-1]], axis=1, ) r[-1] = np.concatenate([np.zeros((1, self.max_len - tlen, 1)), r[-1]], axis=1) d[-1] = np.concatenate([np.ones((1, self.max_len - tlen)) * 2, d[-1]], axis=1) rtg[-1] = np.concatenate([np.zeros((1, self.max_len - tlen, 1)), rtg[-1]], axis=1) / self.scale timesteps[-1] = np.concatenate([np.zeros((1, self.max_len - tlen)), timesteps[-1]], axis=1) mask.append(np.concatenate([np.zeros((1, self.max_len - tlen)), np.ones((1, tlen))], axis=1)) s = torch.from_numpy(np.concatenate(s, axis=0)).float() a = torch.from_numpy(np.concatenate(a, axis=0)).float() r = torch.from_numpy(np.concatenate(r, axis=0)).float() d = torch.from_numpy(np.concatenate(d, axis=0)) rtg = torch.from_numpy(np.concatenate(rtg, axis=0)).float() timesteps = torch.from_numpy(np.concatenate(timesteps, axis=0)).long() mask = torch.from_numpy(np.concatenate(mask, axis=0)).float() return { "states": s, "actions": a, "rewards": r, "returns_to_go": rtg, "timesteps": timesteps, "attention_mask": mask, }class TrainableDT(DecisionTransformerModel): def __init__(self, config): super().__init__(config) def forward(self, **kwargs): output = super().forward(**kwargs) # add the DT loss action_preds = output[1] action_targets = kwargs["actions"] attention_mask = kwargs["attention_mask"] act_dim = action_preds.shape[2] action_preds = action_preds.reshape(-1, act_dim)[attention_mask.reshape(-1) > 0] action_targets = action_targets.reshape(-1, act_dim)[attention_mask.reshape(-1) > 0] loss = torch.mean((action_preds - action_targets) ** 2) return {"loss": loss} def original_forward(self, **kwargs): return super().forward(**kwargs)collator = DecisionTransformerGymDataCollator(dataset["train"]) config = DecisionTransformerConfig(state_dim=collator.state_dim, act_dim=collator.act_dim) model = TrainableDT(config)training_args = TrainingArguments( output_dir="output/", remove_unused_columns=False, num_train_epochs=120, per_device_train_batch_size=64, learning_rate=1e-4, weight_decay=1e-4, warmup_ratio=0.1, optim="adamw_torch", max_grad_norm=0.25, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"], data_collator=collator, ) trainer.train()import mujoco_py import gym from colabgymrender.recorder import Recorder# Function that gets an action from the model using autoregressive prediction with a window of the previous 20 timesteps. 
def get_action(model, states, actions, rewards, returns_to_go, timesteps): # This implementation does not condition on past rewards states = states.reshape(1, -1, model.config.state_dim) actions = actions.reshape(1, -1, model.config.act_dim) returns_to_go = returns_to_go.reshape(1, -1, 1) timesteps = timesteps.reshape(1, -1) states = states[:, -model.config.max_length :] actions = actions[:, -model.config.max_length :] returns_to_go = returns_to_go[:, -model.config.max_length :] timesteps = timesteps[:, -model.config.max_length :] padding = model.config.max_length - states.shape[1] # pad all tokens to sequence length attention_mask = torch.cat([torch.zeros(padding), torch.ones(states.shape[1])]) attention_mask = attention_mask.to(dtype=torch.long).reshape(1, -1) states = torch.cat([torch.zeros((1, padding, model.config.state_dim)), states], dim=1).float() actions = torch.cat([torch.zeros((1, padding, model.config.act_dim)), actions], dim=1).float() returns_to_go = torch.cat([torch.zeros((1, padding, 1)), returns_to_go], dim=1).float() timesteps = torch.cat([torch.zeros((1, padding), dtype=torch.long), timesteps], dim=1) state_preds, action_preds, return_preds = model.original_forward( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) return action_preds[0, -1]# build the environment directory = './video' model = model.to("cpu") env = gym.make("HalfCheetah-v3") env = Recorder(env, directory, fps=30) max_ep_len = 1000 device = "cpu" scale = 1000.0 # normalization for rewards/returns TARGET_RETURN = 12000 / scale # evaluation is conditioned on a return of 12000, scaled accordingly state_mean = collator.state_mean.astype(np.float32) state_std = collator.state_std.astype(np.float32) print(state_mean) state_dim = env.observation_space.shape[0] act_dim = env.action_space.shape[0] # Create the decision transformer model state_mean = torch.from_numpy(state_mean).to(device=device) state_std = torch.from_numpy(state_std).to(device=device) # Interact with the environment and create a video episode_return, episode_length = 0, 0 state = env.reset() target_return = torch.tensor(TARGET_RETURN, device=device, dtype=torch.float32).reshape(1, 1) states = torch.from_numpy(state).reshape(1, state_dim).to(device=device, dtype=torch.float32) actions = torch.zeros((0, act_dim), device=device, dtype=torch.float32) rewards = torch.zeros(0, device=device, dtype=torch.float32) timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1) for t in range(max_ep_len): actions = torch.cat([actions, torch.zeros((1, act_dim), device=device)], dim=0) rewards = torch.cat([rewards, torch.zeros(1, device=device)]) action = get_action( model, (states - state_mean) / state_std, actions, rewards, target_return, timesteps, ) actions[-1] = action action = action.detach().cpu().numpy() state, reward, done, _ = env.step(action) cur_state = torch.from_numpy(state).to(device=device).reshape(1, state_dim) states = torch.cat([states, cur_state], dim=0) rewards[-1] = reward pred_return = target_return[0, -1] - (reward / scale) target_return = torch.cat([target_return, pred_return.reshape(1, 1)], dim=1) timesteps = torch.cat([timesteps, torch.ones((1, 1), device=device, dtype=torch.long) * (t + 1)], dim=1) episode_return += reward episode_length += 1 if done: break # Play the video env.play() # If you want to convert the video: # !ffmpeg -i {your_video} -vcodec h264 replay.mp4 from huggingface_hub import notebook_login 
notebook_login()trainer.push_to_hub()
0
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/03_reformer.ipynb
#@title Installs and Imports # pip installs !pip -qq install git+https://github.com/huggingface/transformers.git !pip install -qq py3nvml from transformers import ReformerConfig, PyTorchBenchmark, PyTorchBenchmarkArgumentsconfig = ReformerConfig.from_pretrained("google/reformer-enwik8", lsh_attn_chunk_length=16386, local_attn_chunk_length=16386, lsh_num_chunks_before=0, local_num_chunks_before=0) benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[2048, 4096, 8192, 16386], batch_sizes=[1], models=["Reformer"], no_speed=True, no_env_print=True) benchmark = PyTorchBenchmark(configs=[config], args=benchmark_args) result = benchmark.run() config = ReformerConfig.from_pretrained("google/reformer-enwik8") benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[2048, 4096, 8192, 16384, 32768, 65436], batch_sizes=[1], models=["Reformer"], no_speed=True, no_env_print=True) benchmark = PyTorchBenchmark(configs=[config], args=benchmark_args) result = benchmark.run()#@title Installs and Imports # pip installs !pip -qq install git+https://github.com/huggingface/transformers.git !pip install -qq py3nvml from transformers import ReformerConfig, PyTorchBenchmark, PyTorchBenchmarkArgumentsconfig_no_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8") # no chunk config_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8", chunk_size_feed_forward=1) # feed forward chunk benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[1024, 2048, 4096], batch_sizes=[8], models=["Reformer-No-Chunk", "Reformer-Chunk"], no_speed=True, no_env_print=True) benchmark = PyTorchBenchmark(configs=[config_no_chunk, config_chunk], args=benchmark_args) result = benchmark.run()config_no_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8", chunk_size_feed_forward=0, num_attention_heads=2, feed_forward_size=16384) # no chuck config_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8", chunk_size_feed_forward=1, num_attention_heads=2, feed_forward_size=16384) # feed forward chunk benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[1024, 2048, 4096], batch_sizes=[8], models=["Reformer-No-Chunk", "Reformer-Chunk"], no_speed=True, no_env_print=True) benchmark = PyTorchBenchmark(configs=[config_no_chunk, config_chunk], args=benchmark_args) result = benchmark.run()#@title Installs and Imports # pip installs !pip -qq install git+https://github.com/huggingface/transformers.git !pip install -qq py3nvml from transformers import ReformerConfig, BertConfig, PyTorchBenchmark, PyTorchBenchmarkArgumentsconfig_4_layers_bert = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=4) config_8_layers_bert = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=8) config_12_layers_bert = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=12) benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[512], batch_sizes=[8], models=["Bert-4-Layers", "Bert-8-Layers", "Bert-12-Layers"], training=True, no_inference=True, no_speed=True, no_env_print=True) benchmark = PyTorchBenchmark(configs=[config_4_layers_bert, config_8_layers_bert, config_12_layers_bert], args=benchmark_args) result = benchmark.run()config_4_layers_reformer = ReformerConfig.from_pretrained("google/reformer-enwik8", num_hidden_layers=4, num_hashes=1) config_8_layers_reformer = ReformerConfig.from_pretrained("google/reformer-enwik8", num_hidden_layers=8, num_hashes=1) config_12_layers_reformer = ReformerConfig.from_pretrained("google/reformer-enwik8", num_hidden_layers=12, 
num_hashes=1) benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[512], batch_sizes=[8], models=["Reformer-4-Layers", "Reformer-8-Layers", "Reformer-12-Layers"], training=True, no_inference=True, no_speed=True, no_env_print=True) benchmark = PyTorchBenchmark(configs=[config_4_layers_reformer, config_8_layers_reformer, config_12_layers_reformer], args=benchmark_args) result = benchmark.run()#@title Installs and Imports # pip installs !pip -qq install git+https://github.com/huggingface/transformers.git !pip install -qq py3nvml from transformers import ReformerConfig, PyTorchBenchmark, PyTorchBenchmarkArguments, ReformerModelconfig_no_pos_axial_embeds = ReformerConfig.from_pretrained("google/reformer-crime-and-punishment", axial_pos_embds=False) # disable axial positional embeddings config_pos_axial_embeds = ReformerConfig.from_pretrained("google/reformer-crime-and-punishment", axial_pos_embds=True, axial_pos_embds_dim=(64, 192), axial_pos_shape=(512, 1024)) # enable axial positional embeddings print("Default Positional Encodings") print(20 * '-') model = ReformerModel(config_no_pos_axial_embeds) print(f"Positional embeddings shape: {model.embeddings.position_embeddings}") print(f"Num parameters of model: {model.num_parameters()}") print(20 * '-' + '\n\n') print("Axial Positional Encodings") print(20 * '-') model = ReformerModel(config_pos_axial_embeds) print(f"Positional embeddings shape: {model.embeddings.position_embeddings}") print(f"Num parameters of model: {model.num_parameters()}") print(20 * '-' + '\n\n')benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[512], batch_sizes=[8], models=["Reformer-No-Axial-Pos-Embeddings", "Reformer-Axial-Pos-Embeddings"], no_speed=True, no_env_print=True) benchmark = PyTorchBenchmark(configs=[config_no_pos_axial_embeds, config_pos_axial_embeds], args=benchmark_args) result = benchmark.run()
1
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/123_clipseg-zero-shot.ipynb
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")from PIL import Image import requests url = "https://unsplash.com/photos/8Nc_oQsc2qQ/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjcxMjAwNzI0&force=true&w=640" image = Image.open(requests.get(url, stream=True).raw) imageprompts = ["cutlery", "pancakes", "blueberries", "orange juice"]import torch inputs = processor(text=prompts, images=[image] * len(prompts), padding="max_length", return_tensors="pt") # predict with torch.no_grad(): outputs = model(**inputs) preds = outputs.logits.unsqueeze(1)import matplotlib.pyplot as plt _, ax = plt.subplots(1, len(prompts) + 1, figsize=(3*(len(prompts) + 1), 4)) [a.axis('off') for a in ax.flatten()] ax[0].imshow(image) [ax[i+1].imshow(torch.sigmoid(preds[i][0])) for i in range(len(prompts))]; [ax[i+1].text(0, -15, prompt) for i, prompt in enumerate(prompts)];url = "https://unsplash.com/photos/Ki7sAc8gOGE/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MTJ8fGNvZmZlJTIwdG8lMjBnb3xlbnwwfHx8fDE2NzExOTgzNDQ&force=true&w=640" prompt = Image.open(requests.get(url, stream=True).raw) promptencoded_image = processor(images=[image], return_tensors="pt") encoded_prompt = processor(images=[prompt], return_tensors="pt") # predict with torch.no_grad(): outputs = model(**encoded_image, conditional_pixel_values=encoded_prompt.pixel_values) preds = outputs.logits.unsqueeze(1) preds = torch.transpose(preds, 0, 1)_, ax = plt.subplots(1, 2, figsize=(6, 4)) [a.axis('off') for a in ax.flatten()] ax[0].imshow(image) ax[1].imshow(torch.sigmoid(preds[0]))url = "https://i.imgur.com/mRSORqz.jpg" alternative_prompt = Image.open(requests.get(url, stream=True).raw) alternative_prompt encoded_alternative_prompt = processor(images=[alternative_prompt], return_tensors="pt") # predict with torch.no_grad(): outputs = model(**encoded_image, conditional_pixel_values=encoded_alternative_prompt.pixel_values) preds = outputs.logits.unsqueeze(1) preds = torch.transpose(preds, 0, 1)_, ax = plt.subplots(1, 2, figsize=(6, 4)) [a.axis('off') for a in ax.flatten()] ax[0].imshow(image) ax[1].imshow(torch.sigmoid(preds[0]))from segments import SegmentsClient from getpass import getpass api_key = getpass('Enter your API key: ') segments_client = SegmentsClient(api_key)samples = segments_client.get_samples("admin-tobias/clipseg") # Use the last image as an example sample = samples[1] image = Image.open(requests.get(sample.attributes.image.url, stream=True).raw) imagedataset = segments_client.get_dataset("admin-tobias/clipseg") category_names = [category.name for category in dataset.task_attributes.categories]from torch import nn inputs = processor(text=category_names, images=[image] * len(category_names), padding="max_length", return_tensors="pt") # predict with torch.no_grad(): outputs = model(**inputs) # resize the outputs preds = nn.functional.interpolate( outputs.logits.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bilinear" )len_cats = len(category_names) _, ax = plt.subplots(1, len_cats + 1, figsize=(3*(len_cats + 1), 4)) [a.axis('off') for a in ax.flatten()] ax[0].imshow(image) [ax[i+1].imshow(torch.sigmoid(preds[i][0])) for i in range(len_cats)]; [ax[i+1].text(0, -15, category_name) for i, category_name in enumerate(category_names)];threshold = 0.1 flat_preds = torch.sigmoid(preds.squeeze()).reshape((preds.shape[0], -1)) # Initialize a dummy "unlabeled" mask with the 
threshold flat_preds_with_treshold = torch.full((preds.shape[0] + 1, flat_preds.shape[-1]), threshold) flat_preds_with_treshold[1:preds.shape[0]+1,:] = flat_preds # Get the top mask index for each pixel inds = torch.topk(flat_preds_with_treshold, 1, dim=0).indices.reshape((preds.shape[-2], preds.shape[-1]))plt.imshow(inds)from segments.utils import bitmap2file import numpy as np inds_np = inds.numpy().astype(np.uint32) unique_inds = np.unique(inds_np).tolist() f = bitmap2file(inds_np, is_segmentation_bitmap=True) asset = segments_client.upload_asset(f, "clipseg_prediction.png") attributes = { 'format_version': '0.1', 'annotations': [{"id": i, "category_id": i} for i in unique_inds if i != 0], 'segmentation_bitmap': { 'url': asset.url }, } segments_client.add_label(sample.uuid, 'ground-truth', attributes)
2
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/91_tf_xla_generate.ipynb
# Preparing the environment !pip install transformers>=4.21.0# Stand-alone TF XLA generate example for Encoder-Decoder Models. # Note: execution times are deeply dependent on hardware. # If you have a machine with a powerful GPU, I highly recommend you to try this example there! import time import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM # 1. Load model and tokenizer model_name = "t5-small" tokenizer = AutoTokenizer.from_pretrained(model_name) model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name) # 2. Prepare tokenization and generation arguments -- don't forget padding to avoid retracing! tokenization_kwargs = {"pad_to_multiple_of": 32, "padding": True, "return_tensors": "tf"} generation_kwargs = {"num_beams": 4, "max_new_tokens": 32} # 3. Create your XLA generate function a̶n̶d̶ ̶m̶a̶k̶e̶ ̶P̶y̶T̶o̶r̶c̶h̶ ̶e̶a̶t̶ ̶d̶u̶s̶t̶ # This is the only change with respect to original generate workflow! xla_generate = tf.function(model.generate, jit_compile=True) # 4. Generate! Remember -- the first call will be slow, but all subsequent calls will be fast if you've done things right. input_prompts = [ f"translate English to {language}: I have four cats and three dogs." for language in ["German", "French", "Romanian"] ] for input_prompt in input_prompts: tokenized_inputs = tokenizer([input_prompt], **tokenization_kwargs) start = time.time_ns() generated_text = xla_generate(**tokenized_inputs, **generation_kwargs) end = time.time_ns() decoded_text = tokenizer.decode(generated_text[0], skip_special_tokens=True) print(f"Original prompt -- {input_prompt}") print(f"Generated -- {decoded_text}") print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")# Stand-alone TF XLA generate example for Decoder-Only Models. # Note: execution times are deeply dependent on hardware. # If you have a machine with a powerful GPU, I highly recommend you to try this example there! import time import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForCausalLM # 1. Load model and tokenizer model_name = "gpt2" # remember: decoder-only models need left-padding tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left", pad_token="</s>") model = TFAutoModelForCausalLM.from_pretrained(model_name) # 2. Prepare tokenization and generation arguments -- don't forget padding to avoid retracing! tokenization_kwargs = {"pad_to_multiple_of": 32, "padding": True, "return_tensors": "tf"} generation_kwargs = {"num_beams": 4, "max_new_tokens": 32} # 3. Create your XLA generate function a̶n̶d̶ ̶m̶a̶k̶e̶ ̶P̶y̶T̶o̶r̶c̶h̶ ̶e̶a̶t̶ ̶d̶u̶s̶t̶ # This is the only change with respect to original generate workflow! xla_generate = tf.function(model.generate, jit_compile=True) # 4. Generate! Remember -- the first call will be slow, but all subsequent calls will be fast if you've done things right. input_prompts = [f"The best thing about {country} is" for country in ["Spain", "Japan", "Angola"]] for input_prompt in input_prompts: tokenized_inputs = tokenizer([input_prompt], **tokenization_kwargs) start = time.time_ns() generated_text = xla_generate(**tokenized_inputs, **generation_kwargs) end = time.time_ns() decoded_text = tokenizer.decode(generated_text[0], skip_special_tokens=True) print(f"Original prompt -- {input_prompt}") print(f"Generated -- {decoded_text}") print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")
3
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/13_pytorch_xla.ipynb
%load_ext tensorboard %tensorboard --logdir tensorboardimport torch_xla.core.xla_model as xm from transformers import pipeline from transformers import FillMaskPipeline from transformers import AutoModelForMaskedLM, AutoTokenizer tpu_device = xm.xla_device() model = AutoModelForMaskedLM.from_pretrained('output').to(tpu_device) tokenizer = AutoTokenizer.from_pretrained('output') fill_mask = FillMaskPipeline(model, tokenizer) fill_mask.device = tpu_devicefill_mask('TPUs are much faster than <mask>!')
4
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/10_tf_serving.ipynb
import os import requests import tempfile import json import numpy as np import tensorflow as tf from tensorflow_serving.apis import predict_pb2 from tensorflow_serving.apis import prediction_service_pb2_grpc import grpc from transformers import TFBertForSequenceClassification, BertTokenizerFast, BertConfigMODEL_DIR = tempfile.gettempdir() model = TFBertForSequenceClassification.from_pretrained("nateraw/bert-base-uncased-imdb", from_pt=True) # the saved_model parameter is a flag to create a saved model version of the model in same time than the h5 weights model.save_pretrained(MODEL_DIR, saved_model=True) os.environ["MODEL_DIR"] = os.path.join(MODEL_DIR, "saved_model")%%bash --bg nohup tensorflow_model_server \ --rest_api_port=8501 \ --grpc_api_port=8500 \ --model_name=bert \ --model_base_path="${MODEL_DIR}" >server.log 2>&1 sentence = "I love the new TensorFlow update in transformers." # Load the corresponding tokenizer of our saved model tokenizer = BertTokenizerFast.from_pretrained("nateraw/bert-base-uncased-imdb") # Load the model config of our saved model config = BertConfig.from_pretrained("nateraw/bert-base-uncased-imdb")# Tokenize the sentence batch = tokenizer(sentence) # Convert the batch into a proper dict batch = dict(batch) # Put the example into a list of size 1, that corresponds to the batch size batch = [batch] # The REST API needs a JSON that contains the key instances to declare the examples to process input_data = {"instances": batch} # Query the REST API, the path corresponds to http://host:port/model_version/models_root_folder/model_name:method r = requests.post("http://localhost:8501/v1/models/bert:predict", data=json.dumps(input_data)) # Parse the JSON result. The results are contained in a list with a root key called "predictions" # and as there is only one example, takes the first element of the list result = json.loads(r.text)["predictions"][0] # The returned results are probabilities, that can be positive/negative hence we take their absolute value abs_scores = np.abs(result) # Take the argmax that correspond to the index of the max probability. label_id = np.argmax(abs_scores) # Print the proper LABEL with its index print(config.id2label[label_id])# Tokenize the sentence but this time with TensorFlow tensors as output already batch sized to 1. Ex: # { # 'input_ids': <tf.Tensor: shape=(1, 3), dtype=int32, numpy=array([[ 101, 19082, 102]])>, # 'token_type_ids': <tf.Tensor: shape=(1, 3), dtype=int32, numpy=array([[0, 0, 0]])>, # 'attention_mask': <tf.Tensor: shape=(1, 3), dtype=int32, numpy=array([[1, 1, 1]])> # } batch = tokenizer(sentence, return_tensors="tf") # Create a channel that will be connected to the gRPC port of the container channel = grpc.insecure_channel("localhost:8500") # Create a stub made for prediction. This stub will be used to send the gRPC request to the TF Server. 
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) # Create a gRPC request made for prediction request = predict_pb2.PredictRequest() # Set the name of the model, for this use case it is bert request.model_spec.name = "bert" # Set which signature is used to format the gRPC query, here the default one request.model_spec.signature_name = "serving_default" # Set the input_ids input from the input_ids given by the tokenizer # tf.make_tensor_proto turns a TensorFlow tensor into a Protobuf tensor request.inputs["input_ids"].CopyFrom(tf.make_tensor_proto(batch["input_ids"])) # Same with attention mask request.inputs["attention_mask"].CopyFrom(tf.make_tensor_proto(batch["attention_mask"])) # Same with token type ids request.inputs["token_type_ids"].CopyFrom(tf.make_tensor_proto(batch["token_type_ids"])) # Send the gRPC request to the TF Server result = stub.Predict(request) # The output is a protobuf where the only one output is a list of probabilities # assigned to the key logits. As the probabilities as in float, the list is # converted into a numpy array of floats with .float_val output = result.outputs["logits"].float_val # Print the proper LABEL with its index print(config.id2label[np.argmax(np.abs(output))])
5
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/02_how_to_generate.ipynb
import tensorflow as tf from transformers import TFGPT2LMHeadModel, GPT2Tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2") # add the EOS token as PAD token to avoid warnings model = TFGPT2LMHeadModel.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id)# encode context the generation is conditioned on input_ids = tokenizer.encode('I enjoy walking with my cute dog', return_tensors='tf') # generate text until the output length (which includes the context length) reaches 50 greedy_output = model.generate(input_ids, max_length=50) print("Output:\n" + 100 * '-') print(tokenizer.decode(greedy_output[0], skip_special_tokens=True))# activate beam search and early_stopping beam_output = model.generate( input_ids, max_length=50, num_beams=5, early_stopping=True ) print("Output:\n" + 100 * '-') print(tokenizer.decode(beam_output[0], skip_special_tokens=True))# set no_repeat_ngram_size to 2 beam_output = model.generate( input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2, early_stopping=True ) print("Output:\n" + 100 * '-') print(tokenizer.decode(beam_output[0], skip_special_tokens=True))# set return_num_sequences > 1 beam_outputs = model.generate( input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2, num_return_sequences=5, early_stopping=True ) # now we have 3 output sequences print("Output:\n" + 100 * '-') for i, beam_output in enumerate(beam_outputs): print("{}: {}".format(i, tokenizer.decode(beam_output, skip_special_tokens=True)))# set seed to reproduce results. Feel free to change the seed though to get different results tf.random.set_seed(0) # activate sampling and deactivate top_k by setting top_k sampling to 0 sample_output = model.generate( input_ids, do_sample=True, max_length=50, top_k=0 ) print("Output:\n" + 100 * '-') print(tokenizer.decode(sample_output[0], skip_special_tokens=True))# set seed to reproduce results. Feel free to change the seed though to get different results tf.random.set_seed(0) # use temperature to decrease the sensitivity to low probability candidates sample_output = model.generate( input_ids, do_sample=True, max_length=50, top_k=0, temperature=0.7 ) print("Output:\n" + 100 * '-') print(tokenizer.decode(sample_output[0], skip_special_tokens=True))# set seed to reproduce results. Feel free to change the seed though to get different results tf.random.set_seed(0) # set top_k to 50 sample_output = model.generate( input_ids, do_sample=True, max_length=50, top_k=50 ) print("Output:\n" + 100 * '-') print(tokenizer.decode(sample_output[0], skip_special_tokens=True))# set seed to reproduce results. Feel free to change the seed though to get different results tf.random.set_seed(0) # deactivate top_k sampling and sample only from 92% most likely words sample_output = model.generate( input_ids, do_sample=True, max_length=50, top_p=0.92, top_k=0 ) print("Output:\n" + 100 * '-') print(tokenizer.decode(sample_output[0], skip_special_tokens=True))# set seed to reproduce results. Feel free to change the seed though to get different results tf.random.set_seed(0) # set top_k = 50 and set top_p = 0.95 and num_return_sequences = 3 sample_outputs = model.generate( input_ids, do_sample=True, max_length=50, top_k=50, top_p=0.95, num_return_sequences=3 ) print("Output:\n" + 100 * '-') for i, sample_output in enumerate(sample_outputs): print("{}: {}".format(i, tokenizer.decode(sample_output, skip_special_tokens=True)))
6
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/95_Training_Sentence_Transformers.ipynb
from sentence_transformers import SentenceTransformer, models ## Step 1: use an existing language model word_embedding_model = models.Transformer('distilroberta-base') ## Step 2: use a pool function over the token embeddings pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension()) ## Join steps 1 and 2 using the modules argument model = SentenceTransformer(modules=[word_embedding_model, pooling_model])from datasets import load_dataset dataset_id = "embedding-data/QQP_triplets" # dataset_id = "embedding-data/sentence-compression" dataset = load_dataset(dataset_id)print(f"- The {dataset_id} dataset has {dataset['train'].num_rows} examples.") print(f"- Each example is a {type(dataset['train'][0])} with a {type(dataset['train'][0]['set'])} as value.") print(f"- Examples look like this: {dataset['train'][0]}")from sentence_transformers import InputExample train_examples = [] train_data = dataset['train']['set'] # For agility we only 1/2 of our available data n_examples = dataset['train'].num_rows // 2 for i in range(n_examples): example = train_data[i] train_examples.append(InputExample(texts=[example['query'], example['pos'][0], example['neg'][0]]))print(f"We have a {type(train_examples)} of length {len(train_examples)} containing {type(train_examples[0])}'s.")from torch.utils.data import DataLoader train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)from sentence_transformers import losses train_loss = losses.TripletLoss(model=model)num_epochs = 10 warmup_steps = int(len(train_dataloader) * num_epochs * 0.1) #10% of train datamodel.fit(train_objectives=[(train_dataloader, train_loss)], epochs=num_epochs, warmup_steps=warmup_steps) model.save_to_hub( "distilroberta-base-sentence-transformer", organization="embedding-data", train_datasets=["embedding-data/QQP_triplets"], exist_ok=True, )modelB = SentenceTransformer('embedding-data/distilroberta-base-sentence-transformer')dataset_id = "embedding-data/sentence-compression" datasetB = load_dataset(dataset_id)print(f"Examples look like this: {datasetB['train']['set'][0]}")train_examplesB = [] train_dataB = dataset['train']['set'] n_examples = dataset['train'].num_rows for i in range(n_examples): example = train_dataB[i] train_examplesB.append(InputExample(texts=[example[0], example[1]]))train_dataloaderB = DataLoader(train_examplesB, shuffle=True, batch_size=64) train_lossB = losses.MultipleNegativesRankingLoss(model=modelB) num_epochsB = 10 warmup_stepsB = int(len(train_dataloaderB) * num_epochsB * 0.1) #10% of train datamodelB.fit(train_objectives=[(train_dataloaderB, train_lossB)], epochs=num_epochsB, warmup_steps=warmup_stepsB) modelB.save_to_hub( "distilroberta-base-sentence-transformer", organization="embedding-data", train_datasets=["embedding-data/sentence-compression"], exist_ok=True, )
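The notebook trains and pushes the models but stops short of showing inference. A minimal sketch of embedding sentences with the pushed model (the repository name is taken from the save_to_hub call above; the example sentences are made up):

from sentence_transformers import SentenceTransformer

# Load the fine-tuned model from the Hub and embed a few sentences.
model = SentenceTransformer("embedding-data/distilroberta-base-sentence-transformer")
embeddings = model.encode(
    ["How do I learn Python?", "What is the best way to study Python?"]
)
print(embeddings.shape)  # (2, embedding_dimension)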
7
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/111_tf_serving_vision.ipynb
from transformers import ViTImageProcessor, TFViTForImageClassification import tensorflow as tf import tempfile import requests import base64 import json import osimport transformers print(transformers.__version__)# the saved_model parameter is a flag to create a saved model version of the model temp_model_dir = "vit" model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224") model.save_pretrained(temp_model_dir, saved_model=True)processor = ViTImageProcessor() processorCONCRETE_INPUT = "pixel_values" SIZE = processor.size["height"] INPUT_SHAPE = (SIZE, SIZE, 3)def normalize_img( img, mean=processor.image_mean, std=processor.image_std ): # Scale to the value range of [0, 1] first and then normalize. img = img / 255 mean = tf.constant(mean) std = tf.constant(std) return (img - mean) / std def preprocess(string_input): decoded_input = tf.io.decode_base64(string_input) decoded = tf.io.decode_jpeg(decoded_input, channels=3) resized = tf.image.resize(decoded, size=(SIZE, SIZE)) normalized = normalize_img(resized) normalized = tf.transpose( normalized, (2, 0, 1) ) # Since HF models are channel-first. return normalized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def preprocess_fn(string_input): decoded_images = tf.map_fn( preprocess, string_input, dtype=tf.float32, back_prop=False ) return {CONCRETE_INPUT: decoded_images} def model_exporter(model: tf.keras.Model): m_call = tf.function(model.call).get_concrete_function( tf.TensorSpec( shape=[None, 3, SIZE, SIZE], dtype=tf.float32, name=CONCRETE_INPUT ) ) @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(string_input): labels = tf.constant(list(model.config.id2label.values()), dtype=tf.string) images = preprocess_fn(string_input) predictions = m_call(**images) indices = tf.argmax(predictions.logits, axis=1) pred_source = tf.gather(params=labels, indices=indices) probs = tf.nn.softmax(predictions.logits, axis=1) pred_confidence = tf.reduce_max(probs, axis=1) return {"label": pred_source, "confidence": pred_confidence} return serving_fnMODEL_DIR = tempfile.gettempdir() VERSION = 1 tf.saved_model.save( model, os.path.join(MODEL_DIR, str(VERSION)), signatures={"serving_default": model_exporter(model)}, ) os.environ["MODEL_DIR"] = MODEL_DIR# Deviates from the original installation instructions. # https://issuemode.com/issues/tensorflow/serving/92945160 !wget 'http://storage.googleapis.com/tensorflow-serving-apt/pool/tensorflow-model-server-universal-2.8.0/t/tensorflow-model-server-universal/tensorflow-model-server-universal_2.8.0_all.deb' !dpkg -i tensorflow-model-server-universal_2.8.0_all.deb%%bash --bg nohup tensorflow_model_server \ --rest_api_port=8501 \ --model_name=vit \ --model_base_path=$MODEL_DIR >server.log 2>&1 image_path = tf.keras.utils.get_file( "image.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg" ) bytes_inputs = tf.io.read_file(image_path) b64str = base64.urlsafe_b64encode(bytes_inputs.numpy()).decode("utf-8") data = json.dumps({"signature_name": "serving_default", "instances": [b64str]}) print("Data: {} ... 
{}".format(data[:50], data[len(data) - 52 :]))headers = {"content-type": "application/json"} json_response = requests.post( "http://localhost:8501/v1/models/vit:predict", data=data, headers=headers ) print(json.loads(json_response.text))import grpc from tensorflow_serving.apis import predict_pb2 from tensorflow_serving.apis import prediction_service_pb2_grpcchannel = grpc.insecure_channel("localhost:8500") stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)loaded = tf.saved_model.load(f"{MODEL_DIR}/{VERSION}") serving_input = list( loaded.signatures["serving_default"].structured_input_signature[1].keys() )[0] print("Serving function input:", serving_input)request = predict_pb2.PredictRequest() request.model_spec.name = "vit" request.model_spec.signature_name = "serving_default" request.inputs[serving_input].CopyFrom(tf.make_tensor_proto([b64str]))grpc_predictions = stub.Predict(request, 10.0) # 10 secs timeout print(grpc_predictions)grpc_predictions.outputs["label"].string_val, grpc_predictions.outputs[ "confidence" ].float_val
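# Optional helper (not shown in the original notebook): wrap the REST request
# from the cells above so any local image can be classified in one call.
# It assumes the TF Serving instance started earlier is still running at
# localhost:8501 under the model name "vit"; the response format mirrors the
# serving signature exported above.
def classify_image_via_rest(path, url="http://localhost:8501/v1/models/vit:predict"):
    with open(path, "rb") as f:
        encoded = base64.urlsafe_b64encode(f.read()).decode("utf-8")
    payload = json.dumps({"signature_name": "serving_default", "instances": [encoded]})
    response = requests.post(url, data=payload, headers={"content-type": "application/json"})
    response.raise_for_status()
    return json.loads(response.text)["predictions"][0]

# e.g. reuse the COCO image downloaded above:
# print(classify_image_via_rest(image_path))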
8
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/unified-tool-calling.ipynb
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

checkpoint = "NousResearch/Hermes-2-Pro-Llama-3-8B"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, device_map="auto")

def get_current_temperature(location: str):
    """
    Gets the temperature at a given location.

    Args:
        location: The location to get the temperature for, in the format "city, country"
    """
    return 22.0  # bug: Sometimes the temperature is not 22. low priority to fix tho

tools = [get_current_temperature]

chat = [
    {"role": "user", "content": "Hey, what's the weather like in Paris right now?"}
]

tool_prompt = tokenizer.apply_chat_template(
    chat,
    tools=tools,
    return_tensors="pt",
    return_dict=True,
    add_generation_prompt=True,
)
tool_prompt = tool_prompt.to(model.device)

out = model.generate(**tool_prompt, max_new_tokens=128)
generated_text = out[0, tool_prompt['input_ids'].shape[1]:]

print(tokenizer.decode(generated_text))

tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France"}}
chat.append({"role": "assistant", "tool_calls": [{"type": "function", "function": tool_call}]})

chat.append({"role": "tool", "name": "get_current_temperature", "content": "22.0"})

tool_prompt = tokenizer.apply_chat_template(
    chat,
    tools=tools,
    return_tensors="pt",
    return_dict=True,
    add_generation_prompt=True,
)
tool_prompt = tool_prompt.to(model.device)

out = model.generate(**tool_prompt, max_new_tokens=128)
generated_text = out[0, tool_prompt['input_ids'].shape[1]:]

print(tokenizer.decode(generated_text))
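# Optional dispatch sketch (not in the original notebook): given a parsed tool
# call like the dict constructed manually above, look up the matching Python
# function, run it, and append both the assistant tool call and the tool result
# to the chat history. Parsing the raw generated text into such a dict is
# model/template specific and is assumed to have been done already.
def run_tool_call(tool_call, tools, chat):
    registry = {fn.__name__: fn for fn in tools}
    result = registry[tool_call["name"]](**tool_call["arguments"])
    chat.append({"role": "assistant", "tool_calls": [{"type": "function", "function": tool_call}]})
    chat.append({"role": "tool", "name": tool_call["name"], "content": str(result)})

# e.g.:
# run_tool_call({"name": "get_current_temperature", "arguments": {"location": "Paris, France"}}, tools, chat)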
9
0
hf_public_repos/blog
hf_public_repos/blog/zh/huggy-lingo.md
--- title: "Huggy Lingo:利用机器学习改进 Hugging Face Hub 上的语言元数据" thumbnail: blog/assets/156_huggylingo/Huggy_Lingo.png authors: - user: davanstrien translators: - user: MatrixYao - user: zhongdongy proofreader: true --- ## Huggy Lingo: 利用机器学习改进 Hugging Face Hub 上的语言元数据 **太长不看版**: Hub 上有不少数据集没有语言元数据,我们用机器学习来检测其语言,并使用 [librarian-bots](https://huggingface.co/librarian-bots) 自动向这些数据集提 PR 以添加其语言元数据。 Hugging Face Hub 已成为社区共享机器学习模型、数据集以及应用的存储库。随着 Hub 上的数据集越来越多,元数据,作为一种能帮助用户找到所需数据集的工具,变得越来越重要。 我们很高兴能够通过本文与大家分享我们的一些早期实验,这些实验旨在利用机器学习来改进 Hugging Face Hub 上托管的数据集的元数据。 ### Hub 上数据集的语言元数据 目前 Hugging Face Hub 上约有 5 万个公开数据集。用户可以通过 [数据集卡](https://huggingface.co/docs/datasets/upload_dataset#create-a-dataset-card) 顶部的 [YAML](https://en.wikipedia.org/wiki/YAML) 字段设定其语言元信息。 我们目前支持 1716 种语言标签,所有的公开数据集都可以在其语言元信息中指定其一。请注意,有些语言会有多个不同的语言标签,如 `en` 、`eng` 、`english` 、`English` 都是英语。 举个例子,[IMDB 数据集](https://huggingface.co/datasets/imdb) 的 YAML 元数据中的语言标签为 `en` : <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/huggy_lingo/lang_metadata.png" alt="Screenshot of YAML metadata"><br> <em>IMDB 数据集的 YAML 元数据部分 </em> </p> 迄今为止,Hub 上数据集上最常见的语言是英语,有大约 19% 的数据集将其语言标注为 `en` (这还没把 `en` 的其他变体统计在内,因此实际百分比可能会比 19% 要高得多)。这个现象符合我们的预期。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/huggy_lingo/lang_freq.png" alt="Distribution of language tags"><br> <em>Hugging Face Hub 上的数据集的频率及占比 </em> </p> 如果排除掉英语,语言分布又如何呢?我们可以看到,有几种语言相对占主导,随后的其他语言的频率则出现了平稳下降。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/huggy_lingo/lang_freq_distribution.png" alt="Distribution of language tags"><br> <em>Hub 上数据集的语言标签分布情况 (除英语外) </em> </p> 这里,我们发现一个重大问题,那就是大多数数据集 (约 87%) 没有指明其所使用的语言,只有大约 13% 的数据集在其元数据中指明了语言信息。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/huggy_lingo/has_lang_info_bar.png" alt="Barchart"><br> <em>具有语言元数据的数据集占比。True 表示指明了语言元数据,False 表示未列出语言元数据。No card data 意味着没有任何元数据,抑或是`huggingface_hub` Python 库无法加载它。</em> </p> #### 为什么语言元数据很重要? 
语言元数据是查找相关数据集的重要工具。Hugging Face Hub 允许用户按语言过滤数据集。例如,如果想查找荷兰语数据集,我们可以在 Hub 上用 [过滤器](https://huggingface.co/datasets?language=language:nl&sort=trending) 过滤出仅包含荷兰语的数据集。 目前,此过滤器返回 184 个数据集。但是,Hub 上其实还有别的一些数据集中包含荷兰语,但其未在元数据中指明语言,因此就很难被过滤器找到。随着 Hub 上数据集越来越多,这种现象会愈发严重。 许多人希望能够找到特定语言的数据集。而当前,为特定语言训练出优秀的开源 LLM 的主要障碍之一就是缺乏相应语言的高质量训练数据。 在为某些任务搜寻相关的机器学习模型时,了解模型的训练数据中包含哪些语言有助于我们找到能够支持我们想要的语言的模型。而这又依赖于相应的训练数据集中是否含有相关语言信息。 最后,了解 Hub 上有哪些语言 (以及没有哪些语言),有助于我们了解 Hub 在语种支持上的偏差,并为社区解决特定语言上的数据差距提供信息支持。 ### 利用机器学习预测数据集的语言 我们已经看到 Hugging Face Hub 上的许多数据集并未包含语言元数据。然而,由于这些数据集已经公开,也许我们可以尝试利用机器学习来识别其语言。 #### 获取数据 我们可以使用 `datasets` 库下载数据集并获取它的一些样本,代码如下: ```python from datasets import load_dataset dataset = load_dataset("biglam/on_the_books") ``` 对于 Hub 上的某些数据集,我们可能不希望下载整个数据集。我们可以尝试加载数据集的部分样本。然而,根据数据集的创建方式不同,对某些数据集,我们最终下载到机器上的数据可能仍比我们实际需要的多。 幸运的是,Hub 上的许多数据集都可以通过 [dataset viewer](https://huggingface.co/docs/datasets-server/index) 获得。Dataset viewer 是一个 API,其允许我们无需下载到本地即可访问 Hub 上托管的数据集。Dataset viewer 已被应用于数据集查看器预览功能,Hub 上托管的许多数据集都支持数据集查看器预览功能。 为了给语言检测实验准备数据,我们首先定义了一个白名单,其中包含了可能包含文本的列名及数据类型,如名字为 `text` 或 `prompt` 的列以及数据类型为 `string` 的特征可能包含文本,但名字为 `image` 的列大概率是不相关的。这意味着我们可以避免为不相关的数据集预测其语言,例如为图像分类数据集预测语言。我们用 dataset viewer 获取 20 行文本数据并传给机器学习模型 (具体用多少行数据可以根据实际情况修改)。 这么做的话,我们可以对 Hub 上的大多数数据集,快速获取它们前 20 行数据的文本内容。 #### 预测数据集的语言 获取到文本样本后,我们就需要预测其语言。这里有多种方法,目前,我们使用了由 [Meta](https://huggingface.co/facebook) 为 [“一个语言都不能少”](https://ai.facebook.com/research/no-language-left-behind/) 项目而开发的 [facebook/fasttext-language-identification](https://huggingface.co/facebook/fasttext-language-identification) fastText 模型。该模型可以检测 217 种语言,这覆盖了 Hub 上托管的大多数数据集的语言。 我们将 20 个样本传给模型,由模型为每个数据集生成 20 个单独的语言预测 (每个样本一个)。 有了这些预测后,我们会进行一些额外的过滤,以决定我们是否接受这些预测作为元数据。主要步骤有: - 对每个数据集按预测语言进行分组: 某些数据集可能会预测出多种语言。此时,我们会按预测语言对这些样本进行分组。举个例子,如果返回英语和荷兰语两种预测,我们将样本按照预测语言分成两个组。 - 分别计算每种预测语言的样本数。如果其中某种语言的样本比例低于 20%,我们就丢弃该预测。举个例子,如果我们有 18 个样本预测为英语,2 个样本预测为荷兰语,此时我们就会丢弃荷兰语预测。 - 对每种语言的预测分求平均。如果平均分低于 80%,丢弃该预测。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/huggy_lingo/prediction-flow.png" alt="Prediction workflow"><br> <em>预测过滤流程图</em> </p> 一旦完成过滤,我们就需要进一步决定如何使用这些预测。fastText 语言预测模型的输出为一个 [ISO 639-3](https://en.wikipedia.org/wiki/ISO_639-3) 代码 (一种语言代码的国际标准) 加一个文字代码。举个例子,输出 `kor_Hang` 的前半部分 kor 是韩语的 ISO 693-3 语言代码,而后半部分 Hang 是韩字的 [ISO 15924](https://en.wikipedia.org/wiki/ISO_15924) 代码。 我们会丢弃文字代码,因为当前 Hub 尚未将此作为元数据的一部分。同时,我们会将模型返回的 [ISO 639-3](https://en.wikipedia.org/wiki/ISO_639-3) 语言代码转换为 [ISO 639-1](https://en.wikipedia.org/wiki/ISO_639-1)。这么做主要是因为 Hub UI 的数据集导航功能对 ISO 639-3 代码的支持更好。 还有一种情况需要处理,即某些 ISO 639-3 代码并没有对应的 ISO 639-1 代码。此时,如有必要,我们会手动指定映射,例如将标准阿拉伯语 ( `arb` ) 映射到阿拉伯语 ( `ar` )。如果无法进行显式映射,我们索性就放弃配置该数据集的语言元数据。我们会在后续工作中继续改进我们的方法。我们已经意识到,当前的方法确实是有缺点的,它人为减少了语言的多样性,并依赖于对某些语言映射关系的主观判断。 我们会持续改进,但当前我们并不会因为被问题绊住而停在原地。毕竟,如果我们无法与社区其他成员共享这些信息,那么预测数据集的语言又有什么用呢?有缺陷的信息总比没有信息好。 ### 使用 Librarian-Bot 更新元数据 为了确保将这些有价值的语言元数据上传至 Hub,我们使用了 Librarian-Bot! Librarian-Bot 会采纳 Meta 的 [facebook/fasttext-language-identification](https://huggingface.co/facebook/fasttext-language-identification) fastText 模型预测的语言信息,并自动生成 PR 以将此信息添加至各数据集的元数据中。 该系统可以快速高效地更新各数据集的语言信息,无需人类手动操作。一旦数据集的所有者批准并合并相应 PR,所有用户就都可以使用该语言元数据,从而显著增强 Hugging Face Hub 的可用性。你可以在 [此处](https://huggingface.co/librarian-bot/activity/community) 跟踪 Librarian-Bot 的一举一动! 
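下面是一段示意性的 Python 代码草图(并非 Librarian-Bot 的官方实现),用来说明上文描述的过滤规则:按预测语言分组、丢弃样本占比低于 20% 的语言、丢弃平均置信度低于 80% 的语言。其中函数名与输入格式(每个样本一个 `(语言代码, 置信度)` 元组)均为演示假设。

```python
from collections import defaultdict

def filter_language_predictions(predictions, share_threshold=0.2, score_threshold=0.8):
    """按照上文描述的规则过滤单个数据集的语言预测结果。

    predictions: 形如 [(语言代码, 置信度), ...] 的列表,每个采样行一个预测。
    返回同时通过两条过滤规则的语言代码列表。
    """
    by_language = defaultdict(list)
    for language, score in predictions:
        by_language[language].append(score)

    total = len(predictions)
    accepted = []
    for language, scores in by_language.items():
        share = len(scores) / total             # 样本占比低于 20% 则丢弃
        mean_score = sum(scores) / len(scores)  # 平均置信度低于 80% 则丢弃
        if share >= share_threshold and mean_score >= score_threshold:
            accepted.append(language)
    return accepted

# 例子:18 个样本预测为英语、2 个样本预测为荷兰语时,只保留英语
sample = [("eng_Latn", 0.95)] * 18 + [("nld_Latn", 0.90)] * 2
print(filter_language_predictions(sample))
```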
#### 下一步 随着 Hub 上的数据集越来越多,元数据变得越来越重要,而其中语言元数据可以帮助用户甄别出合适自己场景的数据集。 在 Dataset viewer 和 [Librarian-Bots](https://huggingface.co/librarian-bots) 的帮助下,我们可以大规模地自动更新数据集元数据,这是手动更新无法企及的。我们正在用这种方法不断丰富 Hub,进而使 Hub 成为服务世界各地的数据科学家、语言学家和人工智能爱好者的强大工具。
0
0
hf_public_repos/blog
hf_public_repos/blog/zh/blip-2.md
--- title: "使用 BLIP-2 零样本“图生文”" thumbnail: /blog/assets/blip-2/thumbnail.png authors: - user: MariaK - user: JunnanLi translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 使用 BLIP-2 零样本“图生文” 本文将介绍来自 Salesforce 研究院的 [BLIP-2](https://huggingface.co/docs/transformers/main/en/model_doc/blip-2) 模型,它支持一整套最先进的视觉语言模型,且已集成入 [🤗 Transformers](https://huggingface.co/transformers)。我们将向你展示如何将其用于图像字幕生成、有提示图像字幕生成、视觉问答及基于聊天的提示这些应用场景。 ## Table of contents 1. [简介](#简介) 2. [BLIP-2 葫芦里卖的什么药?](#BLIP-2 葫芦里卖的什么药?) 3. [通过 Hugging Face Transformers 使用 BLIP-2](#通过 Hugging Face Transformers 使用 BLIP-2) 1. [图像字幕生成](#图像字幕生成) 2. [有提示图片字幕生成](#有提示图片字幕生成) 3. [视觉问答](#视觉问答) 4. [基于聊天的提示](#基于聊天的提示) 4. [结论](#结论) 5. [致谢](#致谢) ## 简介 近年来,计算机视觉和自然语言处理领域各自都取得了飞速发展。但许多实际问题本质上其实是多模态的,即它们同时涉及几种不同形式的数据,如图像和文本。因此,需要视觉语言模型来帮助解决一系列组合模态的挑战,我们的技术才能最终得到广泛落地。视觉语言模型可以处理的一些 图生文 任务包括图像字幕生成、图文检索以及视觉问答。图像字幕生成可以用于视障人士辅助、创建有用的产品描述、识别非文本模态的不当内容等。图文检索可以用于多模态搜索,也可用于自动驾驶场合。视觉问答可以助力教育行业、使能多模态聊天机器人,还可用于各种特定领域的信息检索应用。 现代计算机视觉和自然语言模型在能力越来越强大的同时,模型尺寸也随之显著增大。由于当前进行一次单模态模型的预训练既耗费资源又昂贵,因此端到端视觉语言 [BLIP-2](https://arxiv.org/pdf/2301.12597.pdf) 通过引入一种新的视觉语言预训练范式来应对这一挑战,该范式可以任意组合并充分利用两个预训练好的视觉编码器和 LLM,而无须端到端地预训练整个架构。这使得我们可以在多个视觉语言任务上实现最先进的结果,同时显著减少训练参数量和预训练成本。此外,这种方法为多模态ChatGPT 类应用奠定了基础。 ## BLIP-2 葫芦里卖的什么药? BLIP-2 通过在冻结的预训练图像编码器和冻结的预训练大语言模型之间添加一个轻量级 查询 Transformer (Query Transformer, Q-Former) 来弥合视觉和语言模型之间的模态隔阂 (modality gap)。在整个模型中,Q-Former 是唯一的可训练模块,而图像编码器和语言模型始终保持冻结状态。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/blip-2/q-former-1.png" alt="Overview of BLIP-2's framework" width=500> </p> Q-Former 是一个 transformer 模型,它由两个子模块组成,这两个子模块共享相同的自注意力层: * 与冻结的图像编码器交互的图像 transformer,用于视觉特征提取 * 文本 transformer,用作文本编码器和解码器 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/blip-2/q-former-2.png" alt="Q-Former architecture" width=500> </p> 图像 transformer 从图像编码器中提取固定数量的输出特征,这里特征的个数与输入图像分辨率无关。同时,图像 transformer 接收若干查询嵌入作为输入,这些查询嵌入是可训练的。这些查询还可以通过相同的自注意力层与文本进行交互 (译者注: 这里的相同是指图像 transformer 和文本 transformer 对应的自注意力层是共享的)。 Q-Former 分两个阶段进行预训练。第一阶段,图像编码器被冻结,Q-Former 通过三个损失函数进行训练: * 图文对比损失 (image-text contrastive loss): 每个查询的输出都与文本输出的 CLS 词元计算成对相似度,并从中选择相似度最高的一个最终计算对比损失。在该损失函数下,查询嵌入和文本不会 “看到” 彼此。 * 基于图像的文本生成损失: 查询内部可以相互计算注意力但不计算文本词元对查询的注意力,同时文本内部的自注意力使用因果掩码且需计算所有查询对文本的注意力。 * 图文匹配损失 (image-text matching loss): 查询和文本可以看到彼此,最终获得一个几率 (logit) 用以表示文字与图像是否匹配。这里,使用难例挖掘技术 (hard negative mining) 来生成负样本。 图像 transformer 作为一个信息瓶颈 (information bottleneck),查询嵌入经过它后,其输出嵌入已经不仅仅包含了视觉信息,而且包含了与文本相关的视觉信息。这些输出嵌入用作第二阶段 LLM 输入的视觉前缀。该预训练阶段主要涉及一个以基于图像的文本生成任务,损失函数使用因果 LM 损失。 BLIP-2 使用 ViT 作为视觉编码器。而对于 LLM,论文作者使用 OPT 和 Flan T5 模型。你可以找到在 [Hugging Face Hub](https://huggingface.co/models?other=blip-2) 上找到 OPT 和 Flan T5 的预训练 checkpoints。但不要忘记,如前所述,BLIP-2 设计的预训练方法允许任意的视觉主干模型和 LLM 的组合。 ## 通过 Hugging Face Transformers 使用 BLIP-2 使用 Hugging Face Transformers,你可以轻松下载并在你自己的图像上运行预训练的 BLIP-2 模型。如果你想跑跑本文中的示例,请确保使用大显存 GPU。 我们从安装 Transformers 开始。由于此模型是最近才添加到 Transformers 中的,因此我们需要从源代码安装 Transformers: ```bash pip install git+https://github.com/huggingface/transformers.git ``` 接下来,我们需要一个输入图像。《纽约客》每周都会面向其读者举办一场 [卡通字幕比赛](https://www.newyorker.com/cartoons/contest#thisweek)。我们从中取一张卡通图像输入给 BLIP-2 用于测试。 ``` import requests from PIL import Image url = 'https://media.newyorker.com/cartoons/63dc6847be24a6a76d90eb99/master/w_1160,c_limit/230213_a26611_838.jpg' image = Image.open(requests.get(url, stream=True).raw).convert('RGB') display(image.resize((596, 437))) ``` <p align="center"> <img 
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/blip-2/cartoon.jpeg" alt="New Yorker Cartoon" width=500> </p> 现在我们有一张输入图像了,还需要一个预训练过的 BLIP-2 模型和相应的预处理器来处理输入。你 可以在 [Hugging Face Hub](https://huggingface.co/models?other=blip-2) 上找到所有可用的预训练 checkpoints 列表。这里,我们将加载一个使用 Meta AI 的预训练 OPT 模型的 BLIP-2 checkpoint,该 OPT 模型具有 27 亿个参数。 ``` from transformers import AutoProcessor, Blip2ForConditionalGeneration import torch processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) ``` 请注意,你暂时还无法使用 Auto API (例如 AutoModelForXXX) 来加载 BLIP-2 模型,这种情况在 Hugging Face 中比较少见。你需要显式使用 `Blip2ForConditionalGeneration` 来加载 BLIP-2 模型。虽然自动获取模型还不能做到,但是你可以使用 `AutoProcessor` 来获取匹配的处理器类,在本例中为 `Blip2Processor`。 我们可以使用 GPU 来加快文本生成速度: ``` device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) ``` ### 图像字幕生成 我们先看看 BLIP-2 是否可以零样本地为《纽约客》卡通图像生成字幕。要为图像添加字幕,我们不必向模型提供任何文本提示,仅提供预处理过的输入图像。没有任何文字提示,模型将从 BOS (beginning-of-sequence) 开始生成图像字幕。 ``` inputs = processor(image, return_tensors="pt").to(device, torch.float16) generated_ids = model.generate(**inputs, max_new_tokens=20) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() print(generated_text) ``` ``` "two cartoon monsters sitting around a campfire" ``` 对于未使用《纽约客》风格的卡通图像训练过的模型,这是一个令人印象深刻的准确描述! ### 有提示图片字幕生成 我们还可以通过提供文本提示来扩展图像字幕生成,模型将在给定图像的情况下接着提示词往下补充。 ``` prompt = "this is a cartoon of" inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16) generated_ids = model.generate(**inputs, max_new_tokens=20) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() print(generated_text) ``` ``` "two monsters sitting around a campfire" ``` ``` prompt = "they look like they are" inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16) generated_ids = model.generate(**inputs, max_new_tokens=20) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() print(generated_text) ``` ``` "having a good time" ``` ### 视觉问答 用于视觉问答时,提示必须遵循特定格式: "Question: {} Answer:" ``` prompt = "Question: What is a dinosaur holding? Answer:" inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16) generated_ids = model.generate(**inputs, max_new_tokens=10) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() print(generated_text) ``` ``` "A torch" ``` ### 基于聊天的提示 最后,我们可以通过拼接对话中每轮的问题和回答来创建类似 ChatGPT 的体验。我们用某个提示 (比如 “恐龙拿着什么?”) 来问模型,模型会为它生成一个答案 (如 “火炬”),我们可以把这一问一答拼接到对话中。然后我们再来一轮,这样就把上下文 (context) 建立起来了。 但是,需要确保的是,上下文不能超过 512 个标记,因为这是 BLIP-2 使用的语言模型 (OPT 和 T5) 的上下文长度。 ``` context = [ ("What is a dinosaur holding?", "a torch"), ("Where are they?", "In the woods.") ] question = "What for?" template = "Question: {} Answer: {}." prompt = " ".join([template.format(context[i][0], context[i][1]) for i in range(len(context))]) + " Question: " + question + " Answer:" print(prompt) ``` ``` Question: What is a dinosaur holding? Answer: a torch. Question: Where are they? Answer: In the woods.. Question: What for? Answer: ``` ``` inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16) generated_ids = model.generate(**inputs, max_new_tokens=10) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() print(generated_text) ``` ``` To light a fire. 
``` ## 结论 BLIP-2 是一种零样本视觉语言模型,可用于各种含图像和文本提示的图像到文本任务。这是一种效果好且效率高的方法,可应用于多种场景下的图像理解,特别是当训练样本稀缺时。 该模型通过在预训练模型之间添加 transformer 来弥合视觉和自然语言模态之间的隔阂。这一新的预训练范式使它能够充分享受两种模态的各自的进展的红利。 如果您想了解如何针对各种视觉语言任务微调 BLIP-2 模型,请查看 [Salesforce 提供的 LAVIS 库](https://github.com/salesforce/LAVIS),它为模型训练提供全面支持。 要查看 BLIP-2 的运行情况,可以在 [Hugging Face Spaces](https://huggingface.co/spaces/Salesforce/BLIP2) 上试用其演示。 ## 致谢 非常感谢 Salesforce 研究团队在 BLIP-2 上的工作,感谢 Niels Rogge 将 BLIP-2 添加到 🤗 Transformers,感谢 Omar Sanseviero 审阅这篇文章。
1
0
hf_public_repos/blog
hf_public_repos/blog/zh/ml-for-games-1.md
--- title: "基于AI进行游戏开发:5天!创建一个农场游戏!第1部分" thumbnail: /blog/assets/124_ml-for-games/thumbnail.png authors: - user: dylanebert translators: - user: SuSung-boy - user: inferjay proofreader: true --- # 基于AI进行游戏开发:5天!创建一个农场游戏!第1部分 **欢迎使用 AI 进行游戏开发!** 在本系列中,我们将使用各种 AI 工具,在 5 天内创建一个功能完备的农场游戏。到本系列结束时,你将了解到如何将多种 AI 工具整合到游戏开发流程中。本系列文章将向你展示如何将 AI 工具用于: 1. 美术风格 2. 游戏设计 3. 3D 素材 4. 2D 素材 5. 剧情 想要观看视频快速了解?请 [点击这里](https://www.tiktok.com/@individualkex/video/7184106492180630827) 快速了解本文内容,如果你需要掌握更多技术细节,请继续阅读! > 注意:本教程面向熟悉 Unity 开发和 C# 语言的读者。如果你不熟悉这些技术,我们建议你先了解一下 Unity 然后再继续阅读,本文的作者制作了一系列 [Unity 初学者视频](https://www.tiktok.com/@individualkex/video/7086863567412038954?is_from_webapp=1&sender_device=pc&web_id=7043883634428052997),如果你希望我们发布到 Hugging Face 账号,请在评论区留言告诉我们。 第 1 天:确立美术风格 ============ 游戏开发流程第一步是 **确立美术风格**。对于要创建的农场游戏,本文将使用 Stable Diffusion 工具来帮助其美术风格的确立。Stable Diffusion 是一种基于文本描述生成图像的开源模型。接下来会介绍如何使用该工具为农场游戏创建视觉美术风格。 Stable Diffusion 基本设置 --------------------- 运行 Stable Diffusion 有两种方案可选:**本地或在线**。如果你拥有一台配备良好 GPU 的台式机并想使用全功能工具库,那么更建议 [本地方案](#locally)。除此之外,你还可以尝试 [在线方案](#online),请继续阅读本文详细查看本地方案和线上方案: 本地方案 <a name="locally"></a> ---- 本文将使用 [Automatic1111 WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) 在本地运行 Stable Diffusion。这是比较流行的本地运行 Stable Diffusion 的方案,不过要成功将其设置运行起来,还需要一些技术知识。如果你使用 Windows 且具有 8GB 以上内存的 Nvidia GPU,请按以下指示执行。否则,请在 [代码仓库](https://github.com/AUTOMATIC1111/stable-diffusion-webui) 的 README 文件中查看其他平台的运行说明,更或者可以选择 [在线方案](#online)。 ### 在 Windows 上安装 **要求:** 具有 8 GB 以上内存的 Nvidia GPU。 1. 安装 [Python 3.10.6](https://www.python.org/downloads/windows/),安装时勾选 "Add Python to PATH" 2. 安装 [git](https://git-scm.com/download/win) 3. 在命令提示符中输入以下内容来克隆所需仓库 ``` git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git ``` 4. 下载 [Stable Diffusion v1.5 checkpoint](https://huggingface.co/runwayml/stable-diffusion-v1-5),并将其移动到仓库的 `models` 目录下 5. 运行 `webui-user.bat` 来启动 WebUI 6. 浏览器中访问 `localhost://7860`。如果一切正常,你将看到如下内容: <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/webui.png" alt="Stable Diffusion WebUI"> </figure> ### 在线方案 <a name="online"></a> 如果你不具备本地运行 Stable Diffusion 的条件,或者偏好简易的解决方案,同样有多种在线运行方案供你选择。 🤗 Hugging Face 提供的 [Space](https://huggingface.co/spaces) 应用中包含众多免费在线方案,例如 [Stable Diffusion 2.1 Demo](https://huggingface.co/spaces/stabilityai/stable-diffusion) 或 [camemduru webui](https://huggingface.co/spaces/camenduru/webui)。你可以 [查看更多在线服务](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services),甚至可以使用 🤗 [Diffusers](https://huggingface.co/docs/diffusers/index) 编写你专属的免费运行方案!你也可以查看简单的 [代码示例](https://colab.research.google.com/drive/1HebngGyjKj7nLdXfj6Qi0N1nh7WvD74z) 以快速上手。 **注意:** 本系列的部分内容将使用 image2image 等高级功能,有些在线服务未提供这些功能。 ### 生成概念艺术图片 <a name="generating"></a> 首先让我们生成一些概念图。只需几步,非常简单: 1. 输入提示语 2. 点击生成 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/sd-demo.png" alt="Stable Diffusion Demo Space"> </figure> 但问题是,生成的图片是你真正想要的结果吗?如果不是,如何才能获得呢?这里要提醒你一下,输入提示语,本身就需要一些技巧。所以如果你生成的第一张图片非你所想也没关系,网络上有非常多神级资源可以帮助改善提示语。你可以查看 Reddit 上的帖子了解 [书写指南](https://www.reddit.com/r/StableDiffusion/comments/x41n87/how_to_get_images_that_dont_suck_a/),也可以 [点击这里](https://youtube.com/shorts/8PGucf999nI?feature=share) 查看我做的视频,带你 20 秒了解图片生成提示语的技巧! 
上述书写技巧的共通之处是使用诸如 [lexica.art](https://lexica.art/) 网站之类的图片库来查看其他创作者使用提示语在 Stable Diffusion 生成的内容范式,从中寻找与你期望风格相似的图片,从而获得书写提示语的灵感。实际上没有所谓的标准答案,不过在你使用 Stable Diffusion 1.5 生成概念艺术图片时,建议遵循以下温馨提示: * 使用描述词: 描述词会限制生成图片的形式,如 isometric, simple, solid shapes 等。这样生成图片的美术风格在游戏中会更容易重现。 * 使用同义关键词: 一些关键词 (如 low poly) 虽然契合主题,但生成的图片质量通常较低。尝试找到它们的同义词,替换以保证生成质量。 * 使用指定艺术家的名字: 这种方式可以有效地引导模型采用指定艺术家的绘画风格,从而生成更高质量的图片。 我输入这样的提示语: `isometric render of a farm by a river, simple, solid shapes, james gilleard, atey ghailan` 生成图片如下: <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/concept.png" alt="Stable Diffusion Concept Art"> </figure> ### 使用 Unity 重现概念艺术 接下来,如何使用生成的概念艺术图片来制作游戏?本文将使用流行游戏引擎 [Unity](https://unity.com/) 来使游戏鲜活起来。 1. 使用带有 [通用渲染管道](https://docs.unity3d.com/Packages/[email protected]/manual/index.html) 的 [Unity 2021.9.3f1](https://unity.com/releases/editor/whats-new/2021.3.9) 创建一个 Unity 项目。 2. 使用基本形状绘制场景草图。例如,要添加一个立方体形状,**右键单击 -> 3D对象 (3D Object) -> 立方体 (Cube)** <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/gray.png" alt="Gray Scene"> </figure> 3. 设置 [材质](https://docs.unity3d.com/Manual/Materials.html): 可以参考前面生成的概念艺术图片对各部分进行设置。这里选用 Unity 内置的基本材质 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/color.png" alt="Scene with Materials"> </figure> 4. 设置 [光照](https://docs.unity3d.com/Manual/Lighting.html): 这里使用暖调自然光 (`#FFE08C`,强度 `1.25`) 和柔和环境光 (`#B3AF91`) <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/lighting.png" alt="Scene with Lighting"> </figure> 5. 设置 [摄像机](https://docs.unity3d.com/ScriptReference/Camera.html): 这里使用 **正交投影** 来匹配概念艺术图片的投影形式 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/camera.png" alt="Scene with Camera"> </figure> 6. 设置 [水着色器](https://assetstore.unity.com/packages/vfx/shaders/stylized-water-shader-71207): 可以给游戏场景增加一些水流,这里使用 Unity 资源商店中的程式化水着色器 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/water.png" alt="Scene with Water"> </figure> 7. 最后,设置后 [处理效果](https://docs.unity3d.com/Packages/[email protected]/manual/integration-with-post-processing.html): 这里使用 ACES 色调映射和 +0.2 曝光 <figure class="image text-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/post-processing.png" alt="Final Result"> </figure> 至此,一个简单上手而引人入胜的游戏场景,不到一天就创建完成了!如果你有任何问题,或者想跃跃欲试参与后续内容?现在来 [加入我们的 Discord 频道](https://hf.co/join/discord) 与我们交流吧! 在 [下一篇文章](https://huggingface.co/blog/zh/ml-for-games-2) 中,我们将 **使用 AI 进行游戏设计**,敬请关注我们获得后续更新!
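附:如果你想按前文提到的方式用 🤗 Diffusers 自行生成概念图,下面是一个最小化的代码草图(仅作示意,需要一块支持半精度的 GPU;推理步数、引导系数等参数为假设的常用取值,并非本文使用的确切设置):

```python
import torch
from diffusers import StableDiffusionPipeline

# 加载 Stable Diffusion v1.5(与上文本地方案使用的 checkpoint 一致)
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# 使用文中给出的提示语生成概念图
prompt = (
    "isometric render of a farm by a river, simple, solid shapes, "
    "james gilleard, atey ghailan"
)
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("concept.png")
```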
2
0
hf_public_repos/blog
hf_public_repos/blog/zh/password-git-deprecation.md
--- title: Hub 上的 Git 操作不再支持使用密码验证 thumbnail: /blog/assets/password-git-deprecation/thumbnail.png authors: - user: Sylvestre - user: pierric - user: sbrandeis translators: - user: chenglu --- # Hugging Face Hub: Git 操作认证的重要变更 在 Hugging Face,我们一直致力于提升服务安全性,因此,我们将对通过 Git 与 Hugging Face Hub 交互时的认证方式进行更改。从 **2023 年 10 月 1 日** 开始,我们将不再接受密码作为命令行 Git 操作的认证方式。我们推荐使用更安全的认证方法,例如用个人访问令牌替换密码或使用 SSH 密钥。 ## 背景 近几个月来,我们已经实施了各种安全增强功能,包括登录提醒和 Git 中对 SSH 密钥的支持,不过,用户仍然可以使用用户名和密码进行 Git 操作的认证。 为了进一步提高安全性,我们现在转向基于令牌或 SSH 密钥的认证。与传统的密码认证相比,基于令牌和 SSH 密钥的认证有多个优点,包括唯一性、可撤销和随机特性,这些都增强了安全性和控制。 ## 立即需采取的行动 如果你当前使用 HF 账户密码进行 Git 认证,请在 **2023 年 10 月 1 日** 之前切换到使用个人访问令牌或 SSH 密钥。 ### 切换到个人访问令牌 你需要为你的账户生成一个访问令牌;你可以按照 [这个文档](https://huggingface.co/docs/hub/security-tokens#user-access-tokens) 中提到的方法来生成一个访问令牌。 生成访问令牌后,你可以使用以下命令更新你的 Git 仓库: ``` $: git remote set-url origin https://<user_name>:<token>@huggingface.co/<user_name>/<repo_name> $: git pull origin ``` 或者,如果你克隆了一个新的仓库,当你的 Git 凭证管理器要求你提供认证凭证时,你可以直接输入令牌来替代密码。 ### 切换到 SSH 密钥 按照我们的 [指南文档](https://huggingface.co/docs/hub/security-git-ssh) 生成 SSH 密钥并将其添加到你的账户。 然后,你可以使用以下命令更新你的 Git 仓库: ``` $: git remote set-url origin [email protected]:<user_name>/<repo_name> ``` ## 时间表 在接下来的时间里,这个变动将以下面的时间表来执行: - 现在开始起:依赖密码进行 Git 认证的用户可能会收到电子邮件,敦促他们更新认证方法。 - 10 月 1 日:个人访问令牌或 SSH 密钥将成为所有 Git 操作的强制要求。 如需更多详情,可以通过 [[email protected]](mailto:[email protected]) 联系支持团队,以解决你的疑问或顾虑。
3
0
hf_public_repos/blog
hf_public_repos/blog/zh/_events.yml
# "date" attribute should be end date of an event in `Mon DD, YYYY` format (e.g. Feb 3, 2014) # "date_formatted" attribute is optional if you want to control the format/look of "date" on hf.co # (e.g. specifying event date range like Jan 24 to Feb 7, 2022) - name: Hugging Face AI 头像变装秀 link: https://hf.link/tx date: July 5, 2023 date_formatted: July 5 to July 31, 2023 description: 参与我们在官方小红书账号上发起的 AI 头像换装秀活动。 - name: Hugging Face JAX/Diffusers 社区冲刺活动北京见面会 link: https://services.google.cn/fb/forms/huggingfaceevent/?channel=hfblog date: Apr 20, 2023 date_formatted: Apr 18 to Apr 20, 2023 description: 参与我们在谷歌北京办公室举办的参赛者线下见面会。 - name: 造个 🤖️ 去瀛海威广场聚会啦! link: https://mp.weixin.qq.com/s/M5vjicNG1uBdCQzQtQU9yw date: Mar 19, 2023 date_formatted: Mar 17 to Mar 19, 2023 description: 百姓 AI 和 Hugging Face 联合推出的机器人黑客松活动。 - name: 飞桨黑客马拉松第四期:AIGC 趣味创意赛 link: https://aistudio.baidu.com/aistudio/competition/detail/860/0/introduction date: May 20, 2023 date_formatted: Feb 28 to May 20, 2023 description: 与飞浆联合推出的,使用扩散模型并结合 LoRA、DreamBooth 等技术进行模型微调的黑客松活动。 - name: Hugging Face DreamBooth 微调黑客松 link: https://www.heywhale.com/home/competition/63bbfb98de6c0e9cdb0d9dd5 date: Feb 10, 2023 date_formatted: Jan 9 to Feb 10, 2023 description: 与和鲸社区共同推出,助力中国社区成员参与全球的 DreamBooth 微调黑客松活动。 - name: Hugging Face 社区投稿 link: https://bit.ly/hf-tougao date: Feb 9, 2023 description: 我们正在努力丰富中文社区的内容生态,希望邀请社区的成员们共同添砖加瓦,共同推进让 AI 惠及大众。 - name: 抱抱脸本地化小组志愿者招募 link: https://bit.ly/baobaolian-new date: Feb 9, 2023 description: 参与翻译 Hugging Face 最新课程、视频、博客文章等内容。 - name: 直播 (回放):“生产环境中的小样本学习” 研讨会 link: https://mp.weixin.qq.com/s/uJW63vyDq3igP8Rw_3EEwA date: Dec 14, 2022 description: 由 Hugging Face 和 Intel AI 的研究员共同出席,带领大家训练和部署一个小样本的语言模型。 - name: 直播 (回放):ChatGPT 背后的“功臣” —— RLHF 介绍 link: https://mp.weixin.qq.com/s/iaKOp1F-nJlgSbRF2h0r_g date: Dec 13, 2022 description: RLHF,从人类反馈中强化学习,它是 ChatGPT 背后的“功臣”。 - name: 直播 (回放):Whisper 微调冲刺直播 link: https://mp.weixin.qq.com/s/7YQiUacdH7SwLGAWnSwDzw date: Dec 6, 2022 description: 邀请 OpenAI 和 MetaAI 团队成员介绍 Whisper 模型和 VoxPopuli 数据集的最新研究。 - name: 直播 (回放):扩散模型分享 link: https://space.bilibili.com/1740664937/channel/seriesdetail?sid=2839427&ctype=0 date: Dec 1, 2022 description: 为了支持扩散模型课程的发布,我们邀请了重磅嘉宾做了一次围绕扩散模型的分享。 - name: Hugging Face ❤️ arXiv 社区冲刺活动 link: https://mp.weixin.qq.com/s/twygNE7QpZEGjr-baRskPQ date: Nov 25, 2022 description: 将一些 arXiv 论文中提及的开源模型制作成 Hugging Face Space 应用来进行展示,提升易用性。
4
0
hf_public_repos/blog
hf_public_repos/blog/zh/codellama.md
--- title: "Code Llama:Llama 2 学会写代码了!" thumbnail: /blog/assets/160_codellama/thumbnail.jpg authors: - user: philschmid - user: osanseviero - user: pcuenq - user: lewtun - user: lvwerra - user: loubnabnl - user: ArthurZ - user: joaogante translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # Code Llama: Llama 2 学会写代码了! ## 引言 Code Llama 是为代码类任务而生的一组最先进的、开放的 [Llama 2](https://huggingface.co/blog/zh/llama2) 模型,我们很高兴能将其集成入 Hugging Face 生态系统!Code Llama 使用与 Llama 2 相同的社区许可证,且可商用。 今天,我们很高兴能发布 Hugging Face 对 Code Llama 的全面支持 , 包括: - Hub 上的模型支持,包括模型卡及许可证 - Transformers 已集成 Code Llama - TGI 已集成 Code Llama,以支持对其进行快速高效的产品级推理 - 推理终端 (Inference Endpoints) 已集成 Code Llama - 对 Code Llama 的代码基准测试结果已发布 代码大语言模型的发展对于软件工程师来说无疑是振奋人心的,因为这意味着他们可以通过 IDE 中的代码补全功能来提高生产力,并利用其来处理重复或烦人的任务,例如为代码编写文档字符串或创建单元测试。 ## 目录 - [引言](#引言) - [目录](#目录) - [Code Llama 简介](#code-llama-简介) - [如何使用 Code Llama?](#如何使用-code-llama) - [演示](#演示) - [Transformers](#transformers) - [代码补全](#代码补全) - [代码填充](#代码填充) - [对话式指令](#对话式指令) - [4 比特加载](#4-比特加载) - [使用 TGI 和推理终端](#使用-tgi-和推理终端) - [评估](#评估) - [其他资源](#其他资源) ## Code Llama 简介 Code Llama 包含 3 个不同参数量的版本,分别为: 70 亿参数版、130 亿参数版 以及 340 亿参数版。在训练基础模型时,先用同等参数量的 Llama 2 模型初始化权重,然后在 5000 亿词元的代码数据集上训练。 Meta 还对训得的基础模型进行了两种不同风格的微调,分别为: Python 专家版 (再加 1000 亿个额外词元) ; 以及指令微调版,其可以理解自然语言指令。 这些模型在 Python、C++、Java、PHP、C#、TypeScript 和 Bash 中都展现出最先进的性能。7B 和 13B 基础版和指令版支持完形填空,因此非常适合用作代码助手。 Code Llama 基于 16k 上下文窗口训练。此外,这三个尺寸的模型还进行了额外的长上下文微调,使其上下文窗口最多可扩展至 10 万词元。 受益于 RoPE 扩展方面的最新进展,将 Llama 2 的 4k 上下文窗口增加到 Code Llama 的 16k (甚至可以外插至 100k) 成为可能。社区发现可以对 Llama 的位置嵌入进行线性插值或频域插值,这使得通过微调让基础模型轻松扩展到更大的上下文窗口成为可能。在 Code Llama 中,他们把频域缩放和松弛技术二者结合起来: 微调长度是缩放后的预训练长度的一小部分。这个做法赋予了模型强大的外推能力。 ![训练过程](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/160_codellama/training-process.jpg 训练过程 ) 第一步是在 5000 亿词元的公开代码数据集上训练出一个模型。该数据集中除了有代码数据集外,还包含一些自然语言数据集,例如有关代码和代码片段的讨论,且最终数据集是使用近似去重法去过重的。不幸的是,Meta 没有披露有关该数据集的更多信息。 在对模型进行指令微调时,使用了两个数据集: 为 Llama 2 Chat 收集的指令微调数据集和自指令数据集。自指令数据集收集了 Llama 2 编制出的编程面试问题,然后使用 Code Llama 生成单元测试和解答,最后通过执行测试来评估解答。 ## 如何使用 Code Llama? `Transformers` 从 4.33 版开始支持 Code Llama。在此之前,需要从主分支进行源代码安装才行。 ### 演示 我们准备了 **[这个 Space](https://huggingface.co/spaces/codellama/codellama-playground)** 或下面的 Playground 以供大家尝试 Code Llama 模型 (130 亿参数!): <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.28.3/gradio.js"> </script> <gradio-app theme_mode="light" space="codellama/codellama-playground"></gradio-app> 这个演示背后使用了 Hugging Face [TGI](https://github.com/huggingface/text-generation-inference),[HuggingChat](https://huggingface.co/chat) 也用了相同的技术,具体内容见下文。 你还可以玩玩 [这个聊天机器人](https://huggingface.co/spaces/codellama/codellama-13b-chat),或者复制一份到自己的账号下以供你使用 – 它是自含的,因此你可以随心所欲地修改代码! 
### Transformers 从最新发布的 `transformers` 4.33 开始,你可以在 Code Llama 上应用 HF 生态系统中的所有工具,例如: - 训练和推理脚本和示例 - 安全的文件格式 (`safetensors` ) - 与 `bitsandbytes` (4 比特量化) 和 PEFT 等工具结合使用 - 运行模型生成所需的工具及辅助代码 - 导出模型以进行部署的机制 在 `transformers` 4.33 发布之前,用户需要从主分支源码安装 `transformers` 。 ```bash !pip install git+https://github.com/huggingface/transformers.git@main accelerate ``` #### 代码补全 我们可以使用 7B 和 13B 模型进行文本/代码补全或填充。下述代码演示了如何使用 `pipeline` 接口来进行文本补全。运行时,只需选择 GPU 即可在 Colab 的免费 GPU 上运行。 ```python from transformers import AutoTokenizer import transformers import torch tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf") pipeline = transformers.pipeline( "text-generation", model="codellama/CodeLlama-7b-hf", torch_dtype=torch.float16, device_map="auto", ) sequences = pipeline( 'def fibonacci(', do_sample=True, temperature=0.2, top_p=0.9, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, max_length=100, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` 其输出如下: ```python Result: def fibonacci(n): if n == 0: return 0 elif n == 1: return 1 else: return fibonacci(n-1) + fibonacci(n-2) def fibonacci_memo(n, memo={}): if n == 0: return 0 elif n == 1: return ``` Code Llama 虽然专精于代码理解,但其仍是一个语言模型。你仍然可以使用相同的生成策略来自动完成注释或自然语言文本。 #### 代码填充 这是代码模型才能完成的专门任务。该模型经过训练后,可以生成与给定上下文最匹配的代码 (包括注释)。这是代码助理的典型使用场景: 要求它们根据上下文填充当前光标处的代码。 此任务需要使用 7B 和 13B 的 **基础** 或 **指令** 模型。任何 34B 或 Python 版模型不能用于此任务。 填充类任务需要在生成时使用与训练时相同格式的输入文本,因为训练时会使用特殊的分隔符来区分提示的不同部分。幸运的是, `transformers` 的 `CodeLlamaTokenizer` 已经帮你把这事做了,如下所示: ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model_id = "codellama/CodeLlama-7b-hf" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16 ).to("cuda") prompt = '''def remove_non_ascii(s: str) -> str: """ <FILL_ME> return result ''' input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to("cuda") output = model.generate( input_ids, max_new_tokens=200, ) output = output[0].to("cpu") filling = tokenizer.decode(output[input_ids.shape[1]:], skip_special_tokens=True) print(prompt.replace("<FILL_ME>", filling)) ``` 输出如下: ```Python def remove_non_ascii(s: str) -> str: """ Remove non-ASCII characters from a string. Args: s: The string to remove non-ASCII characters from. Returns: The string with non-ASCII characters removed. """ result = "" for c in s: if ord(c) < 128: result += c return result ``` 在底层,分词器会 [自动按 `<fill_me>` 分割](https://huggingface.co/docs/transformers/main/model_doc/code_llama#transformers.CodeLlamaTokenizer.fill_token) 并生成一个格式化的输入字符串,其格式与 [训练时的格式](https://github.com/facebookresearch/codellama/blob/cb51c14ec761370ba2e2bc351374a79265d0465e/llama/generation.py#L402) 相同。这样做既避免了用户自己格式化的很多麻烦,也避免了一些很难调试的陷阱,例如词元粘合 (token glueing)。 #### 对话式指令 如上所述,基础模型可用于补全和填充。Code Llama 还包含一个适用于对话场景的指令微调模型。 为此类任务准备输入时,我们需要一个提示模板。一个例子是我们在 [Llama 2 博文](https://huggingface.co/blog/zh/llama2#如何提示-Llama-2) 中描述的模板,如下: ``` <s>[INST] <<SYS>> {{ system_prompt }} <</SYS>> {{ user_msg_1 }} [/INST]{{ model_answer_1 }} </s><s>[INST]{{ user_msg_2 }} [/INST] ``` 请注意,系统提示 ( `system prompt` ) 是可选的 - 没有它模型也能工作,但你可以用它来进一步指定模型的行为或风格。例如,如果你希望获得 JavaScript 的答案,即可在此声明。在系统提示之后,你需要提供对话交互历史: 用户问了什么以及模型回答了什么。与填充场景一样,你需要注意分隔符的使用。输入的最后必须是新的用户指令,这对模型而言是让其提供答案的信号。 以下代码片段演示了如何在实际工作中使用该模板。 1. **首次用户输入,无系统提示** ```python user = 'In Bash, how do I list all text files in the current directory (excluding subdirectories) that have been modified in the last month?' 
prompt = f"<s>[INST]{user.strip()} [/INST]" inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") ``` 1. **首次用户查询,有系统提示** ```python system = "Provide answers in JavaScript" user = "Write a function that computes the set of sums of all contiguous sublists of a given list." prompt = f"<s><<SYS>>\\n{system}\\n<</SYS>>\\n\\n{user}" inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") ``` 1. **含对话历史的多轮对话** 该过程与 [Llama 2](https://huggingface.co/blog/zh/llama2#如何提示-Llama-2) 中的过程相同。为了最清楚起见,我们没有使用循环或泛化此示例代码: ```python system = "System prompt" user_1 = "user_prompt_1" answer_1 = "answer_1" user_2 = "user_prompt_2" answer_2 = "answer_2" user_3 = "user_prompt_3" prompt = f"<<SYS>>\\n{system}\\n<</SYS>>\\n\\n{user_1}" prompt = f"<s>[INST]{prompt.strip()} [/INST]{answer_1.strip()} </s>" prompt += f"<s>[INST]{user_2.strip()} [/INST]{answer_2.strip()} </s>" prompt += f"<s>[INST]{user_3.strip()} [/INST]" inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") ``` #### 4 比特加载 将 Code Llama 集成到 Transformers 中意味着我们可以立即获得 4 比特加载等高级功能的支持。这使得用户可以在英伟达 3090 卡等消费类 GPU 上运行大型的 32B 参数量模型! 以下是在 4 比特模式下运行推理的方法: ```Python from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig import torch model_id = "codellama/CodeLlama-34b-hf" quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16 ) tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, quantization_config=quantization_config, device_map="auto", ) prompt = 'def remove_non_ascii(s: str) -> str:\n """ ' inputs = tokenizer(prompt, return_tensors="pt").to("cuda") output = model.generate( inputs["input_ids"], max_new_tokens=200, do_sample=True, top_p=0.9, temperature=0.1, ) output = output[0].to("cpu") print(tokenizer.decode(output)) ``` ### 使用 TGI 和推理终端 [TGI](https://github.com/huggingface/text-generation-inference) 是 Hugging Face 开发的生产级推理容器,可用于轻松部署大语言模型。它包含连续批处理、流式输出、基于张量并行的多 GPU 快速推理以及生产级的日志记录和跟踪等功能。 你可以在自己的基础设施上使用 TGI,也可以使用 Hugging Face 的 [推理终端](https://huggingface.co/inference-endpoints)。要部署 Codellama 2 模型,请登陆其 [模型页面](https://huggingface.co/codellama),然后单击 [Deploy -> Inference Endpoints](https://huggingface.co/codellama/CodeLlama-7b-hf) 按钮。 - 推理 7B 模型,我们建议选择“GPU [medium] - 1x Nvidia A10G”。 - 推理 13B 模型,我们建议选择“GPU [xlarge] - 1x Nvidia A100”。 - 推理 34B 模型,我们建议启用 `bitsandbytes` 量化并选择“GPU [1xlarge] - 1x Nvidia A100”或“GPU [2xlarge] - 2x Nvidia A100” _注意: 你可能需要发邮件给 **[[email protected]](mailto:[email protected])** 申请配额升级才能访问 A100_ 你可以在我们的博文中详细了解如何 [使用 Hugging Face 推理终端部署 LLM](https://huggingface.co/blog/zh/inference-endpoints-llm),该 [博文](https://huggingface.co/blog/zh/inference-endpoints-llm) 还包含了有关其支持的超参以及如何使用 Python 和 Javascript API 流式生成文本的相关知识。 ## 评估 代码语言模型通常在 HumanEval 等数据集上进行基准测试,其包含了一系列编程题,我们将函数签名和文档字符串输入给模型,模型需要完成函数体代码的编写。接着是运行一组预定义的单元测试来验证所提出的解答。最后是报告通过率,即有多少解答通过了所有测试。pass@1 度量了模型一次生成即通过的频率,而 pass@10 描述了模型生成 10 个候选解答其中至少有一个解答通过的频率。 虽然 HumanEval 是一个 Python 基准测试,但社区付出了巨大努力将其转成更多编程语言,从而实现更全面的评估。其中一种方法是 [MultiPL-E](https://github.com/nuprl/MultiPL-E),它将 HumanEval 翻译成十多种编程语言。我们正在基于其制作一个 [多语言代码排行榜](https://huggingface.co/spaces/bigcode/multilingual-code-evals),这样社区就可以用它来比较不同模型在各种编程语言上的表现,以评估哪个模型最适合他们的需求。 | 模型 | 许可证 | 训练数据集是否已知 | 是否可商用 | 预训练词元数 | Python | JavaScript | Leaderboard Avg Score | | ---------------------- | ------------------ | ------------- | --------------- | --------------------------- | ------ | ---------- | --------------------- | | CodeLlaMa-34B | 
Llama 2 license | ❌ | ✅ | 2,500B | 45.11 | 41.66 | 33.89 | | CodeLlaMa-13B | Llama 2 license | ❌ | ✅ | 2,500B | 35.07 | 38.26 | 28.35 | | CodeLlaMa-7B | Llama 2 license | ❌ | ✅ | 2,500B | 29.98 | 31.8 | 24.36 | | CodeLlaMa-34B-Python | Llama 2 license | ❌ | ✅ | 2,620B | 53.29 | 44.72 | 33.87 | | CodeLlaMa-13B-Python | Llama 2 license | ❌ | ✅ | 2,620B | 42.89 | 40.66 | 28.67 | | CodeLlaMa-7B-Python | Llama 2 license | ❌ | ✅ | 2,620B | 40.48 | 36.34 | 23.5 | | CodeLlaMa-34B-Instruct | Llama 2 license | ❌ | ✅ | 2,620B | 50.79 | 45.85 | 35.09 | | CodeLlaMa-13B-Instruct | Llama 2 license | ❌ | ✅ | 2,620B | 50.6 | 40.91 | 31.29 | | CodeLlaMa-7B-Instruct | Llama 2 license | ❌ | ✅ | 2,620B | 45.65 | 33.11 | 26.45 | | StarCoder-15B | BigCode-OpenRail-M | ✅ | ✅ | 1,035B | 33.57 | 30.79 | 22.74 | | StarCoderBase-15B | BigCode-OpenRail-M | ✅ | ✅ | 1,000B | 30.35 | 31.7 | 22.4 | | WizardCoder-15B | BigCode-OpenRail-M | ❌ | ✅ | 1,035B | 58.12 | 41.91 | 32.07 | | OctoCoder-15B | BigCode-OpenRail-M | ✅ | ✅ | 1,000B | 45.3 | 32.8 | 24.01 | | CodeGeeX-2-6B | CodeGeeX License | ❌ | ❌ | 2,000B | 33.49 | 29.9 | 21.23 | | CodeGen-2.5-7B-Mono | Apache-2.0 | ✅ | ✅ | 1400B | 45.65 | 23.22 | 12.1 | | CodeGen-2.5-7B-Multi | Apache-2.0 | ✅ | ✅ | 1400B | 28.7 | 26.27 | 20.04 | **注意:** 上表中的分数来自我们的代码排行榜,所有模型均使用相同的设置。欲了解更多详情,请参阅 [排行榜](https://huggingface.co/spaces/bigcode/multilingual-code-evals)。 ## 其他资源 - [Hub 上的模型](https://huggingface.co/codellama) - [论文](https://huggingface.co/papers/2308.12950) - [Meta 官宣博文](https://ai.meta.com/blog/code-llama-large-language-model-coding/) - [负责任使用指南](https://ai.meta.com/llama/responsible-use-guide/) - [演示 (代码补全,流式生成)](https://huggingface.co/spaces/codellama/codellama-playground) - [演示 (指令微调、自含、可复制到自己的空间并修改)](https://huggingface.co/spaces/codellama/codellama-13b-chat)
5
0
hf_public_repos/blog
hf_public_repos/blog/zh/autoformer.md
--- title: "Transformer 模型能够有效地进行时间序列预测 (使用 Autoformer)" thumbnail: /blog/assets/148_autoformer/thumbnail.png authors: - user: elisim guest: true - user: kashif - user: nielsr translators: - user: hugging-hoi2022 - user: zhongdongy proofreader: true --- # Transformer 模型能够有效地进行时间序列预测 (使用 Autoformer) <script async defer src="https://unpkg.com/medium-zoom-element@0/dist/medium-zoom-element.min.js"></script> <a target="_blank" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/autoformer-transformers-are-effective.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> ## 简介 几个月前,我们介绍了 [Informer](https://huggingface.co/blog/informer) 这个模型,相关论文 ([Zhou, Haoyi, et al., 2021](https://arxiv.org/abs/2012.07436)) 是一篇获得了 AAAI 2021 最佳论文奖的时间序列论文。我们也展示了一个使用 Informer 进行多变量概率预测的例子。在本文中,我们讨论以下问题: [Transformer 模型对时间序列预测真的有效吗?](https://arxiv.org/abs/2012.07436)。我们给出的答案是,它们真的有效。 首先,我们将会提供一些实验证据,展示其真正的有效性。我们的对比实验将表明, _DLinear_ 这个简单线性模型并没有像说的那样比 transformer 好。当我们在同等模型大小和相同设定的情况下对比时,我们发现基于 transformer 的模型在我们关注的测试标准上表现得更好。其次,我们将会介绍 _Autoformer_ 模型,相关论文 ([Wu, Haixu, et al., 2021](https://arxiv.org/abs/2106.13008)) 在 Informer 模型问世后发表在 NeurIPS 2021 上。Autoformer 的模型现在已经可以在 🤗 Transformers 中 [使用](https://huggingface.co/docs/transformers/main/en/model_doc/autoformer)。最后,我们还会讨论 _DLinear_ 模型,该模型是一个简单的前向网络,使用了 Autoformer 中的分解层 (decomposition layer)。DLinear 模型是在 [Are Transformers Effective for Time Series Forecasting?](https://arxiv.org/abs/2205.13504) 这篇论文中提出的,文中声称其性能在时间序列预测领域超越了 transformer 系列的算法。 下面我们开始! ## 评估 Transformer 系列模型 和 DLinear 模型 在 AAAI 2023 的论文 [Are Transformers Effective for Time Series Forecasting?](https://arxiv.org/abs/2205.13504) 中,作者声称 transformer 系列模型在时间序列预测方面并不有效。他们拿基于 transformer 的模型与一个简单的线性模型 _DLinear_ 作对比。DLinear 使用了 Autoformer 中的 decomposition layer 结构 (下文将会介绍),作者声称其性能超越了基于 transformer 的模型。但事实真的是这样吗?我们接下来看看。 | Dataset | Autoformer (uni.) 
MASE | DLinear MASE | |:-----------------:|:----------------------:|:-------------:| | `Traffic` | 0.910 | 0.965 | | `Exchange-Rate` | 1.087 | 1.690 | | `Electricity` | 0.751 | 0.831 | 上表展示了 Autoformer 和 DLinear 在三个论文中用到的数据集上的表现。结果说明 Autoformer 在三个数据集上表现都超越了 DLinear 模型。 接下来,我们将介绍 Autoformer 和 DLinear 模型,演示我们如何在上表 Traffic 数据集上对比它们的性能,并为结果提供一些可解释性。 **先说结论:** 一个简单的线性模型可能在某些特定情况下更有优势,但可能无法像 transformer 之类的复杂模型那样处理协方差信息。 ## Autoformer 详细介绍 Autoformer 基于传统的时间序列方法: 把时间序列分解为季节性 (seasonality) 以及趋势 - 周期 (trend-cycle) 这些要素。这通过加入分解层 ( _Decomposition Layer_ ) 来实现,以此来增强模型获取这些信息的能力。此外,Autoformer 中还独创了自相关 (auto-correlation) 机制,替换掉了传统 transformer 中的自注意力 (self-attention)。该机制使得模型可以利用注意力机制中周期性的依赖,提升了总体性能。 下面,我们将深入探讨 Autoformer 的这两大主要贡献: 分解层 ( _Decomposition Layer_ ) 和自相关机制 ( _Autocorrelation Mechanism_ )。相关代码也会提供出来。 ### 分解层 分解是一个时间序列领域十分常用的方法,但在 Autoformer 以前都没有被密集集成入深度学习模型中。我们先简单介绍这一概念,随后会使用 PyTorch 代码演示这一思路是如何应用到 Autoformer 中的。 #### 时间序列分解 在时间序列分析中,分解 ([decomposition](https://en.wikipedia.org/wiki/Decomposition_of_time_series)) 是把一个时间序列拆分成三个系统性要素的方法: 趋势周期 (trend-cycle) 、季节性变动 (seasonal variation) 和随机波动 (random fluctuations)。趋势要素代表了时间序列的长期走势方向; 季节要素反映了一些反复出现的模式,例如以一年或一季度为周期出现的模式; 而随机 (无规律) 因素则反映了数据中无法被上述两种要素解释的随机噪声。 有两种主流的分解方法: 加法分解和乘法分解,这在 [statsmodels](https://www.statsmodels.org/dev/generated/statsmodels.tsa.seasonal.seasonal_decompose.html) 这个库里都有实现。通过分解时间序列到这三个要素,我们能更好地理解和建模数据中潜在的模式。 但怎样把分解集成进 transformer 结构呢?我们可以参考参考 Autoformer 的做法。 #### Autoformer 中的分解 | ![autoformer_architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/148_autoformer/autoformer_architecture.png) | |:--:| | Autoformer 结构 (来自[论文](https://arxiv.org/abs/2106.13008)) | Autoformer 把分解作为一个内部计算操作集成到模型中,如上图所示。可以看到,编码器和解码器都使用了分解模块来集合 trend-cyclical 信息,并从序列中渐进地提取 seasonal 信息。这种内部分解的概念已经从 Autoformer 中展示了其有效性。所以很多其它的时间序列论文也开始采用这一方法,例如 FEDformer ([Zhou, Tian, et al., ICML 2022](https://arxiv.org/abs/2201.12740)) 和 DLinear [(Zeng, Ailing, et al., AAAI 2023)](https://arxiv.org/abs/2205.13504),这更说明了其在时间序列建模中的意义。 现在,我们正式地给分解层做出定义: 对一个长度为 $L$ 的序列 $\mathcal{X} \in \mathbb{R}^{L \times d}$,分解层返回的 $\mathcal{X}_\textrm{trend} 和 \mathcal{X}_\textrm{seasonal}$ 定义如下: $$ \mathcal{X}_\textrm{trend} = \textrm{AvgPool(Padding(} \mathcal{X} \textrm{))} \\ \mathcal{X}_\textrm{seasonal} = \mathcal{X} - \mathcal{X}_\textrm{trend} $$ 对应的 PyTorch 代码实现是: ```python import torch from torch import nn class DecompositionLayer(nn.Module): """ Returns the trend and the seasonal parts of the time series. 
""" def __init__(self, kernel_size): super().__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=1, padding=0) # moving average def forward(self, x): """Input shape: Batch x Time x EMBED_DIM""" # padding on the both ends of time series num_of_pads = (self.kernel_size - 1) // 2 front = x[:, 0:1, :].repeat(1, num_of_pads, 1) end = x[:, -1:, :].repeat(1, num_of_pads, 1) x_padded = torch.cat([front, x, end], dim=1) # calculate the trend and seasonal part of the series x_trend = self.avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1) x_seasonal = x - x_trend return x_seasonal, x_trend ``` 可见,代码非常简单,可以很方便地用在其它模型中,正如 DLinear 那样。下面,我们讲解第二个创新点: _注意力 (自相关) 机制_。 ### 注意力 (自相关) 机制 | ![autoformer_autocorrelation_vs_full_attention](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/148_autoformer/autoformer_autocorrelation_vs_full_attention.png) | |:--:| | 最原始的注意力机制和自相关机制 (图片来自[论文](https://arxiv.org/abs/2106.13008)) | 除了分解层之外,Autoformer 还使用了一个原创的自相关 (autocorrelation) 机制,可以完美替换自注意力 (self-attention) 机制。在 [最原始的时间序列 transformer 模型](https://huggingface.co/docs/transformers/model_doc/time_series_transformer) 中,注意力权重是在时域计算并逐点聚合的。而从上图中可以看出,Autoformer 不同的是它在频域计算这些 (使用 [快速傅立叶变换](https://en.wikipedia.org/wiki/Fast_Fourier_transform)),然后通过时延聚合它们。 接下来部分,我们深入细节,并使用代码作出讲解。 #### 时域的注意力机制 | ![autoformer_autocorrelation_only_attention](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/148_autoformer/autoformer_autocorrelation_only_attention.png) | |:--:| | 借助 FFT 在频域计算注意力权重 (图片来自[论文](https://arxiv.org/abs/2106.13008)) | 理论上讲,给定一个时间延迟 $\tau$,一个离散变量的 _自相关性_ $y$ 可以用来衡量这个变量当前时刻 $t$ 的值和过去时刻 $t-\tau$ 的值之间的“关系”(皮尔逊相关性,pearson correlation): $$ \textrm{Autocorrelation}(\tau) = \textrm{Corr}(y_t, y_{t-\tau}) $$ 使用自相关性,Autoformer 提取了 query 和 key 之间基于频域的相互依赖,而不是像之前那样两两之间的点乘。可以把这个操作看成是自注意力中 $QK^T$ 的替换。 实际操作中,query 和 key 之间的自相关是通过 FFT 一次性针对 **所有时间延迟** 计算出来的。通过这种方法,自相关机制达到了 $O(L \log L)$ 的时间复杂度 ( $L$ 是输入时间长度),这个速度和 [Informer 的 ProbSparse attention](https://huggingface.co/blog/informer#probsparse-attention) 接近。值得一提的是,使用 FFT 计算自相关性的理论基础是 [Wiener–Khinchin theorem](https://en.wikipedia.org/wiki/Wiener%E2%80%93Khinchin_theorem),这里我们不细讲了。 现在,我们来看看相应的 PyTorch 代码: ```python import torch def autocorrelation(query_states, key_states): """ Computes autocorrelation(Q,K) using `torch.fft`. Think about it as a replacement for the QK^T in the self-attention. Assumption: states are resized to same shape of [batch_size, time_length, embedding_dim]. 
""" query_states_fft = torch.fft.rfft(query_states, dim=1) key_states_fft = torch.fft.rfft(key_states, dim=1) attn_weights = query_states_fft * torch.conj(key_states_fft) attn_weights = torch.fft.irfft(attn_weights, dim=1) return attn_weights ``` 代码非常简洁!😎 请注意这只是 `autocorrelation(Q,K)` 的部分实现,完整实现请参考 🤗 Transformers 中的代码。 接下来,我们将看到如何使用时延值聚合我们的 `attn_weights` ,这个过程被称为时延聚合 ( _Time Delay Aggregation_ )。 #### 时延聚合 | ![autoformer_autocorrelation_only_aggregation](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/148_autoformer/autoformer_autocorrelation_only_aggregation.png) | |:--:| | 通过时延来聚合,图片来自 [Autoformer 论文](https://arxiv.org/abs/2106.13008) | 我们用 $\mathcal{R_{Q,K}}$ 来表示自相关 (即 `attn_weights` )。那么问题是: 我们应该如何聚合这些 $\mathcal{R_{Q,K}}(\tau_1), \mathcal{R_{Q,K}}(\tau_2), …, \mathcal{R_{Q,K}}(\tau_k)$ 到 $\mathcal{V}$ 上面?在标准的自注意力机制中,这种聚合通过点乘完成。但在 Autoformer 中,我们使用了一种不同的方法。首先我们在时延 $\tau_1, \tau_2, … \tau_k$ 上对齐 $\mathcal{V}$,计算在这些时延下它对应的值,这个操作叫作 _Rolling_ 。接下来,我们将对齐的 $\mathcal{V}$ 和自相关的值进行逐点的乘法运算。在上图中,你可以看到在左边是基于时延对 $\mathcal{V}$ 进行的 Rolling 操作; 而右边就展示了与自相关进行的逐点乘法。 整个过程可以用以下公式总结: $$ \tau_1, \tau_2, … \tau_k = \textrm{arg Top-k}(\mathcal{R_{Q,K}}(\tau)) \ \hat{\mathcal{R}}\mathcal{ _{Q,K}}(\tau _1), \hat{\mathcal{R}}\mathcal{_ {Q,K}}(\tau _2), …, \hat{\mathcal{R}}\mathcal{_ {Q,K}}(\tau _k) = \textrm{Softmax}(\mathcal{R_ {Q,K}}(\tau _1), \mathcal{R_ {Q,K}}(\tau_2), …, \mathcal{R_ {Q,K}}(\tau_k)) \ \textrm{Autocorrelation-Attention} = \sum_{i=1}^k \textrm{Roll}(\mathcal{V}, \tau_i) \cdot \hat{\mathcal{R}}\mathcal{_{Q,K}}(\tau _i) $$ 就是这样!需要注意的是,$k$ 是一个超参数,我们称之为 `autocorrelation_factor` (类似于 [Informer](https://huggingface.co/blog/informer) 里的 `sampling_factor` ) ; 而 softmax 是在乘法操作之前运用到自相关上面的。 现在,我们已经可以看看最终的代码了: ```python import torch import math def time_delay_aggregation(attn_weights, value_states, autocorrelation_factor=2): """ Computes aggregation as value_states.roll(delay)* top_k_autocorrelations(delay). The final result is the autocorrelation-attention output. Think about it as a replacement of the dot-product between attn_weights and value states. The autocorrelation_factor is used to find top k autocorrelations delays. Assumption: value_states and attn_weights shape: [batch_size, time_length, embedding_dim] """ bsz, num_heads, tgt_len, channel = ... 
time_length = value_states.size(1) autocorrelations = attn_weights.view(bsz, num_heads, tgt_len, channel) # find top k autocorrelations delays top_k = int(autocorrelation_factor * math.log(time_length)) autocorrelations_mean = torch.mean(autocorrelations, dim=(1, -1)) # bsz x tgt_len top_k_autocorrelations, top_k_delays = torch.topk(autocorrelations_mean, top_k, dim=1) # apply softmax on the channel dim top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1) # bsz x top_k # compute aggregation: value_states.roll(delay)* top_k_autocorrelations(delay) delays_agg = torch.zeros_like(value_states).float() # bsz x time_length x channel for i in range(top_k): value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays[i]), dims=1) top_k_at_delay = top_k_autocorrelations[:, i] # aggregation top_k_resized = top_k_at_delay.view(-1, 1, 1).repeat(num_heads, tgt_len, channel) delays_agg += value_states_roll_delay * top_k_resized attn_output = delays_agg.contiguous() return attn_output ``` 完成!Autoformer 模型现在已经可以在 🤗 Transformers 中 [使用](https://huggingface.co/docs/transformers/main/en/model_doc/autoformer) 了,名字就叫 `AutoformerModel` 。 针对这个模型,我们要对比单变量 transformer 模型与 DLinear 的性能,DLinear 本质也是单变量的。后面我们也会展示两个多变量 transformer 模型的性能 (在同一数据上训练的)。 ## DLinear 详细介绍 实际上,DLinear 结构非常简单,仅仅是从 Autoformer 的 `DecompositionLayer` 上连接全连接层。它使用 `DecompositionLayer` 来分解输入的世界序列到残差部分 (季节性) 和趋势部分。前向过程中,每个部分都被输入到各自的线性层,并被映射成 `prediction_length` 长度的输出。最终的输出就是两个输入的和: ```python def forward(self, context): seasonal, trend = self.decomposition(context) seasonal_output = self.linear_seasonal(seasonal) trend_output = self.linear_trend(trend) return seasonal_output + trend_output ``` 在这种设定下,首先我们把输入的序列映射成 `prediction-length * hidden` 维度 (通过 `linear_seasonal` 和 `linear_trend` 两个层) ; 得到的结果会被相加起来,并转换为 `(prediction_length, hidden)` 形状; 最后,维度为 `hidden` 的隐性表征会被映射到某种分布的参数上。 在我们的测评中,我们使用 [GluonTS](https://github.com/awslabs/gluonts) 中 DLinear 的实现。 ## 示例: Traffic 数据集 我们希望用实验结果展示库中基于 transformer 模型的性能,这里我们使用 Traffic 数据集,该数据集有 862 条时间序列数据。我们将在每条时间序列上训练一个共享的模型 (单变量设定)。每个时间序列都代表了一个传感器的占有率值,值的范围在 0 到 1 之间。下面的这些超参数我们将在所有模型中保持一致。 ```python # Traffic prediction_length is 24. Reference: # https://github.com/awslabs/gluonts/blob/6605ab1278b6bf92d5e47343efcf0d22bc50b2ec/src/gluonts/dataset/repository/_lstnet.py#L105 prediction_length = 24 context_length = prediction_length*2 batch_size = 128 num_batches_per_epoch = 100 epochs = 50 scaling = "std" ``` 使用的 transformer 模型都很小: ```python encoder_layers=2 decoder_layers=2 d_model=16 ``` 这里我们不再讲解如何用 `Autoformer` 训练模型,读者可以参考之前两篇博客 ([TimeSeriesTransformer](https://huggingface.co/blog/time-series-transformers) 和 [Informer](https://huggingface.co/blog/informer)) 并替换模型为 `Autoformer` 、替换数据集为 `traffic` 。我们也训练了现成的模型放在 HuggingFace Hub 上,稍后的评测将会使用这里的模型。 ## 载入数据集 首先安装必要的库: ```python !pip install -q transformers datasets evaluate accelerate "gluonts[torch]" ujson tqdm ``` `traffic` 数据集 ([Lai et al. 
(2017)](https://arxiv.org/abs/1703.07015)) 包含了旧金山的交通数据。它包含 862 条以小时为时间单位的时间序列,代表了道路占有率的数值,其数值范围为 $[0, 1]$,记录了旧金山湾区高速公路从 2015 年到 2016 年的数据。 ```python from gluonts.dataset.repository.datasets import get_dataset dataset = get_dataset("traffic") freq = dataset.metadata.freq prediction_length = dataset.metadata.prediction_length ``` 我们可视化一条时间序列看看,并画出训练和测试集的划分: ```python import matplotlib.pyplot as plt train_example = next(iter(dataset.train)) test_example = next(iter(dataset.test)) num_of_samples = 4*prediction_length figure, axes = plt.subplots() axes.plot(train_example["target"][-num_of_samples:], color="blue") axes.plot( test_example["target"][-num_of_samples - prediction_length :], color="red", alpha=0.5, ) plt.show() ``` ![png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/148_autoformer/output_15_0.png) 定义训练和测试集划分: ```python train_dataset = dataset.train test_dataset = dataset.test ``` ## 定义数据变换 接下来,我们定义数据的变换,尤其是时间相关特征的制作 (基于数据集本身和一些普适做法)。 我们定义一个 `Chain` ,代表 GluonTS 中一系列的变换 (这类似图像里 `torchvision.transforms.Compose` )。这让我们将一系列变换集成到一个处理流水线中。 下面代码中,每个变换都添加了注释,用以说明它们的作用。从更高层次讲,我们将遍历每一个时间序列,并添加或删除一些特征: ```python from transformers import PretrainedConfig from gluonts.time_feature import time_features_from_frequency_str from gluonts.dataset.field_names import FieldName from gluonts.transform import ( AddAgeFeature, AddObservedValuesIndicator, AddTimeFeatures, AsNumpyArray, Chain, ExpectedNumInstanceSampler, RemoveFields, SelectFields, SetField, TestSplitSampler, Transformation, ValidationSplitSampler, VstackFeatures, RenameFields, ) def create_transformation(freq: str, config: PretrainedConfig) -> Transformation: # create a list of fields to remove later remove_field_names = [] if config.num_static_real_features == 0: remove_field_names.append(FieldName.FEAT_STATIC_REAL) if config.num_dynamic_real_features == 0: remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL) if config.num_static_categorical_features == 0: remove_field_names.append(FieldName.FEAT_STATIC_CAT) return Chain( # step 1: remove static/dynamic fields if not specified [RemoveFields(field_names=remove_field_names)] # step 2: convert the data to NumPy (potentially not needed) + ( [ AsNumpyArray( field=FieldName.FEAT_STATIC_CAT, expected_ndim=1, dtype=int, ) ] if config.num_static_categorical_features > 0 else [] ) + ( [ AsNumpyArray( field=FieldName.FEAT_STATIC_REAL, expected_ndim=1, ) ] if config.num_static_real_features > 0 else [] ) + [ AsNumpyArray( field=FieldName.TARGET, # we expect an extra dim for the multivariate case: expected_ndim=1 if config.input_size == 1 else 2, ), # step 3: handle the NaN's by filling in the target with zero # and return the mask (which is in the observed values) # true for observed values, false for nan's # the decoder uses this mask (no loss is incurred for unobserved values) # see loss_weights inside the xxxForPrediction model AddObservedValuesIndicator( target_field=FieldName.TARGET, output_field=FieldName.OBSERVED_VALUES, ), # step 4: add temporal features based on freq of the dataset # these serve as positional encodings AddTimeFeatures( start_field=FieldName.START, target_field=FieldName.TARGET, output_field=FieldName.FEAT_TIME, time_features=time_features_from_frequency_str(freq), pred_length=config.prediction_length, ), # step 5: add another temporal feature (just a single number) # tells the model where in the life the value of the time series is # sort of running counter AddAgeFeature( target_field=FieldName.TARGET, output_field=FieldName.FEAT_AGE, 
pred_length=config.prediction_length, log_scale=True, ), # step 6: vertically stack all the temporal features into the key FEAT_TIME VstackFeatures( output_field=FieldName.FEAT_TIME, input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE] + ( [FieldName.FEAT_DYNAMIC_REAL] if config.num_dynamic_real_features > 0 else [] ), ), # step 7: rename to match HuggingFace names RenameFields( mapping={ FieldName.FEAT_STATIC_CAT: "static_categorical_features", FieldName.FEAT_STATIC_REAL: "static_real_features", FieldName.FEAT_TIME: "time_features", FieldName.TARGET: "values", FieldName.OBSERVED_VALUES: "observed_mask", } ), ] ) ``` ## 定义 `InstanceSplitter` 我们需要创建一个 `InstanceSplitter` ,用来给训练、验证和测试集提供采样窗口,得到一段时间的内的时间序列 (我们不可能把完整的整段数据输入给模型,毕竟时间太长,而且也有内存限制)。 这个实例分割工具每一次将会随机选取 `context_length` 长度的数据,以及紧随其后的 `prediction_length` 长度的窗口,并为相应的窗口标注 `past_` 或 `future_` 。这样可以保证 `values` 能被分为 `past_values` 和随后的 `future_values` ,各自作为编码器和解码器的输入。除了 `values` ,对于 `time_series_fields` 中的其它 key 对应的数据也是一样。 ```python from gluonts.transform import InstanceSplitter from gluonts.transform.sampler import InstanceSampler from typing import Optional def create_instance_splitter( config: PretrainedConfig, mode: str, train_sampler: Optional[InstanceSampler] = None, validation_sampler: Optional[InstanceSampler] = None, ) -> Transformation: assert mode in ["train", "validation", "test"] instance_sampler = { "train": train_sampler or ExpectedNumInstanceSampler( num_instances=1.0, min_future=config.prediction_length ), "validation": validation_sampler or ValidationSplitSampler(min_future=config.prediction_length), "test": TestSplitSampler(), }[mode] return InstanceSplitter( target_field="values", is_pad_field=FieldName.IS_PAD, start_field=FieldName.START, forecast_start_field=FieldName.FORECAST_START, instance_sampler=instance_sampler, past_length=config.context_length + max(config.lags_sequence), future_length=config.prediction_length, time_series_fields=["time_features", "observed_mask"], ) ``` ## 创建 PyTorch 的 DataLoader 接下来就该创建 PyTorch DataLoader 了: 这让我们能把数据整理成 batch 的形式,即 (input, output) 对的形式,或者说是 ( `past_values` , `future_values` ) 的形式。 ```python from typing import Iterable import torch from gluonts.itertools import Cyclic, Cached from gluonts.dataset.loader import as_stacked_batches def create_train_dataloader( config: PretrainedConfig, freq, data, batch_size: int, num_batches_per_epoch: int, shuffle_buffer_length: Optional[int] = None, cache_data: bool = True, **kwargs, ) -> Iterable: PREDICTION_INPUT_NAMES = [ "past_time_features", "past_values", "past_observed_mask", "future_time_features", ] if config.num_static_categorical_features > 0: PREDICTION_INPUT_NAMES.append("static_categorical_features") if config.num_static_real_features > 0: PREDICTION_INPUT_NAMES.append("static_real_features") TRAINING_INPUT_NAMES = PREDICTION_INPUT_NAMES + [ "future_values", "future_observed_mask", ] transformation = create_transformation(freq, config) transformed_data = transformation.apply(data, is_train=True) if cache_data: transformed_data = Cached(transformed_data) # we initialize a Training instance instance_splitter = create_instance_splitter(config, "train") # the instance splitter will sample a window of # context length + lags + prediction length (from the 366 possible transformed time series) # randomly from within the target time series and return an iterator. 
stream = Cyclic(transformed_data).stream() training_instances = instance_splitter.apply(stream, is_train=True) return as_stacked_batches( training_instances, batch_size=batch_size, shuffle_buffer_length=shuffle_buffer_length, field_names=TRAINING_INPUT_NAMES, output_type=torch.tensor, num_batches_per_epoch=num_batches_per_epoch, ) def create_test_dataloader( config: PretrainedConfig, freq, data, batch_size: int, **kwargs, ): PREDICTION_INPUT_NAMES = [ "past_time_features", "past_values", "past_observed_mask", "future_time_features", ] if config.num_static_categorical_features > 0: PREDICTION_INPUT_NAMES.append("static_categorical_features") if config.num_static_real_features > 0: PREDICTION_INPUT_NAMES.append("static_real_features") transformation = create_transformation(freq, config) transformed_data = transformation.apply(data, is_train=False) # we create a Test Instance splitter which will sample the very last # context window seen during training only for the encoder. instance_sampler = create_instance_splitter(config, "test") # we apply the transformations in test mode testing_instances = instance_sampler.apply(transformed_data, is_train=False) return as_stacked_batches( testing_instances, batch_size=batch_size, output_type=torch.tensor, field_names=PREDICTION_INPUT_NAMES, ) ``` ## 在 Autoformer 上评测 我们已经在这个数据集上预训练了一个 Autoformer 了,所以我们可以直接拿来模型在测试集上测一下: ```python from transformers import AutoformerConfig, AutoformerForPrediction config = AutoformerConfig.from_pretrained("kashif/autoformer-traffic-hourly") model = AutoformerForPrediction.from_pretrained("kashif/autoformer-traffic-hourly") test_dataloader = create_test_dataloader( config=config, freq=freq, data=test_dataset, batch_size=64, ) ``` 在推理时,我们使用模型的 `generate()` 方法来预测 `prediction_length` 步的未来数据,基于最近使用的对应时间序列的窗口长度。 ```python from accelerate import Accelerator accelerator = Accelerator() device = accelerator.device model.to(device) model.eval() forecasts_ = [] for batch in test_dataloader: outputs = model.generate( static_categorical_features=batch["static_categorical_features"].to(device) if config.num_static_categorical_features > 0 else None, static_real_features=batch["static_real_features"].to(device) if config.num_static_real_features > 0 else None, past_time_features=batch["past_time_features"].to(device), past_values=batch["past_values"].to(device), future_time_features=batch["future_time_features"].to(device), past_observed_mask=batch["past_observed_mask"].to(device), ) forecasts_.append(outputs.sequences.cpu().numpy()) ``` 模型输出的数据形状是 ( `batch_size` , `number of samples` , `prediction length` , `input_size` )。 在下面这个例子中,我们为预测接下来 24 小时的交通数据而得到了 100 条可能的数值,而 batch size 是 64: ```python forecasts_[0].shape >>> (64, 100, 24) ``` 我们在垂直方向把它们堆叠起来 (使用 `numpy.vstack` 函数),以此获取所有测试集时间序列的预测: 我们有 `7` 个滚动的窗口,所以有 `7 * 862 = 6034` 个预测。 ```python import numpy as np forecasts = np.vstack(forecasts_) print(forecasts.shape) >>> (6034, 100, 24) ``` 我们可以把预测结果和 ground truth 做个对比。为此,我们使用 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 这个库,它里面包含了 [MASE](https://huggingface.co/spaces/evaluate-metric/mase) 的度量方法。 我们对每个时间序列用这一度量标准计算相应的值,并算出其平均值: ```python from tqdm.autonotebook import tqdm from evaluate import load from gluonts.time_feature import get_seasonality mase_metric = load("evaluate-metric/mase") forecast_median = np.median(forecasts, 1) mase_metrics = [] for item_id, ts in enumerate(tqdm(test_dataset)): training_data = ts["target"][:-prediction_length] ground_truth = ts["target"][-prediction_length:] mase = mase_metric.compute( 
predictions=forecast_median[item_id], references=np.array(ground_truth), training=np.array(training_data), periodicity=get_seasonality(freq)) mase_metrics.append(mase["mase"]) ``` 所以 Autoformer 模型的结果是: ```python print(f"Autoformer univariate MASE: {np.mean(mase_metrics):.3f}") >>> Autoformer univariate MASE: 0.910 ``` 我们还可以画出任意时间序列预测针对其 ground truth 的对比,这需要以下函数: ```python import matplotlib.dates as mdates import pandas as pd test_ds = list(test_dataset) def plot(ts_index): fig, ax = plt.subplots() index = pd.period_range( start=test_ds[ts_index][FieldName.START], periods=len(test_ds[ts_index][FieldName.TARGET]), freq=test_ds[ts_index][FieldName.START].freq, ).to_timestamp() ax.plot( index[-5*prediction_length:], test_ds[ts_index]["target"][-5*prediction_length:], label="actual", ) plt.plot( index[-prediction_length:], np.median(forecasts[ts_index], axis=0), label="median", ) plt.gcf().autofmt_xdate() plt.legend(loc="best") plt.show() ``` 比如,测试集中第四个时间序列的结果对比,画出来是这样: ```python plot(4) ``` ![png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/148_autoformer/output_44_0.png) ## 在 DLinear 上评测 `gluonts` 提供了一种 DLinear 的实现,我们将使用这个实现区训练、测评该算法: ```python from gluonts.torch.model.d_linear.estimator import DLinearEstimator # Define the DLinear model with the same parameters as the Autoformer model estimator = DLinearEstimator( prediction_length=dataset.metadata.prediction_length, context_length=dataset.metadata.prediction_length*2, scaling=scaling, hidden_dimension=2, batch_size=batch_size, num_batches_per_epoch=num_batches_per_epoch, trainer_kwargs=dict(max_epochs=epochs) ) ``` 训练模型: ```python predictor = estimator.train( training_data=train_dataset, cache_data=True, shuffle_buffer_length=1024 ) >>> INFO:pytorch_lightning.callbacks.model_summary: | Name | Type | Params --------------------------------------- 0 | model | DLinearModel | 4.7 K --------------------------------------- 4.7 K Trainable params 0 Non-trainable params 4.7 K Total params 0.019 Total estimated model params size (MB) Training: 0it [00:00, ?it/s] ... INFO:pytorch_lightning.utilities.rank_zero:Epoch 49, global step 5000: 'train_loss' was not in top 1 INFO:pytorch_lightning.utilities.rank_zero:`Trainer.fit` stopped: `max_epochs=50` reached. ``` 在测试集上评测: ```python from gluonts.evaluation import make_evaluation_predictions, Evaluator forecast_it, ts_it = make_evaluation_predictions( dataset=dataset.test, predictor=predictor, ) d_linear_forecasts = list(forecast_it) d_linear_tss = list(ts_it) evaluator = Evaluator() agg_metrics, _ = evaluator(iter(d_linear_tss), iter(d_linear_forecasts)) ``` 所以 DLinear 对应的结果是: ```python dlinear_mase = agg_metrics["MASE"] print(f"DLinear MASE: {dlinear_mase:.3f}") >>> DLinear MASE: 0.965 ``` 同样地,我们画出预测结果与 ground truth 的对比曲线图: ```python def plot_gluonts(index): plt.plot(d_linear_tss[index][-4 * dataset.metadata.prediction_length:].to_timestamp(), label="target") d_linear_forecasts[index].plot(show_label=True, color='g') plt.legend() plt.gcf().autofmt_xdate() plt.show() ``` ```python plot_gluonts(4) ``` ![png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/148_autoformer/output_54_0.png) 实际上, `traffic` 数据集在平日和周末会出现传感器中模式的分布偏移。那我们还应该怎么做呢?由于 DLinear 没有足够的能力去处理协方差信息,或者说是任何的日期时间的特征,我们给出的窗口大小无法覆盖全面,使得让模型有足够信息去知道当前是在预测平日数据还是周末数据。因此模型只会去预测更为普适的结果,这就导致其预测分布偏向平日数据,因而导致对周末数据的预测变得更差。当然,如果我们给一个足够大的窗口,一个线性模型也可以识别出周末的模式,但当我们的数据中存在以月或以季度为单位的模式分布时,那就需要更大的窗口了。 ## 总结 所以 transformer 模型和线性模型对比的结论是什么呢?不同模型在测试集上的 MASE 指标如下所示: |Dataset | Transformer (uni.) 
| Transformer (mv.) | Informer (uni.) | Informer (mv.) | Autoformer (uni.) | DLinear | |:--:|:--:| :--:| :--:| :--:| :--:|:-------:| |`Traffic` | **0.876** | 1.046 | 0.924 | 1.131 | 0.910 | 0.965 | 可以看到,我们去年引入的 [最原始的 Transformer 模型](https://huggingface.co/docs/transformers/model_doc/time_series_transformer) 获得了最好的性能指标。其次,多变量模型一般都比对应的单变量模型更差,原因在于序列间的相关性一般都较难估计,额外引入的波动通常会损坏预测结果,或者让模型学到一些错误的相关性信息。最近的一些论文,如 [CrossFormer](https://openreview.net/forum?id=vSVLM2j9eie) (ICLR 23) 和 [CARD](https://arxiv.org/abs/2305.12095),也在尝试解决 transformer 模型中的这些问题。 多变量模型通常在训练数据足够大的时候才会表现得好。但在较小的公开数据集上与单变量模型对比时,通常是单变量模型表现得更好。相对于线性模型,相应规模的单变量 transformer 模型或其它神经网络类模型通常会表现得更好。 总结来讲,transformer 模型在时间序列预测领域远没有达到要被淘汰的境地。然而,大规模训练数据对挖掘它的巨大潜力至关重要,而与 CV 或 NLP 领域不同,时间序列预测缺乏大规模公开数据集。当前绝大多数的时间序列预训练模型也不过是在诸如 [UCR & UEA](https://www.timeseriesclassification.com/) 这样的少量样本上训练的。即使这些基准数据集为时间序列预测的发展进步提供了基石,其较小的规模和泛化性的缺失使得大规模预训练仍然面临诸多困难。 所以对于时间序列预测领域来讲,发展大规模、强泛化性的数据集 (就像 CV 领域的 ImageNet 一样) 是当前最重要的事情。这将会极大地促进时间序列分析领域预训练模型的研究发展,提升预训练模型在时间序列预测方面的能力。 ## 声明 我们诚挚感谢 [Lysandre Debut](https://github.com/LysandreJik) 和 [Pedro Cuenca](https://github.com/pcuenca) 提供的深刻见解和对本项目的帮助。 ❤️
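附:为了更直观地说明前文 DLinear "分解 + 两个线性层" 的思路,下面给出一个极简的示意实现。请注意这只是一个在若干假设之下的草图 (单变量输入、直接输出点预测、滑动平均窗口取 25,这些都是演示用的假设),并不是 GluonTS 中 `DLinearEstimator` 的真实实现,后者还会把隐层表征映射到某种分布的参数上:

```python
import torch
from torch import nn

# hypothetical hyper-parameters, for illustration only
context_length = 48
prediction_length = 24


class MovingAvgDecomposition(nn.Module):
    """Split a series into a trend part (moving average) and a seasonal part (residual)."""

    def __init__(self, kernel_size: int = 25):
        super().__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=1)

    def forward(self, x):  # x: (batch, context_length)
        # pad both ends by repeating the edge values so the output keeps the input length
        front = x[:, :1].repeat(1, (self.kernel_size - 1) // 2)
        end = x[:, -1:].repeat(1, self.kernel_size // 2)
        padded = torch.cat([front, x, end], dim=1)
        trend = self.avg(padded.unsqueeze(1)).squeeze(1)
        seasonal = x - trend
        return seasonal, trend


class DLinearSketch(nn.Module):
    """A DLinear-style point-forecast model: one linear layer per decomposed component."""

    def __init__(self, context_length: int, prediction_length: int):
        super().__init__()
        self.decomposition = MovingAvgDecomposition()
        self.linear_seasonal = nn.Linear(context_length, prediction_length)
        self.linear_trend = nn.Linear(context_length, prediction_length)

    def forward(self, context):  # context: (batch, context_length)
        seasonal, trend = self.decomposition(context)
        return self.linear_seasonal(seasonal) + self.linear_trend(trend)


model = DLinearSketch(context_length, prediction_length)
forecast = model(torch.randn(32, context_length))
print(forecast.shape)  # torch.Size([32, 24])
```

从这个草图也能看出 DLinear 参数量很小的原因:整个模型本质上只有两个 `(context_length, prediction_length)` 形状的线性层。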
6
0
hf_public_repos/blog
hf_public_repos/blog/zh/elixir-bumblebee.md
--- title: "从 GPT2 到 Stable Diffusion:Elixir 社区迎来了 Hugging Face" thumbnail: /blog/assets/120_elixir-bumblebee/thumbnail.png authors: - user: josevalim guest: true translators: - user: Slinae - user: chenglu proofreader: true --- # 从 GPT2 到 Stable Diffusion:Elixir 社区迎来了 Hugging Face 上周,[Elixir 社区](https://elixir-lang.org/) 向大家宣布,Elixir 语言社区新增从 GPT2 到 Stable Diffusion 的一系列神经网络模型。这些模型得以实现归功于 [刚刚发布的 Bumblebee 库](https://news.livebook.dev/announcing-bumblebee-gpt2-stable-diffusion-and-more-in-elixir-3Op73O)。Bumblebee 库是使用纯 Elixir 语言实现的 Hugging Face Transformers 库。 为了帮助大家使用开始这些模型, [Livebook](https://livebook.dev/) —— 用于 Elixir 语言的计算 Notebook 平台团队创建了「智能单元」集合,让开发者可以仅用三次点击即搭建各种神经网络模型任务。 <iframe width="100%" style="aspect-ratio: 16 / 9;"src="https://www.youtube.com/embed/g3oyh3g1AtQ" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> 由于 Elixir 运行在支持并发和分布式的 Erlang 虚拟机上,开发者可以将这些模型嵌入 [Phoenix Web 应用]((https://phoenixframework.org/),作为他们现有 Phoenix Web 应用的一部分,集成在 [使用 Broadway 的数据处理管道中](https://elixir-broadway.org),将模型和 [Nerves 嵌入式系统]((https://www.nerves-project.org/) 一起部署,而无需依赖第三方软件。在所有场景中,Bumblebee 模型都会编译到 CPU 和 GPU 中。 ## 背景 将机器学习模型引入 Elixir 的努力始于大约 2 年前的 [Numerical Elixir (Nx) 项目计划](https://github.com/elixir-nx/nx/tree/main/nx)。Nx 项目实现 Elixir 多维张量和「数值定义」,作为可编译到 CPU/GPU 的 Elixir 子集。Nx 项目没有重造轮子,而是使用 Google XLA 绑定 ([EXLA](https://github.com/elixir-nx/nx/tree/main/exla)) 和 Libtorch ([Torchx](https://github.com/elixir-nx/nx/tree/main/torchx)) 进行 CPU/GPU 编译。 Nx 项目的倡议还催生了其他几个项目。[Axon](https://github.com/elixir-nx/axon) 项目从其他项目,如 [Flax](https://github.com/google/flax) 和 [PyTorch Ignite](https://pytorch.org/ignite/index.html) 项目中获得启发,为 Elixir 引进了可进行功能组合的神经网络。 [Explorer](https://github.com/elixir-nx/explorer) 项目借鉴了 [dplyr](https://dplyr.tidyverse.org/) 和 [Rust's Polars](https://www.pola.rs/),为 Elixir 社区引进了富有表现力和高性能的数据框 (DataFrame)。 [Bumblebee](https://github.com/elixir-nx/bumblebee) 和 [Tokenizers](https://github.com/elixir-nx/tokenizers) 是我们最新发布的库函数。我们感谢 Hugging Face 对机器学习领域跨社区和跨工具协作的支持,以及 Hugging Face 在加速 Elixir 生态建设中起的关键作用。 下一步,我们计划专注于使用 Elixir 进行神经网络训练和迁移学习,让开发者可以根据业务和应用的需求,增强和专注于预训练模型。我们同时也希望发布更多有关传统机器学习算法的进展。 ## 上手实践 如果你想尝试使用 Bumblebee 库,你可以: * 下载 [Livebook v0.8](https://livebook.dev/),从 Notebook 中的 "+ Smart" 单元菜单自动生成 "Neural Networks Tasks",我们目前正致力于在其他平台和空间上运行 Livebook (敬请期待!😉) * 我们同时也提供了 Bumblebee 模型在 Phoenix (+ LiveView) apps 中的应用示例:[单文件 Phoenix 应用](https://github.com/elixir-nx/bumblebee/tree/main/examples/phoenix)。这些示例为将它们集成到您的生产应用中提供了必要的构建模块 * 想获取更多的实践方法,详阅 [Notebooks](https://github.com/elixir-nx/bumblebee/tree/main/notebooks) 如果你想帮助我们构建 Elixir 机器学习生态系统,欢迎尝试使用以上的项目,并提供一些建议。这里有许多有趣的领域,从编译开发到模型构建。我们非常欢迎你可以参与进来,一起为 Bumblebee 带来更多的模型和模型架构。Elixir 社区的未来发展方向是并发式、分布式和趣味性的。
7
0
hf_public_repos/blog
hf_public_repos/blog/zh/the-age-of-ml-as-code.md
--- title: 机器学习即代码的时代已经到来 thumbnail: /blog/assets/31_age_of_ml_as_code/05_vision_transformer.png authors: - user: juliensimon translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 机器学习即代码的时代已经到来 <!-- {blog_metadata} --> <!-- {authors} --> >> 译者注: 到底是 AI 会吃掉软件还是软件会吃掉 AI?为了 job security 工程师应该把宝押在哪儿?这篇 2021 年的文章提供的一些视角似乎印证了它现在的流行,有点“运筹于帷幄之中,决胜于数年之后”的意思,颇值得软件架构师和产品经理们内省一番。 2021 版的 [《人工智能现状报告》](https://www.stateof.ai/2021-report-launch.html) 于上周发布。Kaggle 的 [机器学习和数据科学现状调查](https://www.kaggle.com/c/kaggle-survey-2021) 也于同一周发布了。这两份报告中有很多值得学习和探讨的内容,其中一些要点引起了我的注意。 > “人工智能正越来越多地应用于关键基础设施,如国家电网以及新冠大流行期间的自动化超市仓储计算。然而,人们也在质疑该行业的成熟度是否赶上了其不断增长的部署规模。” 无可否认,以机器学习引擎的应用正渗透至 IT 的每个角落。但这对公司和组织意味着什么?我们如何构建坚如磐石的机器学习工作流?每个公司应该聘请 100 名数据科学家抑或是 100 名 DevOps 工程师吗? > “Transformer 模型已成为 ML 的通用架构。不仅适用于自然语言处理,还适用于语音、计算机视觉甚至蛋白质结构预测。” 老一辈人的血泪教训是: IT 领域 [没有万能灵丹](https://en.wikipedia.org/wiki/No_Silver_Bullet)。然而,[transformer](https://arxiv.org/abs/1706.03762) 架构又确实在各式各样的机器学习任务中都非常有效。但我们如何才能跟上机器学习创新的疯狂脚步呢?我们真的需要专家级的技能才能用上这些最先进的模型吗?抑或是否有更短的路径可以在更短的时间内创造商业价值? 好,以下是我的一些想法。 ### 面向大众的机器学习! 机器学习无处不在,或者至少它试图无处不在。几年前,福布斯写道“[软件吞噬了世界,而现在人工智能正在吞噬软件](https://www.forbes.com/sites/cognitiveworld/2019/08/29/software-ate-the-world-now-ai-is-eating-software/)”,但这到底意味着什么?如果这意味着机器学习模型应该取代数千行僵化的遗留代码,那么我完全赞成。去死,邪恶的商业规则,去死! 现在,这是否意味着机器学习实际上将取代软件工程?现在肯定有很多关于 [人工智能生成的代码](https://www.wired.com/story/ai-latest-trick-writing-computer-code/) 的幻想,其中一些技术确实很有趣,例如用于 [找出 bug 和性能问题](https://aws.amazon.com/codeguru) 的技术。然而,我们不仅不应该考虑摆脱开发人员,还应该努力为尽可能多的开发人员提供支持,以使机器学习成为另一个无聊的 IT 工作负载 (但 [无聊的技术很棒](http://boringtechnology.club/))。换句话说,我们真正需要的是软件吃掉机器学习! ### 这次情况没有什么不同 多年来,我一直在虚张声势地宣称: 软件工程已有的长达十年历史的最佳实践同样适用于数据科学和机器学习: 版本控制、可重用性、可测试性、自动化、部署、监控、性能、优化等。我一度感觉自己很孤单,直到谷歌铁骑的出现: > “做你擅长的,以卓越工程师的身份而不是以卓越机器学习专家的身份去做机器学习。” - [《机器学习的规则》](https://developers.google.com/machine-learning/guides/rules-of-ml),Google 没有必要重新发明轮子。DevOps 运动在 10 多年前就解决了这些问题。现在,数据科学和机器学习社区应该立即采用这些经过验证的工具和流程并作适当调整。这是我们在生产中构建强大、可扩展、可重复的机器学习系统的唯一方法。如果将其称为 MLOps 对事情有帮助,那就这么叫它!关键是其内涵,名字并不重要。 确实是时候停止将概念验证和沙箱 A/B 测试视为显著成就了。它们只是迈向生产的一小块垫脚石,即它是唯一可以验证假设及其对业务的影响的地方。每个数据科学家和机器学习工程师都应该致力于尽可能快、尽可能频繁地将他们的模型投入生产。 **能用的生产模型无一例外都比出色的沙箱模型好**。 ### 基础设施?所以呢? 都 2021 年了,IT 基础设施不应再成为阻碍。软件不久前已经吞噬了它,通过云 API、基础设施即代码、Kubeflow 等将其抽象化。是的,即使是自建基础设施也已经被软件吞噬了。 机器学习基础设施也很快就会发生同样的情况。 Kaggle 调查显示,75% 的受访者使用云服务,超过 45% 的受访者使用企业机器学习平台,其中 Amazon SageMaker、Databricks 和 Azure ML Studio 位列前三。 <kbd> <img src="https://huggingface.co/blog/assets/31_age_of_ml_as_code/01_entreprise_ml.png"> </kbd> 借助 MLOps、软件定义的基础设施和平台,将任意一个伟大的想法从沙箱中拖出来并将其投入生产已变得前所未有地容易。回答最开始的问题,我很确定你需要雇用更多精通 ML 的软件和 DevOps 工程师,而不是更多的数据科学家。但其实在内心深处,你本来就知道这一点,对吗? 现在,我们来谈谈 transformer 模型。 --- ### Transformers! Transformers! Transformers! 
([鲍尔默风格](https://www.youtube.com/watch?v=Vhh_GeBPOhs)) AI 现状报告称: “Transformer 架构已经远远超出了 NLP 的范围,并且正在成为 ML 的通用架构”。例如,最近的模型,如 Google 的 [Vision Transformer](https://paperswithcode.com/method/vision-transformer)、无卷积 transformer 架构以及混合了 transformer 和卷积的 [CoAtNet](https://paperswithcode.com/paper/coatnet-marrying-convolution-and-attention) 为 ImageNet 上的图像分类设定了新的基准,同时对训练计算资源的需求更低。 <kbd> <img src="https://huggingface.co/blog/assets/31_age_of_ml_as_code/02_vision_transformer.png"> </kbd> Transformer 模型在音频 (如语音识别) 以及点云 (一种用于对自动驾驶场景等 3D 环境进行建模的技术) 方面也表现出色。 Kaggle 的调查也呼应了 transformer 模型的崛起。它们的使用量逐年增长,而 RNN、CNN 和梯度提升算法则在减少。 <kbd> <img src="https://huggingface.co/blog/assets/31_age_of_ml_as_code/03_transformers.png"> </kbd> 除了提高准确性之外,transformer 模型也在持续加强其在迁移学习方面的能力,这样大家就可以节约训练时间和计算成本,更快地实现业务价值。 <kbd> <img src="https://huggingface.co/blog/assets/31_age_of_ml_as_code/04_general_transformers.png"> </kbd> 借助 transformer 模型,机器学习世界正逐渐从“ _好!!让我们从头开始构建和训练我们自己的深度学习模型_ ”转变为“ _让我们选择一个经过验证的现成模型,用我们自己的数据对其进行微调,然后早点回家吃晚饭。_ ” 从很多方面来说,这都是一件好事。技术水平在不断进步,几乎没有人能跟上其步伐。还记得我之前提到的 Google Vision Transformer 模型吗?你想现在就测试一下吗?在 Hugging Face 上,这 [再简单不过了](https://huggingface.co/google/vit-base-patch16-224)。 <kbd> <img src="https://huggingface.co/blog/assets/31_age_of_ml_as_code/05_vision_transformer.png"> </kbd> 那如果想试试 [Big Science 项目](https://bigscience.huggingface.co/) 最新的 [零样本文本生成模型](https://huggingface.co/bigscience) 呢? <kbd> <img src="https://huggingface.co/blog/assets/31_age_of_ml_as_code/06_big_science.png"> </kbd> 你还可以对另外 [16000 多个模型](https://huggingface.co/models) 以及 [1600 多个数据集](https://huggingface.co/datasets) 做同样的事情。进一步地,你还可以用我们提供的其他工具进行 [推理](https://huggingface.co/inference-api)、[AutoNLP](https://huggingface.co/autonlp)、[延迟优化](https://huggingface.co/infinity) 及 [硬件加速](https://huggingface.co/hardware)。我们甚至还能帮你启动项目,完成 [从建模到生产](https://huggingface.co/support) 的全过程。 Hugging Face 的使命是让机器学习对初学者和专家来说都尽可能友好且高效。 我们相信你只要编写尽可能少的代码就能训练、优化和部署模型。 我们相信内置的最佳实践。 我们坚信基础设施应尽可能透明。 我们相信,没有什么比快速高质的生产级模型更好的了。 ### 机器学习即代码,就这里,趁现在! 大家似乎都同意这句话。我们的 [Github](https://github.com/huggingface) 有超过 52000 颗星。在 Hugging Face 首次出现在 Kaggle 调查报告中时,其使用率就已超过 10%。 <kbd> <img src="https://huggingface.co/blog/assets/31_age_of_ml_as_code/07_kaggle.png"> </kbd> **谢谢你们**,我们才刚刚开始! --- _对 Hugging Face 如何帮助你的组织构建和部署生产级机器学习解决方案感兴趣?请通过 [j​​[email protected]](mailto:[email protected]) 联系我 (招聘、推销勿扰)。_
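附:为了呼应上文 "选择一个经过验证的现成模型" 的工作方式,下面给出一个假设性的最小示例,用 🤗 Transformers 的 `pipeline` 加载文中提到的 `google/vit-base-patch16-224` 模型做图像分类。示例中的图片 URL 只是演示用的占位符:

```python
from transformers import pipeline

# load the pretrained Vision Transformer mentioned above from the Hugging Face Hub
classifier = pipeline("image-classification", model="google/vit-base-patch16-224")

# any local image path or public image URL works; this URL is only a placeholder
results = classifier("https://example.com/cat.jpg")

for result in results:
    print(f"{result['label']}: {result['score']:.3f}")
```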
8
0
hf_public_repos/blog
hf_public_repos/blog/zh/bloom-inference-pytorch-scripts.md
--- title: "使用 DeepSpeed 和 Accelerate 进行超快 BLOOM 模型推理" thumbnail: /blog/assets/bloom-inference-pytorch-scripts/thumbnail.png authors: - user: stas - user: sgugger translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 使用 DeepSpeed 和 Accelerate 进行超快 BLOOM 模型推理 本文展示了如何使用 1760 亿 (176B) 参数的 [BLOOM 模型](https://huggingface.co/bigscience/bloom) 生成文本时如何获得超快的词吞吐 (per token throughput)。 因为在使用 bf16 (bfloat16) 权重时该模型内存占用为 352 GB (`176*2`),所以最高效的硬件配置是使用 8x80GB 的 A100 GPU。也可使用 2x8x40GB 的 A100 或者 2x8x48GB 的 A6000。使用这些 GPU 的主要原因是截至本文成稿时为止它们是能提供最大显存的 GPU,但你也可以使用其他 GPU。比如,可以使用 24x32GB V100。 一般来讲,使用单节点会带来最快的吞吐,因为大多数时候节点内的 GPU 互联硬件比节点间的快,但未必总是如此。 如果你没有这么高端的硬件或没有这么多硬件,你仍可能通过 CPU 卸载 (CPU offload) 或是 NVMe 卸载 (NVMe offload) 的方式在更小的 GPU 上对 BLOOM 进行推理。当然,生成时间会慢很多。 我们计划涉及 [8 比特量化方案](https://huggingface.co/blog/hf-bitsandbytes-integration),该方案以稍慢的吞吐为代价将显存需求减少到一半。我们还会讨论 [BitsAndBytes](https://github.com/TimDettmers/bitsandbytes) 和 [Deepspeed-Inference](https://www.deepspeed.ai/tutorials/inference-tutorial/) 库。 ## 测试基准 事不宜迟,我们先展示一些数据吧。 为了保持一致性,除非另有说明,本文的测试基准都是在相同的配有 512GB CPU 内存的 8x80GB A100 节点上完成的,该节点来自 法国 [Jean Zay 超算中心](http://www.idris.fr/eng/jean-zay/index.html)。这一配置对于节省检查点加载时间非常重要,如果磁盘加载缓慢,就需要更长的检查点加载时间。我们在多个进程中并行执行 IO 任务的情况下更是如此。 所有的测试基准都是使用 [贪心搜索](https://huggingface.co/blog/how-to-generate#greedy-search) 完成最多 100 个词的生成任务: ``` Generate args {'max_length': 100, 'do_sample': False} ``` 输入提示词仅包含几个词。我们会缓存先前见到的词,因为每次重新计算它们相当慢。 首先,让我们快速看一下从开始到准备好花了多长时间, 即模型加载和准备花了多长时间: | 方法 | 秒 | | :---------------------- | :--- | | accelerate | 121 | | ds-inference shard-int8 | 61 | | ds-inference shard-fp16 | 60 | | ds-inference unsharded | 662 | | ds-zero | 462 | Deepspeed-Inference 使用了预分片的权重仓库,整个加载时间大约在 1 分钟。Accelerrate 的加载时间也很优秀,只有大约 2 分钟。其他方案就慢得多。 加载时间有可能重要也可能并不重要,因为一旦加载成功你可以一遍遍持续不断地生成词而不再需要额外地加载开销。 接着是最重要的测试基准指标:词生成吞吐 (token generation throughput)。这个吞吐的度量比较简单,即:生成 100 个新词的时间除以 100 和 batch size (也就是除以生成的总词数)。 下面列出了 8x80GB GPU 的吞吐,单位为毫秒: | 方法 \ bs | 1 | 8 | 16 | 32 | 64 | 128 | 256 | 512 | | :---------------- | :----- | :---- | :---- | :---- | :--- | :--- | :--- | :--- | | accelerate bf16 | 230.38 | 31.78 | 17.84 | 10.89 | oom | | | | | accelerate int8 | 286.56 | 40.92 | 22.65 | 13.27 | oom | | | | | ds-inference fp16 | 44.02 | 5.70 | 3.01 | 1.68 | 1.00 | 0.69 | oom | | | ds-inference int8 | 89.09 | 11.44 | 5.88 | 3.09 | 1.71 | 1.02 | 0.71 | oom | | ds-zero bf16 | 283 | 34.88 | oom | | | | | | 这里, 当内存耗尽 (Out Of Memory,OOM) 时即表明 batch size 太大 GPU 显存放不下了。 使用 Deepspeed-Inference 的张量并行 (Tensor Parallelism,TP) 和定制化融合 CUDA 核函数可以得到小于 1 毫秒的吞吐!太棒了!尽管使用这个方案去推理那些尚未被验证过的模型时,你可能会需要花一些时间去开发从而让它工作起来。 Accelerate 也超级快。它使用了非常简单的管线并行 (Pipeline Parallelism,PP)。因为它非常简单,所以它应该对任何模型都是开箱即用的。 因为 Deepspeed-ZeRO 可以并行处理多路生成流,其吞吐可以再除以 8 或者 16,具体数值取决于在调用 `generate` 时用了 8 个 GPU 还是 16 个 GPU。当然,这也意味着在 8x80GB A100 的情况下 (见上表) ,可以处理的 batch size 为 64 且吞吐可至大约 4 毫秒。因此,这 3 种方案的性能是接近的。 让我们再重新看一下这些数字是怎么计算出来的。举个例子,使用 Deepspeed-Inference fp16 模式实时生成 batch size 为 128、长度为 100 个新词的文本花了 8832 毫秒,因此我们可以这样计算吞吐:钟面时间 / ( batch size * 新词数 ) 或 `8821/(128*100) = 0.69`。 现在我们一起看看 Deepspeed-Inference 和 BitsAndBytes 提供的 int8 量化模型的威力,它仅需占用 bfloat16 或 float16 推理所需显存的一半。 以下为 4x80GB GPU 的吞吐,单位为毫秒: | 方法 bs | 1 | 8 | 16 | 32 | 64 | 128 | | :---------------- | :----- | :---- | :---- | :---- | :--- | :--- | | accelerate int8 | 284.15 | 40.14 | 21.97 | oom | | | | ds-inference int8 | 156.51 | 20.11 | 10.38 | 5.50 | 2.96 | oom | 你只需在下述 3 个脚本里添加 `--benchmark` 即可重现这些测试基准的结果。 ## 方案 首先获取最新的演示代码仓库: ``` git clone https://github.com/huggingface/transformers-bloom-inference cd 
transformers-bloom-inference ``` 本文我们准备使用 `bloom-inference-scripts/` 文件夹下的 3 个脚本。 下面我们按框架的字母序逐一展示相关方案。 ## HuggingFace Accelerate [Accelerate](https://github.com/huggingface/accelerate) Accelerate 按如下步骤进行大模型推理: 1. 用空的权重实例化模型。 2. 分析每层的大小以及每个设备 (CPU, CPU) 的可用空间,并决定每层应该在哪个设备上推理。 3. 逐比特加载模型 checkpoint 并把权重加载到相应的设备。 然后,它会使用钩子代码 (hook) 来确保模型正确运行,钩子代码被用于在正确的设备间传输输入和输出,并在前向轮运行前加载那些卸载到 CPU (甚至硬盘) 上的权重到 GPU,然后在前向轮结束后再次把权重卸载。 在有多个 GPU 且有足够空间放下整个模型的情形下,该方案在 GPU 间逐个切换直至所有层运行完毕。每个给定的时间只有一个 GPU 工作,这听起来很没效率。但尽管该方案 GPU 存在空闲,它的吞吐却相当不错。 因为相同的代码可以运行在任意给定的设置中,所以本方案非常灵活。Accelerate 首先使用所有可用的 GPU,当显存已满时会卸载到 CPU 内存直至卸载到硬盘。卸载到 CPU 或硬盘会让推理变慢。举个例子,与 8x80 A100 上的 10 毫秒相比,已有用户报告,不作任何代码改动,在 2 个 A100 上运行 BLOOM 吞吐是每词 15 秒。 你可以你从 [Accelerate 文档](https://huggingface.co/docs/accelerate/big_modeling) 中获取本方案的更多信息。 ### 设置 ``` pip install transformers>=4.21.3 accelerate>=0.12.0 ``` ### 运行 简单执行如下命令: ``` python bloom-inference-scripts/bloom-accelerate-inference.py --name bigscience/bloom --batch_size 1 --benchmark ``` 如需使用 [BitsAndBytes](https://github.com/TimDettmers/bitsandbytes) 的 8 比特量化方案,首先要安装 `bitsandbytes`: ``` pip install bitsandbytes ``` 然后在前述命令行中增加 `--dtype int8`: ``` python bloom-inference-scripts/bloom-accelerate-inference.py --name bigscience/bloom --dtype int8 --batch_size 1 --benchmark ``` 如果你有 4 个以上 GPU,你可以通过如下命令限制脚本只使用其中 4 个 GPU: ``` CUDA_VISIBLE_DEVICES=0,1,2,3 python bloom-inference-scripts/bloom-accelerate-inference.py --name bigscience/bloom --dtype int8 --batch_size 1 --benchmark ``` 在这个例子中,不 OOM 的最大 batch size 是 40。如果你深入研究脚本,你会看到我们需要调整显存分配映射从而把第一个 GPU 解放出来去仅处理激活和先前词的缓存。 ## DeepSpeed-Inference [DeepSpeed-Inference](https://www.deepspeed.ai/tutorials/inference-tutorial/) 使用张量并行 (Tensor Parallelism) 以及高效的融合 CUDA 核函数在 128 这个大 batch size 下达到了每词 1 毫秒的超快推理性能。 ### 设置 ``` pip install deepspeed>=0.7.3 ``` ### 运行 1. 最快的方法是使用 TP 预分片 (TP = Tensor Parallel) 的 checkpoint,与非预分片的 bloom checkpoint 相比,它仅需大约 1 分钟即可加载: ``` deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-inference.py --name microsoft/bloom-deepspeed-inference-fp16 ``` 1a. 如果你想要运行原始 bloom checkpoint,这个 checkpoint 一旦加载就会跟之前的方案跑到相同的吞吐,但加载需要花 10 到 20 分钟: ``` deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-inference.py --name bigscience/bloom ``` 2a. 8 比特量化版本与一般的半精度版本相比仅需一半 GPU 显存: ``` deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-inference.py --name microsoft/bloom-deepspeed-inference-int8 --dtype int8 ``` 这里我们使用 `microsoft/bloom-deepspeed-inference-int8` checkpoint 并告诉脚本跑在 `int8` 模式。 当然,现在仅需 4x80GB A100 GPU 就够了: ``` deepspeed --num_gpus 4 bloom-inference-scripts/bloom-ds-inference.py --name microsoft/bloom-deepspeed-inference-int8 --dtype int8 ``` 这种情况下,不 OOM 的最大 batch size 是 128。 可以看到,本方案中有两个因素在获得更好的性能上起到了主导作用。 1. 本方案的吞吐提高主要来自于张量并行 (Tensor Parallelism,TP) 而不是 Acclerate 的管线并行 (Pipeline Parallelism,PP)。因为 Accelerate 旨在成为非常通用的方案,因此也非常不幸地很难最大化 GPU 使用率。它首先在 GPU 0 上完成所有计算,然后是 GPU 1,等等,一直到 GPU 8,这意味着任何时刻都有 7 个 GPU 是空闲的。而另一方面,DeepSpeed-Inference 使用了 TP,意味着它会向所有 GPU 发送张量,在每个 GPU 上计算部分生成结果,然后在所有的 GPU 间通信计算结果,并继续做下一层。这就是说 TP 所有的 GPU 都同时是活跃的,但它需要比 PP 多得多的通信。 2. 
DeepSpeed-Inference 还使用了定制的 CUDA 核函数,以避免分配太多内存以及太多进出 GPU 的张量拷贝。这么做会减少显存需求及核函数启动次数,从而提高吞吐,另外还可以支持更大的 batch size,进一步增加总吞吐。 如果你对更多的例子感兴趣,可以看看 [Accelerate GPT-J inference with DeepSpeed-Inference on GPUs](https://www.philschmid.de/gptj-deepspeed-inference) 或 [Accelerate BERT inference with DeepSpeed-Inference on GPUs](https://www.philschmid.de/bert-deepspeed-inference)。 ## Deepspeed ZeRO-Inference [Deepspeed ZeRO](https://www.deepspeed.ai/tutorials/zero/) 使用一种魔术般的分片方法,使它几乎可以接受任何模型,并将其扩展到少至几个、多至上百个 GPU 上进行训练或推理。 ### 设置 ``` pip install deepspeed ``` ### 运行 注意,到目前为止的脚本都是所有 GPU 处理相同的输入,但你其实可以在每个 GPU 上运行不同的流,从而得到 `n_gpu` 倍的吞吐,这一点是 Deepspeed-Inference 做不到的。 ``` deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-zero-inference.py --name bigscience/bloom --batch_size 1 --benchmark ``` 请记住,用户可以用 ZeRO 同时创建多个不同的流,因此总性能应该是每词耗时除以参与计算的 GPU 的数目,也就是说,根据你使用的是 8 个还是 16 个 GPU,可以获得 8 倍或 16 倍的更快性能。 你还可以在一个小型 GPU 上试试卸载方案,运行时间会很长,但如果你没有 8 个巨型 GPU 的话,这也是一个聊胜于无的方案。 CPU 卸载 (1x GPUs): ``` deepspeed --num_gpus 1 bloom-inference-scripts/bloom-ds-zero-inference.py --name bigscience/bloom --batch_size 8 --cpu_offload --benchmark ``` NVMe 卸载 (1x GPUs): ``` deepspeed --num_gpus 1 bloom-inference-scripts/bloom-ds-zero-inference.py --name bigscience/bloom --batch_size 8 --nvme_offload_path=/path/to/nvme_offload --benchmark ``` 请确保在你的高速 NVMe 盘上预留约 400GB 的空间,并把 `/path/to/nvme_offload` 设成它的路径。 ## 更多客户端及服务器方案 你可以从 [transformers-bloom-inference](https://github.com/huggingface/transformers-bloom-inference) 找到更多非常高效的方案,包括服务器方案。 这里我们提供一些预览。 服务器方案: * [Mayank Mishra](https://github.com/mayank31398) 把本博文中讨论的所有演示代码整理成了一个网络服务包,你可以从 [这里](https://github.com/huggingface/transformers-bloom-inference) 下载。 * [Nicolas Patry](https://github.com/Narsil) 开发了一个超高效的 [基于 Rust 的网络服务方案](https://github.com/Narsil/bloomserver)。 更多的客户端方案: * [Thomas Wang](https://github.com/thomasw21) 正在开发一个非常快的 [使用定制 CUDA 核函数的 BLOOM 模型](https://github.com/huggingface/transformers_bloom_parallel)。 * HuggingFace 的 JAX 组已开发了一个 [基于 JAX 的方案](https://github.com/huggingface/bloom-jax-inference)。 如果你是在本博文发布几个月后才读到它,文中内容很可能已经不能反映最新的状态,请去 [transformers-bloom-inference 的 GitHub 仓库](https://github.com/huggingface/transformers-bloom-inference) 查看最新的方案。 ## 致谢 万分感谢如下这些人,他们提出了好的问题并帮助提高了本文的可读性:Olatunji Ruwase 和 Philipp Schmid。
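附:下面给出一个极简的基准测试草图,演示上文 "钟面时间 / (batch size * 新词数)" 的吞吐计算方式。为了能在单卡上直接运行,这里假设使用较小的 `bigscience/bloom-560m` 模型,这只是演示用的假设,并非本文基准所用的 176B 模型;脚本借助 Accelerate 的 `device_map="auto"` 自动放置权重:

```python
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# a small BLOOM checkpoint is used only so this sketch can run on a single GPU;
# the numbers in this post were measured on the full 176B model with 8x80GB A100s
model_name = "bigscience/bloom-560m"

tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.padding_side = "left"  # decoder-only models should be left-padded for batched generation
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",  # let Accelerate place the weights automatically
)

prompts = ["DeepSpeed is a machine learning framework"] * 8  # batch_size = 8
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)

max_new_tokens = 100
start = time.time()
with torch.no_grad():
    # greedy search, mirroring the benchmark settings above (do_sample=False)
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
elapsed = time.time() - start

# throughput metric used above: wall-clock time / (batch_size * number of new tokens)
ms_per_token = elapsed * 1000 / (len(prompts) * max_new_tokens)
print(f"{ms_per_token:.2f} ms per token")
```

把其中的模型名换成完整的 `bigscience/bloom` 并配合足够的 GPU 资源,就可以得到与上文量纲一致的每词毫秒数。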
9