Dataset columns: text (string, 7 – 328k chars) · id (string, 14 – 166 chars) · metadata (dict) · __index_level_0__ (int64, 0 – 459)
""" Linear layer (alternate definition) """ import torch import torch.nn.functional as F from torch import nn as nn class Linear(nn.Linear): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. """ def forward(self, input: torch.Tensor) -> torch.Tensor: if torch.jit.is_scripting(): bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) else: return F.linear(input, self.weight, self.bias)
pytorch-image-models/timm/layers/linear.py/0
{ "file_path": "pytorch-image-models/timm/layers/linear.py", "repo_id": "pytorch-image-models", "token_count": 282 }
186
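For context, a minimal usage sketch of the AMP-safe Linear above (assuming a timm install that exports it from timm.layers): when the module is scripted, forward manually casts weight and bias to the input dtype, so a reduced-precision input does not hit a dtype mismatch in torch.addmm.

import torch
from timm.layers import Linear  # the wrapper defined in linear.py above

fc = torch.jit.script(Linear(128, 64))          # scripted path takes the manual-cast branch
x = torch.randn(2, 128, dtype=torch.bfloat16)   # e.g. an AMP / bfloat16 activation
y = fc(x)                                       # weight & bias cast to x.dtype before F.linear
print(y.dtype)                                  # torch.bfloat16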
""" Depthwise Separable Conv Modules Basic DWS convs. Other variations of DWS exist with batch norm or activations between the DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. Hacked together by / Copyright 2020 Ross Wightman """ from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class SeparableConvNormAct(nn.Module): """ Separable Conv w/ trailing Norm and Activation """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, apply_act=True, drop_layer=None): super(SeparableConvNormAct, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) norm_act_layer = get_norm_act_layer(norm_layer, act_layer) norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) x = self.bn(x) return x SeparableConvBnAct = SeparableConvNormAct class SeparableConv2d(nn.Module): """ Separable Conv """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1): super(SeparableConv2d, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) return x
pytorch-image-models/timm/layers/separable_conv.py/0
{ "file_path": "pytorch-image-models/timm/layers/separable_conv.py", "repo_id": "pytorch-image-models", "token_count": 1138 }
187
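As a rough illustration of why the depthwise + pointwise split is used (a sketch, assuming SeparableConv2d is importable from timm.layers), the separable block needs far fewer parameters than a dense 3x3 convolution with the same channel counts:

import torch
from torch import nn
from timm.layers import SeparableConv2d

sep = SeparableConv2d(64, 128, kernel_size=3)                    # depthwise 3x3 + pointwise 1x1
reg = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False)   # dense 3x3 for comparison

print(sum(p.numel() for p in sep.parameters()))   # 64*3*3 + 64*128 = 8768
print(sum(p.numel() for p in reg.parameters()))   # 64*128*3*3 = 73728

x = torch.randn(1, 64, 32, 32)
print(sep(x).shape)   # torch.Size([1, 128, 32, 32]); 'same' padding resolved by create_conv2d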
import dataclasses import logging import os from copy import deepcopy from typing import Optional, Dict, Callable, Any, Tuple from torch import nn as nn from torch.hub import load_state_dict_from_url from timm.models._features import FeatureListNet, FeatureHookNet from timm.models._features_fx import FeatureGraphNet from timm.models._helpers import load_state_dict from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf from timm.models._manipulate import adapt_input_conv from timm.models._pretrained import PretrainedCfg from timm.models._prune import adapt_model_from_file from timm.models._registry import get_pretrained_cfg _logger = logging.getLogger(__name__) # Global variables for rarely used pretrained checkpoint download progress and hash check. # Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle. _DOWNLOAD_PROGRESS = False _CHECK_HASH = False _USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0 __all__ = ['set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained', 'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg'] def _resolve_pretrained_source(pretrained_cfg): cfg_source = pretrained_cfg.get('source', '') pretrained_url = pretrained_cfg.get('url', None) pretrained_file = pretrained_cfg.get('file', None) pretrained_sd = pretrained_cfg.get('state_dict', None) hf_hub_id = pretrained_cfg.get('hf_hub_id', None) # resolve where to load pretrained weights from load_from = '' pretrained_loc = '' if cfg_source == 'hf-hub' and has_hf_hub(necessary=True): # hf-hub specified as source via model identifier load_from = 'hf-hub' assert hf_hub_id pretrained_loc = hf_hub_id else: # default source == timm or unspecified if pretrained_sd: # direct state_dict pass through is the highest priority load_from = 'state_dict' pretrained_loc = pretrained_sd assert isinstance(pretrained_loc, dict) elif pretrained_file: # file load override is the second-highest priority if set load_from = 'file' pretrained_loc = pretrained_file else: old_cache_valid = False if _USE_OLD_CACHE: # prioritized old cached weights if exists and env var enabled old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True): # hf-hub available as alternate weight source in default_cfg load_from = 'hf-hub' pretrained_loc = hf_hub_id elif pretrained_url: load_from = 'url' pretrained_loc = pretrained_url if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None): # if a filename override is set, return tuple for location w/ (hub_id, filename) pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename'] return load_from, pretrained_loc def set_pretrained_download_progress(enable=True): """ Set download progress for pretrained weights on/off (globally). """ global _DOWNLOAD_PROGRESS _DOWNLOAD_PROGRESS = enable def set_pretrained_check_hash(enable=True): """ Set hash checking for pretrained weights on/off (globally). """ global _CHECK_HASH _CHECK_HASH = enable def load_custom_pretrained( model: nn.Module, pretrained_cfg: Optional[Dict] = None, load_fn: Optional[Callable] = None, ): r"""Loads a custom (read non .pth) weight file Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls a passed in custom load fun, or the `load_pretrained` model member fn. If the object is already present in `model_dir`, it's deserialized and returned. 
The default value of `model_dir` is ``<hub_dir>/checkpoints`` where `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`. Args: model: The instantiated model to load weights into pretrained_cfg (dict): Default pretrained model cfg load_fn: An external standalone fn that loads weights into provided model, otherwise a fn named 'laod_pretrained' on the model will be called if it exists """ pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) if not pretrained_cfg: _logger.warning("Invalid pretrained config, cannot load weights.") return load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg) if not load_from: _logger.warning("No pretrained weights exist for this model. Using random initialization.") return if load_from == 'hf-hub': _logger.warning("Hugging Face hub not currently supported for custom load pretrained models.") elif load_from == 'url': pretrained_loc = download_cached_file( pretrained_loc, check_hash=_CHECK_HASH, progress=_DOWNLOAD_PROGRESS, ) if load_fn is not None: load_fn(model, pretrained_loc) elif hasattr(model, 'load_pretrained'): model.load_pretrained(pretrained_loc) else: _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") def load_pretrained( model: nn.Module, pretrained_cfg: Optional[Dict] = None, num_classes: int = 1000, in_chans: int = 3, filter_fn: Optional[Callable] = None, strict: bool = True, ): """ Load pretrained checkpoint Args: model (nn.Module) : PyTorch model module pretrained_cfg (Optional[Dict]): configuration for pretrained weights / target dataset num_classes (int): num_classes for target model in_chans (int): in_chans for target model filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args) strict (bool): strict load of checkpoint """ pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) if not pretrained_cfg: raise RuntimeError("Invalid pretrained config, cannot load weights. Use `pretrained=False` for random init.") load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg) if load_from == 'state_dict': _logger.info(f'Loading pretrained weights from state dict') state_dict = pretrained_loc # pretrained_loc is the actual state dict for this override elif load_from == 'file': _logger.info(f'Loading pretrained weights from file ({pretrained_loc})') if pretrained_cfg.get('custom_load', False): model.load_pretrained(pretrained_loc) return else: state_dict = load_state_dict(pretrained_loc) elif load_from == 'url': _logger.info(f'Loading pretrained weights from url ({pretrained_loc})') if pretrained_cfg.get('custom_load', False): pretrained_loc = download_cached_file( pretrained_loc, progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH, ) model.load_pretrained(pretrained_loc) return else: state_dict = load_state_dict_from_url( pretrained_loc, map_location='cpu', progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH, ) elif load_from == 'hf-hub': _logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})') if isinstance(pretrained_loc, (list, tuple)): state_dict = load_state_dict_from_hf(*pretrained_loc) else: state_dict = load_state_dict_from_hf(pretrained_loc) else: model_name = pretrained_cfg.get('architecture', 'this model') raise RuntimeError(f"No pretrained weights exist for {model_name}. 
Use `pretrained=False` for random init.") if filter_fn is not None: try: state_dict = filter_fn(state_dict, model) except TypeError as e: # for backwards compat with filter fn that take one arg state_dict = filter_fn(state_dict) input_convs = pretrained_cfg.get('first_conv', None) if input_convs is not None and in_chans != 3: if isinstance(input_convs, str): input_convs = (input_convs,) for input_conv_name in input_convs: weight_name = input_conv_name + '.weight' try: state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) _logger.info( f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') except NotImplementedError as e: del state_dict[weight_name] strict = False _logger.warning( f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') classifiers = pretrained_cfg.get('classifier', None) label_offset = pretrained_cfg.get('label_offset', 0) if classifiers is not None: if isinstance(classifiers, str): classifiers = (classifiers,) if num_classes != pretrained_cfg['num_classes']: for classifier_name in classifiers: # completely discard fully connected if model num_classes doesn't match pretrained weights state_dict.pop(classifier_name + '.weight', None) state_dict.pop(classifier_name + '.bias', None) strict = False elif label_offset > 0: for classifier_name in classifiers: # special case for pretrained weights with an extra background class in pretrained weights classifier_weight = state_dict[classifier_name + '.weight'] state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] classifier_bias = state_dict[classifier_name + '.bias'] state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] load_result = model.load_state_dict(state_dict, strict=strict) if load_result.missing_keys: _logger.info( f'Missing keys ({", ".join(load_result.missing_keys)}) discovered while loading pretrained weights.' f' This is expected if model is being adapted.') if load_result.unexpected_keys: _logger.warning( f'Unexpected keys ({", ".join(load_result.unexpected_keys)}) found while loading pretrained weights.' f' This may be expected if model is being adapted.') def pretrained_cfg_for_features(pretrained_cfg): pretrained_cfg = deepcopy(pretrained_cfg) # remove default pretrained cfg fields that don't have much relevance for feature backbone to_remove = ('num_classes', 'classifier', 'global_pool') # add default final pool size? 
for tr in to_remove: pretrained_cfg.pop(tr, None) return pretrained_cfg def _filter_kwargs(kwargs, names): if not kwargs or not names: return for n in names: kwargs.pop(n, None) def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter): """ Update the default_cfg and kwargs before passing to model Args: pretrained_cfg: input pretrained cfg (updated in-place) kwargs: keyword args passed to model build fn (updated in-place) kwargs_filter: keyword arg keys that must be removed before model __init__ """ # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs) default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') if pretrained_cfg.get('fixed_input_size', False): # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size default_kwarg_names += ('img_size',) for n in default_kwarg_names: # for legacy reasons, model __init__args uses img_size + in_chans as separate args while # pretrained_cfg has one input_size=(C, H ,W) entry if n == 'img_size': input_size = pretrained_cfg.get('input_size', None) if input_size is not None: assert len(input_size) == 3 kwargs.setdefault(n, input_size[-2:]) elif n == 'in_chans': input_size = pretrained_cfg.get('input_size', None) if input_size is not None: assert len(input_size) == 3 kwargs.setdefault(n, input_size[0]) elif n == 'num_classes': default_val = pretrained_cfg.get(n, None) # if default is < 0, don't pass through to model if default_val is not None and default_val >= 0: kwargs.setdefault(n, pretrained_cfg[n]) else: default_val = pretrained_cfg.get(n, None) if default_val is not None: kwargs.setdefault(n, pretrained_cfg[n]) # Filter keyword args for task specific model variants (some 'features only' models, etc.) _filter_kwargs(kwargs, names=kwargs_filter) def resolve_pretrained_cfg( variant: str, pretrained_cfg=None, pretrained_cfg_overlay=None, ) -> PretrainedCfg: model_with_tag = variant pretrained_tag = None if pretrained_cfg: if isinstance(pretrained_cfg, dict): # pretrained_cfg dict passed as arg, validate by converting to PretrainedCfg pretrained_cfg = PretrainedCfg(**pretrained_cfg) elif isinstance(pretrained_cfg, str): pretrained_tag = pretrained_cfg pretrained_cfg = None # fallback to looking up pretrained cfg in model registry by variant identifier if not pretrained_cfg: if pretrained_tag: model_with_tag = '.'.join([variant, pretrained_tag]) pretrained_cfg = get_pretrained_cfg(model_with_tag) if not pretrained_cfg: _logger.warning( f"No pretrained configuration specified for {model_with_tag} model. Using a default." 
f" Please add a config to the model pretrained_cfg registry or pass explicitly.") pretrained_cfg = PretrainedCfg() # instance with defaults pretrained_cfg_overlay = pretrained_cfg_overlay or {} if not pretrained_cfg.architecture: pretrained_cfg_overlay.setdefault('architecture', variant) pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay) return pretrained_cfg def build_model_with_cfg( model_cls: Callable, variant: str, pretrained: bool, pretrained_cfg: Optional[Dict] = None, pretrained_cfg_overlay: Optional[Dict] = None, model_cfg: Optional[Any] = None, feature_cfg: Optional[Dict] = None, pretrained_strict: bool = True, pretrained_filter_fn: Optional[Callable] = None, kwargs_filter: Optional[Tuple[str]] = None, **kwargs, ): """ Build model with specified default_cfg and optional model_cfg This helper fn aids in the construction of a model including: * handling default_cfg and associated pretrained weight loading * passing through optional model_cfg for models with config based arch spec * features_only model adaptation * pruning config / model adaptation Args: model_cls (nn.Module): model class variant (str): model variant name pretrained (bool): load pretrained weights pretrained_cfg (dict): model's pretrained weight/task config model_cfg (Optional[Dict]): model's architecture config feature_cfg (Optional[Dict]: feature extraction adapter config pretrained_strict (bool): load pretrained weights strictly pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model **kwargs: model args passed through to model __init__ """ pruned = kwargs.pop('pruned', False) features = False feature_cfg = feature_cfg or {} # resolve and update model pretrained config and model kwargs pretrained_cfg = resolve_pretrained_cfg( variant, pretrained_cfg=pretrained_cfg, pretrained_cfg_overlay=pretrained_cfg_overlay ) # FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model pretrained_cfg = pretrained_cfg.to_dict() _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter) # Setup for feature extraction wrapper done at end of this fn if kwargs.pop('features_only', False): features = True feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) if 'out_indices' in kwargs: feature_cfg['out_indices'] = kwargs.pop('out_indices') # Instantiate the model if model_cfg is None: model = model_cls(**kwargs) else: model = model_cls(cfg=model_cfg, **kwargs) model.pretrained_cfg = pretrained_cfg model.default_cfg = model.pretrained_cfg # alias for backwards compat if pruned: model = adapt_model_from_file(model, variant) # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) if pretrained: load_pretrained( model, pretrained_cfg=pretrained_cfg, num_classes=num_classes_pretrained, in_chans=kwargs.get('in_chans', 3), filter_fn=pretrained_filter_fn, strict=pretrained_strict, ) # Wrap the model in a feature extraction module if enabled if features: feature_cls = FeatureListNet output_fmt = getattr(model, 'output_fmt', None) if output_fmt is not None: feature_cfg.setdefault('output_fmt', output_fmt) if 'feature_cls' in feature_cfg: feature_cls = feature_cfg.pop('feature_cls') if isinstance(feature_cls, str): feature_cls = feature_cls.lower() if 'hook' in feature_cls: feature_cls = FeatureHookNet elif feature_cls 
== 'fx': feature_cls = FeatureGraphNet else: assert False, f'Unknown feature class {feature_cls}' model = feature_cls(model, **feature_cfg) model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg) return model
pytorch-image-models/timm/models/_builder.py/0
{ "file_path": "pytorch-image-models/timm/models/_builder.py", "repo_id": "pytorch-image-models", "token_count": 7677 }
188
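To show how the builder pieces above are normally used together, here is a hedged sketch of a model entrypoint wired through build_model_with_cfg (the TinyNet architecture and the tinynet_test name are hypothetical, invented only for illustration; both helpers are re-exported from timm.models). With no registered pretrained cfg, resolve_pretrained_cfg falls back to a default PretrainedCfg and only fills in num_classes / in_chans kwargs:

import torch
import torch.nn as nn
from timm.models import build_model_with_cfg, register_model

class TinyNet(nn.Module):
    """Hypothetical toy architecture, only to illustrate the builder flow."""
    def __init__(self, num_classes=1000, in_chans=3, dim=32, **kwargs):
        super().__init__()
        self.stem = nn.Conv2d(in_chans, dim, 3, stride=2, padding=1)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.head = nn.Linear(dim, num_classes)

    def forward(self, x):
        return self.head(self.pool(self.stem(x)).flatten(1))

def _create_tinynet(variant, pretrained=False, **kwargs):
    # resolves the pretrained cfg, fills default kwargs, loads weights if pretrained=True
    return build_model_with_cfg(TinyNet, variant, pretrained, **kwargs)

@register_model
def tinynet_test(pretrained=False, **kwargs):
    return _create_tinynet('tinynet_test', pretrained=pretrained, **kwargs)

model = tinynet_test(num_classes=10)
print(model(torch.randn(1, 3, 64, 64)).shape)   # torch.Size([1, 10])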
""" Model Registry Hacked together by / Copyright 2020 Ross Wightman """ import fnmatch import re import sys import warnings from collections import defaultdict, deque from copy import deepcopy from dataclasses import replace from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple from ._pretrained import PretrainedCfg, DefaultCfg __all__ = [ 'split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs', 'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', 'get_pretrained_cfg_value', 'is_model_pretrained' ] _module_to_models: Dict[str, Set[str]] = defaultdict(set) # dict of sets to check membership of model in module _model_to_module: Dict[str, str] = {} # mapping of model names to module names _model_entrypoints: Dict[str, Callable[..., Any]] = {} # mapping of model names to architecture entrypoint fns _model_has_pretrained: Set[str] = set() # set of model names that have pretrained weight url present _model_default_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch -> default cfg objects _model_pretrained_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch.tag -> pretrained cfgs _model_with_tags: Dict[str, List[str]] = defaultdict(list) # shortcut to map each model arch to all model + tag names _module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict) _deprecated_models: Dict[str, Optional[str]] = {} def split_model_name_tag(model_name: str, no_tag: str = '') -> Tuple[str, str]: model_name, *tag_list = model_name.split('.', 1) tag = tag_list[0] if tag_list else no_tag return model_name, tag def get_arch_name(model_name: str) -> str: return split_model_name_tag(model_name)[0] def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]): out = defaultdict(DefaultCfg) default_set = set() # no tag and tags ending with * are prioritized as default for k, v in cfgs.items(): if isinstance(v, dict): v = PretrainedCfg(**v) has_weights = v.has_weights model, tag = split_model_name_tag(k) is_default_set = model in default_set priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set) tag = tag.strip('*') default_cfg = out[model] if priority: default_cfg.tags.appendleft(tag) default_set.add(model) elif has_weights and not default_cfg.is_pretrained: default_cfg.tags.appendleft(tag) else: default_cfg.tags.append(tag) if has_weights: default_cfg.is_pretrained = True default_cfg.cfgs[tag] = v return out def register_model(fn: Callable[..., Any]) -> Callable[..., Any]: # lookup containing module mod = sys.modules[fn.__module__] module_name_split = fn.__module__.split('.') module_name = module_name_split[-1] if len(module_name_split) else '' # add model to __all__ in module model_name = fn.__name__ if hasattr(mod, '__all__'): mod.__all__.append(model_name) else: mod.__all__ = [model_name] # type: ignore # add entries to registry dict/sets if model_name in _model_entrypoints: warnings.warn( f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. This is because the name being ' 'registered conflicts with an existing name. 
Please check if this is not expected.', stacklevel=2, ) _model_entrypoints[model_name] = fn _model_to_module[model_name] = module_name _module_to_models[module_name].add(model_name) if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs: # this will catch all models that have entrypoint matching cfg key, but miss any aliasing # entrypoints or non-matching combos default_cfg = mod.default_cfgs[model_name] if not isinstance(default_cfg, DefaultCfg): # new style default cfg dataclass w/ multiple entries per model-arch assert isinstance(default_cfg, dict) # old style cfg dict per model-arch pretrained_cfg = PretrainedCfg(**default_cfg) default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg}) for tag_idx, tag in enumerate(default_cfg.tags): is_default = tag_idx == 0 pretrained_cfg = default_cfg.cfgs[tag] model_name_tag = '.'.join([model_name, tag]) if tag else model_name replace_items = dict(architecture=model_name, tag=tag if tag else None) if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/': # auto-complete hub name w/ architecture.tag replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag pretrained_cfg = replace(pretrained_cfg, **replace_items) if is_default: _model_pretrained_cfgs[model_name] = pretrained_cfg if pretrained_cfg.has_weights: # add tagless entry if it's default and has weights _model_has_pretrained.add(model_name) if tag: _model_pretrained_cfgs[model_name_tag] = pretrained_cfg if pretrained_cfg.has_weights: # add model w/ tag if tag is valid _model_has_pretrained.add(model_name_tag) _model_with_tags[model_name].append(model_name_tag) else: _model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances) _model_default_cfgs[model_name] = default_cfg return fn def _deprecated_model_shim(deprecated_name: str, current_fn: Callable = None, current_tag: str = ''): def _fn(pretrained=False, **kwargs): assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.' 
current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__ warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2) pretrained_cfg = kwargs.pop('pretrained_cfg', None) return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs) return _fn def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]): mod = sys.modules[module_name] module_name_split = module_name.split('.') module_name = module_name_split[-1] if len(module_name_split) else '' for deprecated, current in deprecation_map.items(): if hasattr(mod, '__all__'): mod.__all__.append(deprecated) current_fn = None current_tag = '' if current: current_name, current_tag = split_model_name_tag(current) current_fn = getattr(mod, current_name) deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag) setattr(mod, deprecated, deprecated_entrypoint_fn) _model_entrypoints[deprecated] = deprecated_entrypoint_fn _model_to_module[deprecated] = module_name _module_to_models[module_name].add(deprecated) _deprecated_models[deprecated] = current _module_to_deprecated_models[module_name][deprecated] = current def _natural_key(string_: str) -> List[Union[int, str]]: """See https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/""" return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def _expand_filter(filter: str): """ expand a 'base_filter' to 'base_filter.*' if no tag portion""" filter_base, filter_tag = split_model_name_tag(filter) if not filter_tag: return ['.'.join([filter_base, '*']), filter] else: return [filter] def list_models( filter: Union[str, List[str]] = '', module: str = '', pretrained: bool = False, exclude_filters: Union[str, List[str]] = '', name_matches_cfg: bool = False, include_tags: Optional[bool] = None, ) -> List[str]: """ Return list of available model names, sorted alphabetically Args: filter - Wildcard filter string that works with fnmatch module - Limit model selection to a specific submodule (ie 'vision_transformer') pretrained - Include only models with valid pretrained weights if True exclude_filters - Wildcard filters to exclude models after including them with filter name_matches_cfg - Include only models w/ model_name matching default_cfg name (excludes some aliases) include_tags - Include pretrained tags in model names (model.tag). If None, defaults set to True when pretrained=True else False (default: None) Returns: models - The sorted list of models Example: model_list('gluon_resnet*') -- returns all models starting with 'gluon_resnet' model_list('*resnext*, 'resnet') -- returns all models with 'resnext' in 'resnet' module """ if filter: include_filters = filter if isinstance(filter, (tuple, list)) else [filter] else: include_filters = [] if include_tags is None: # FIXME should this be default behaviour? or default to include_tags=True? 
include_tags = pretrained all_models: Set[str] = _module_to_models[module] if module else set(_model_entrypoints.keys()) all_models = all_models - _deprecated_models.keys() # remove deprecated models from listings if include_tags: # expand model names to include names w/ pretrained tags models_with_tags: Set[str] = set() for m in all_models: models_with_tags.update(_model_with_tags[m]) all_models = models_with_tags # expand include and exclude filters to include a '.*' for proper match if no tags in filter include_filters = [ef for f in include_filters for ef in _expand_filter(f)] exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)] if include_filters: models: Set[str] = set() for f in include_filters: include_models = fnmatch.filter(all_models, f) # include these models if len(include_models): models = models.union(include_models) else: models = all_models if exclude_filters: if not isinstance(exclude_filters, (tuple, list)): exclude_filters = [exclude_filters] for xf in exclude_filters: exclude_models = fnmatch.filter(models, xf) # exclude these models if len(exclude_models): models = models.difference(exclude_models) if pretrained: models = _model_has_pretrained.intersection(models) if name_matches_cfg: models = set(_model_pretrained_cfgs).intersection(models) return sorted(models, key=_natural_key) def list_pretrained( filter: Union[str, List[str]] = '', exclude_filters: str = '', ) -> List[str]: return list_models( filter=filter, pretrained=True, exclude_filters=exclude_filters, include_tags=True, ) def get_deprecated_models(module: str = '') -> Dict[str, str]: all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models return deepcopy(all_deprecated) def is_model(model_name: str) -> bool: """ Check if a model name exists """ arch_name = get_arch_name(model_name) return arch_name in _model_entrypoints def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]: """Fetch a model entrypoint for specified model name """ arch_name = get_arch_name(model_name) if module_filter and arch_name not in _module_to_models.get(module_filter, {}): raise RuntimeError(f'Model ({model_name} not found in module {module_filter}.') return _model_entrypoints[arch_name] def list_modules() -> List[str]: """ Return list of module names that contain models / model entrypoints """ modules = _module_to_models.keys() return sorted(modules) def is_model_in_modules( model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]] ) -> bool: """Check if a model exists within a subset of modules Args: model_name - name of model to check module_names - names of modules to search in """ arch_name = get_arch_name(model_name) assert isinstance(module_names, (tuple, list, set)) return any(arch_name in _module_to_models[n] for n in module_names) def is_model_pretrained(model_name: str) -> bool: return model_name in _model_has_pretrained def get_pretrained_cfg(model_name: str, allow_unregistered: bool = True) -> Optional[PretrainedCfg]: if model_name in _model_pretrained_cfgs: return deepcopy(_model_pretrained_cfgs[model_name]) arch_name, tag = split_model_name_tag(model_name) if arch_name in _model_default_cfgs: # if model arch exists, but the tag is wrong, error out raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.') if allow_unregistered: # if model arch doesn't exist, it has no pretrained_cfg registered, allow a default to be created return None raise RuntimeError(f'Model architecture ({arch_name}) has no 
pretrained cfg registered.') def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]: """ Get a specific model default_cfg value by key. None if key doesn't exist. """ cfg = get_pretrained_cfg(model_name, allow_unregistered=False) return getattr(cfg, cfg_key, None)
pytorch-image-models/timm/models/_registry.py/0
{ "file_path": "pytorch-image-models/timm/models/_registry.py", "repo_id": "pytorch-image-models", "token_count": 5428 }
189
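A short sketch of querying the registry populated by register_model (assumes timm is installed; the exact names returned depend on the installed version):

import timm

print(timm.list_models('edgenext*'))                    # wildcard filter on architecture names
print(timm.list_models('edgenext*', pretrained=True))   # arch.tag names that ship weights
print(timm.is_model('edgenext_small'))                  # True
entrypoint = timm.model_entrypoint('edgenext_small')    # fn registered by @register_model
print(entrypoint.__module__)                            # timm.models.edgenext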
""" EdgeNeXt Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications` - https://arxiv.org/abs/2206.10589 Original code and weights from https://github.com/mmaaz60/EdgeNeXt Modifications and additions for timm by / Copyright 2022, Ross Wightman """ import math from collections import OrderedDict from functools import partial from typing import Tuple import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, SelectAdaptivePool2d, create_conv2d, \ use_fused_attn, NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import named_apply, checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['EdgeNeXt'] # model_registry will add each entrypoint fn to this @register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method class PositionalEncodingFourier(nn.Module): def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim def forward(self, shape: Tuple[int, int, int]): device = self.token_projection.weight.device dtype = self.token_projection.weight.dtype inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool) y_embed = inv_mask.cumsum(1, dtype=torch.float32) x_embed = inv_mask.cumsum(2, dtype=torch.float32) eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.hidden_dim, dtype=torch.int64, device=device).to(torch.float32) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack( (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack( (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos.to(dtype)) return pos class ConvBlock(nn.Module): def __init__( self, dim, dim_out=None, kernel_size=7, stride=1, conv_bias=True, expand_ratio=4, ls_init_value=1e-6, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., ): super().__init__() dim_out = dim_out or dim self.shortcut_after_dw = stride > 1 or dim != dim_out self.conv_dw = create_conv2d( dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias) self.norm = norm_layer(dim_out) self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.conv_dw(x) if self.shortcut_after_dw: shortcut = x x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = shortcut + self.drop_path(x) return x class CrossCovarianceAttn(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0. 
): super().__init__() self.num_heads = num_heads self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1) q, k, v = qkv.unbind(0) # NOTE, this is NOT spatial attn, q, k, v are B, num_heads, C, L --> C x C attn map attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v) x = x.permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class SplitTransposeBlock(nn.Module): def __init__( self, dim, num_scales=1, num_heads=8, expand_ratio=4, use_pos_emb=True, conv_bias=True, qkv_bias=True, ls_init_value=1e-6, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., attn_drop=0., proj_drop=0. ): super().__init__() width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales))) self.width = width self.num_scales = max(1, num_scales - 1) convs = [] for i in range(self.num_scales): convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias)) self.convs = nn.ModuleList(convs) self.pos_embd = None if use_pos_emb: self.pos_embd = PositionalEncodingFourier(dim=dim) self.norm_xca = norm_layer(dim) self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.xca = CrossCovarianceAttn( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.norm = norm_layer(dim, eps=1e-6) self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): shortcut = x # scales code re-written for torchscript as per my res2net fixes -rw # NOTE torch.split(x, self.width, 1) causing issues with ONNX export spx = x.chunk(len(self.convs) + 1, dim=1) spo = [] sp = spx[0] for i, conv in enumerate(self.convs): if i > 0: sp = sp + spx[i] sp = conv(sp) spo.append(sp) spo.append(spx[-1]) x = torch.cat(spo, 1) # XCA B, C, H, W = x.shape x = x.reshape(B, C, H * W).permute(0, 2, 1) if self.pos_embd is not None: pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x))) x = x.reshape(B, H, W, C) # Inverted Bottleneck x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = shortcut + self.drop_path(x) return x class EdgeNeXtStage(nn.Module): def __init__( self, in_chs, out_chs, stride=2, depth=2, num_global_blocks=1, num_heads=4, scales=2, kernel_size=7, expand_ratio=4, use_pos_emb=False, downsample_block=False, conv_bias=True, ls_init_value=1.0, drop_path_rates=None, norm_layer=LayerNorm2d, norm_layer_cl=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU ): super().__init__() self.grad_checkpointing = False if downsample_block or stride == 1: self.downsample = nn.Identity() else: self.downsample = nn.Sequential( norm_layer(in_chs), nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias) ) in_chs = out_chs stage_blocks = [] for i in range(depth): if i < depth - num_global_blocks: stage_blocks.append( ConvBlock( dim=in_chs, dim_out=out_chs, stride=stride if downsample_block and i == 0 else 1, conv_bias=conv_bias, kernel_size=kernel_size, expand_ratio=expand_ratio, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) else: stage_blocks.append( SplitTransposeBlock( dim=in_chs, num_scales=scales, num_heads=num_heads, expand_ratio=expand_ratio, use_pos_emb=use_pos_emb, conv_bias=conv_bias, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EdgeNeXt(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', dims=(24, 48, 88, 168), depths=(3, 3, 9, 3), global_block_counts=(0, 1, 1, 1), kernel_sizes=(3, 5, 7, 9), heads=(8, 8, 8, 8), d2_scales=(2, 2, 3, 4), use_pos_emb=(False, True, False, False), ls_init_value=1e-6, head_init_scale=1., expand_ratio=4, downsample_block=False, conv_bias=True, stem_type='patch', head_norm_first=False, act_layer=nn.GELU, drop_path_rate=0., drop_rate=0., ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.drop_rate = drop_rate norm_layer = partial(LayerNorm2d, eps=1e-6) norm_layer_cl = partial(nn.LayerNorm, eps=1e-6) self.feature_info = [] assert stem_type in ('patch', 'overlap') if stem_type == 'patch': self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias), norm_layer(dims[0]), ) else: self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias), norm_layer(dims[0]), ) curr_stride = 4 stages = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, 
sum(depths)).split(depths)] in_chs = dims[0] for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 # FIXME support dilation / output_stride curr_stride *= stride stages.append(EdgeNeXtStage( in_chs=in_chs, out_chs=dims[i], stride=stride, depth=depths[i], num_global_blocks=global_block_counts[i], num_heads=heads[i], drop_path_rates=dp_rates[i], scales=d2_scales[i], expand_ratio=expand_ratio, kernel_size=kernel_sizes[i], use_pos_emb=use_pos_emb[i], ls_init_value=ls_init_value, downsample_block=downsample_block, conv_bias=conv_bias, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, act_layer=act_layer, )) # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 in_chs = dims[i] self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = dims[-1] if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm_pre', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes=0, global_pool=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_tf_(module.weight, std=.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap FB checkpoints -> timm """ if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict # non-FB checkpoint # models were released as train checkpoints... 
:/ if 'model_ema' in state_dict: state_dict = state_dict['model_ema'] elif 'model' in state_dict: state_dict = state_dict['model'] elif 'state_dict' in state_dict: state_dict = state_dict['state_dict'] out_dict = {} import re for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_edgenext(variant, pretrained=False, **kwargs): model = build_model_with_cfg( EdgeNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'edgenext_xx_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_x_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_small.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.in21k_ft_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_small_rw.sw_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 320, 320), test_crop_pct=1.0, ), }) @register_model def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt: # 1.33M & 260.58M @ 256 resolution # 71.23% Top-1 accuracy # No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=51.66 versus 47.67 for MobileViT_XXS # For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS model_args = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4)) return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt: # 2.34M & 538.0M @ 256 resolution # 75.00% Top-1 accuracy # No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=31.61 versus 28.49 for MobileViT_XS # For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS model_args = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4)) return _create_edgenext('edgenext_x_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt: # 5.59M & 1260.59M @ 256 resolution # 79.43% Top-1 accuracy # AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=20.47 versus 18.86 for MobileViT_S # For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 
1785.92 for MobileViT_S model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304)) return _create_edgenext('edgenext_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt: # 18.51M & 3840.93M @ 256 resolution # 82.5% (normal) 83.7% (USI) Top-1 accuracy # AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=xx.xx versus xx.xx for MobileViT_S # For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx model_args = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584]) return _create_edgenext('edgenext_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict( depths=(3, 3, 9, 3), dims=(48, 96, 192, 384), downsample_block=True, conv_bias=False, stem_type='overlap') return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/edgenext.py/0
{ "file_path": "pytorch-image-models/timm/models/edgenext.py", "repo_id": "pytorch-image-models", "token_count": 11040 }
190
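A brief sketch of instantiating the model above through timm (random init here; pretrained=True would pull the hf_hub weights listed in default_cfgs). The features_only path exercises the per-stage feature_info entries built in EdgeNeXt.__init__:

import torch
import timm

model = timm.create_model('edgenext_small', pretrained=False, num_classes=10)
x = torch.randn(1, 3, 256, 256)           # default cfg input_size is (3, 256, 256)
print(model(x).shape)                     # torch.Size([1, 10])

feats = timm.create_model('edgenext_small', pretrained=False, features_only=True)
print(feats.feature_info.reduction())     # [4, 8, 16, 32], matching the stage strides
for f in feats(x):
    print(f.shape)                        # one NCHW feature map per stage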
""" PP-HGNet (V1 & V2) Reference: https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/ImageNet1k/PP-HGNetV2.md The Paddle Implement of PP-HGNet (https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/docs/en/models/PP-HGNet_en.md) PP-HGNet: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet.py PP-HGNetv2: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet_v2.py """ import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, DropPath, create_conv2d from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['HighPerfGpuNet'] class LearnableAffineBlock(nn.Module): def __init__( self, scale_value=1.0, bias_value=0.0 ): super().__init__() self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True) self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True) def forward(self, x): return self.scale * x + self.bias class ConvBNAct(nn.Module): def __init__( self, in_chs, out_chs, kernel_size, stride=1, groups=1, padding='', use_act=True, use_lab=False ): super().__init__() self.use_act = use_act self.use_lab = use_lab self.conv = create_conv2d( in_chs, out_chs, kernel_size, stride=stride, padding=padding, groups=groups, ) self.bn = nn.BatchNorm2d(out_chs) if self.use_act: self.act = nn.ReLU() else: self.act = nn.Identity() if self.use_act and self.use_lab: self.lab = LearnableAffineBlock() else: self.lab = nn.Identity() def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.lab(x) return x class LightConvBNAct(nn.Module): def __init__( self, in_chs, out_chs, kernel_size, groups=1, use_lab=False ): super().__init__() self.conv1 = ConvBNAct( in_chs, out_chs, kernel_size=1, use_act=False, use_lab=use_lab, ) self.conv2 = ConvBNAct( out_chs, out_chs, kernel_size=kernel_size, groups=out_chs, use_act=True, use_lab=use_lab, ) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class EseModule(nn.Module): def __init__(self, chs): super().__init__() self.conv = nn.Conv2d( chs, chs, kernel_size=1, stride=1, padding=0, ) self.sigmoid = nn.Sigmoid() def forward(self, x): identity = x x = x.mean((2, 3), keepdim=True) x = self.conv(x) x = self.sigmoid(x) return torch.mul(identity, x) class StemV1(nn.Module): # for PP-HGNet def __init__(self, stem_chs): super().__init__() self.stem = nn.Sequential(*[ ConvBNAct( stem_chs[i], stem_chs[i + 1], kernel_size=3, stride=2 if i == 0 else 1) for i in range( len(stem_chs) - 1) ]) self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.stem(x) x = self.pool(x) return x class StemV2(nn.Module): # for PP-HGNetv2 def __init__(self, in_chs, mid_chs, out_chs, use_lab=False): super().__init__() self.stem1 = ConvBNAct( in_chs, mid_chs, kernel_size=3, stride=2, use_lab=use_lab, ) self.stem2a = ConvBNAct( mid_chs, mid_chs // 2, kernel_size=2, stride=1, use_lab=use_lab, ) self.stem2b = ConvBNAct( mid_chs // 2, mid_chs, kernel_size=2, stride=1, use_lab=use_lab, ) self.stem3 = ConvBNAct( mid_chs * 2, mid_chs, kernel_size=3, stride=2, use_lab=use_lab, ) self.stem4 = ConvBNAct( mid_chs, out_chs, kernel_size=1, stride=1, use_lab=use_lab, ) self.pool = nn.MaxPool2d(kernel_size=2, stride=1, ceil_mode=True) def forward(self, x): x = self.stem1(x) x = F.pad(x, (0, 1, 0, 1)) x2 = 
self.stem2a(x) x2 = F.pad(x2, (0, 1, 0, 1)) x2 = self.stem2b(x2) x1 = self.pool(x) x = torch.cat([x1, x2], dim=1) x = self.stem3(x) x = self.stem4(x) return x class HighPerfGpuBlock(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, layer_num, kernel_size=3, residual=False, light_block=False, use_lab=False, agg='ese', drop_path=0., ): super().__init__() self.residual = residual self.layers = nn.ModuleList() for i in range(layer_num): if light_block: self.layers.append( LightConvBNAct( in_chs if i == 0 else mid_chs, mid_chs, kernel_size=kernel_size, use_lab=use_lab, ) ) else: self.layers.append( ConvBNAct( in_chs if i == 0 else mid_chs, mid_chs, kernel_size=kernel_size, stride=1, use_lab=use_lab, ) ) # feature aggregation total_chs = in_chs + layer_num * mid_chs if agg == 'se': aggregation_squeeze_conv = ConvBNAct( total_chs, out_chs // 2, kernel_size=1, stride=1, use_lab=use_lab, ) aggregation_excitation_conv = ConvBNAct( out_chs // 2, out_chs, kernel_size=1, stride=1, use_lab=use_lab, ) self.aggregation = nn.Sequential( aggregation_squeeze_conv, aggregation_excitation_conv, ) else: aggregation_conv = ConvBNAct( total_chs, out_chs, kernel_size=1, stride=1, use_lab=use_lab, ) att = EseModule(out_chs) self.aggregation = nn.Sequential( aggregation_conv, att, ) self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() def forward(self, x): identity = x output = [x] for layer in self.layers: x = layer(x) output.append(x) x = torch.cat(output, dim=1) x = self.aggregation(x) if self.residual: x = self.drop_path(x) + identity return x class HighPerfGpuStage(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, block_num, layer_num, downsample=True, stride=2, light_block=False, kernel_size=3, use_lab=False, agg='ese', drop_path=0., ): super().__init__() self.downsample = downsample if downsample: self.downsample = ConvBNAct( in_chs, in_chs, kernel_size=3, stride=stride, groups=in_chs, use_act=False, use_lab=use_lab, ) else: self.downsample = nn.Identity() blocks_list = [] for i in range(block_num): blocks_list.append( HighPerfGpuBlock( in_chs if i == 0 else out_chs, mid_chs, out_chs, layer_num, residual=False if i == 0 else True, kernel_size=kernel_size, light_block=light_block, use_lab=use_lab, agg=agg, drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path, ) ) self.blocks = nn.Sequential(*blocks_list) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class ClassifierHead(nn.Module): def __init__( self, num_features, num_classes, pool_type='avg', drop_rate=0., use_last_conv=True, class_expand=2048, use_lab=False ): super(ClassifierHead, self).__init__() self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=False, input_fmt='NCHW') if use_last_conv: last_conv = nn.Conv2d( num_features, class_expand, kernel_size=1, stride=1, padding=0, bias=False, ) act = nn.ReLU() if use_lab: lab = LearnableAffineBlock() self.last_conv = nn.Sequential(last_conv, act, lab) else: self.last_conv = nn.Sequential(last_conv, act) else: self.last_conv = nn.Indentity() if drop_rate > 0: self.dropout = nn.Dropout(drop_rate) else: self.dropout = nn.Identity() self.flatten = nn.Flatten() self.fc = nn.Linear(class_expand if use_last_conv else num_features, num_classes) def forward(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.last_conv(x) x = self.dropout(x) x = self.flatten(x) if pre_logits: return x x = self.fc(x) return x class HighPerfGpuNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, 
global_pool='avg', use_last_conv=True, class_expand=2048, drop_rate=0., drop_path_rate=0., use_lab=False, **kwargs, ): super(HighPerfGpuNet, self).__init__() stem_type = cfg["stem_type"] stem_chs = cfg["stem_chs"] stages_cfg = [cfg["stage1"], cfg["stage2"], cfg["stage3"], cfg["stage4"]] self.num_classes = num_classes self.drop_rate = drop_rate self.use_last_conv = use_last_conv self.class_expand = class_expand self.use_lab = use_lab assert stem_type in ['v1', 'v2'] if stem_type == 'v2': self.stem = StemV2( in_chs=in_chans, mid_chs=stem_chs[0], out_chs=stem_chs[1], use_lab=use_lab) else: self.stem = StemV1([in_chans] + stem_chs) current_stride = 4 stages = [] self.feature_info = [] block_depths = [c[3] for c in stages_cfg] dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(block_depths)).split(block_depths)] for i, stage_config in enumerate(stages_cfg): in_chs, mid_chs, out_chs, block_num, downsample, light_block, kernel_size, layer_num = stage_config stages += [HighPerfGpuStage( in_chs=in_chs, mid_chs=mid_chs, out_chs=out_chs, block_num=block_num, layer_num=layer_num, downsample=downsample, light_block=light_block, kernel_size=kernel_size, use_lab=use_lab, agg='ese' if stem_type == 'v1' else 'se', drop_path=dpr[i], )] self.num_features = out_chs if downsample: current_stride *= 2 self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) if num_classes > 0: self.head = ClassifierHead( self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate, use_last_conv=use_last_conv, class_expand=class_expand, use_lab=use_lab ) else: if global_pool == 'avg': self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: self.head = nn.Identity() for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)', ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes if num_classes > 0: self.head = ClassifierHead( self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_last_conv=self.use_last_conv, class_expand=self.class_expand, use_lab=self.use_lab) else: if global_pool: self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: self.head = nn.Identity() def forward_features(self, x): x = self.stem(x) return self.stages(x) def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x model_cfgs = dict( # PP-HGNet hgnet_tiny={ "stem_type": 'v1', "stem_chs": [48, 48, 96], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [96, 96, 224, 1, False, False, 3, 5], "stage2": [224, 128, 448, 1, True, False, 3, 5], "stage3": [448, 160, 512, 2, True, False, 3, 5], "stage4": [512, 192, 768, 1, True, False, 3, 5], }, hgnet_small={ "stem_type": 'v1', "stem_chs": 
[64, 64, 128], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [128, 128, 256, 1, False, False, 3, 6], "stage2": [256, 160, 512, 1, True, False, 3, 6], "stage3": [512, 192, 768, 2, True, False, 3, 6], "stage4": [768, 224, 1024, 1, True, False, 3, 6], }, hgnet_base={ "stem_type": 'v1', "stem_chs": [96, 96, 160], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [160, 192, 320, 1, False, False, 3, 7], "stage2": [320, 224, 640, 2, True, False, 3, 7], "stage3": [640, 256, 960, 3, True, False, 3, 7], "stage4": [960, 288, 1280, 2, True, False, 3, 7], }, # PP-HGNetv2 hgnetv2_b0={ "stem_type": 'v2', "stem_chs": [16, 16], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [16, 16, 64, 1, False, False, 3, 3], "stage2": [64, 32, 256, 1, True, False, 3, 3], "stage3": [256, 64, 512, 2, True, True, 5, 3], "stage4": [512, 128, 1024, 1, True, True, 5, 3], }, hgnetv2_b1={ "stem_type": 'v2', "stem_chs": [24, 32], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [32, 32, 64, 1, False, False, 3, 3], "stage2": [64, 48, 256, 1, True, False, 3, 3], "stage3": [256, 96, 512, 2, True, True, 5, 3], "stage4": [512, 192, 1024, 1, True, True, 5, 3], }, hgnetv2_b2={ "stem_type": 'v2', "stem_chs": [24, 32], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [32, 32, 96, 1, False, False, 3, 4], "stage2": [96, 64, 384, 1, True, False, 3, 4], "stage3": [384, 128, 768, 3, True, True, 5, 4], "stage4": [768, 256, 1536, 1, True, True, 5, 4], }, hgnetv2_b3={ "stem_type": 'v2', "stem_chs": [24, 32], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [32, 32, 128, 1, False, False, 3, 5], "stage2": [128, 64, 512, 1, True, False, 3, 5], "stage3": [512, 128, 1024, 3, True, True, 5, 5], "stage4": [1024, 256, 2048, 1, True, True, 5, 5], }, hgnetv2_b4={ "stem_type": 'v2', "stem_chs": [32, 48], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [48, 48, 128, 1, False, False, 3, 6], "stage2": [128, 96, 512, 1, True, False, 3, 6], "stage3": [512, 192, 1024, 3, True, True, 5, 6], "stage4": [1024, 384, 2048, 1, True, True, 5, 6], }, hgnetv2_b5={ "stem_type": 'v2', "stem_chs": [32, 64], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [64, 64, 128, 1, False, False, 3, 6], "stage2": [128, 128, 512, 2, True, False, 3, 6], "stage3": [512, 256, 1024, 5, True, True, 5, 6], "stage4": [1024, 512, 2048, 2, True, True, 5, 6], }, hgnetv2_b6={ "stem_type": 'v2', "stem_chs": [48, 96], # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num "stage1": [96, 96, 192, 2, False, False, 3, 6], "stage2": [192, 192, 512, 3, True, False, 3, 6], "stage3": [512, 384, 1024, 6, True, True, 5, 6], "stage4": [1024, 768, 2048, 3, True, True, 5, 6], }, ) def _create_hgnet(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) return build_model_with_cfg( HighPerfGpuNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.965, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head.fc', 'first_conv': 
'stem.stem1.conv', 'test_crop_pct': 1.0, 'test_input_size': (3, 288, 288), **kwargs, } default_cfgs = generate_default_cfgs({ 'hgnet_tiny.paddle_in1k': _cfg( first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnet_tiny.ssld_in1k': _cfg( first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnet_small.paddle_in1k': _cfg( first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnet_small.ssld_in1k': _cfg( first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnet_base.ssld_in1k': _cfg( first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnetv2_b0.ssld_stage2_ft_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b0.ssld_stage1_in22k_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b1.ssld_stage2_ft_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b1.ssld_stage1_in22k_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b2.ssld_stage2_ft_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b2.ssld_stage1_in22k_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b3.ssld_stage2_ft_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b3.ssld_stage1_in22k_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b4.ssld_stage2_ft_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b4.ssld_stage1_in22k_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b5.ssld_stage2_ft_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b5.ssld_stage1_in22k_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b6.ssld_stage2_ft_in1k': _cfg( hf_hub_id='timm/'), 'hgnetv2_b6.ssld_stage1_in22k_in1k': _cfg( hf_hub_id='timm/'), }) @register_model def hgnet_tiny(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnet_tiny', pretrained=pretrained, **kwargs) @register_model def hgnet_small(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnet_small', pretrained=pretrained, **kwargs) @register_model def hgnet_base(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnet_base', pretrained=pretrained, **kwargs) @register_model def hgnetv2_b0(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b0', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b1(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b1', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b2(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b2', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b3(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b3', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b4(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b4', pretrained=pretrained, **kwargs) @register_model def hgnetv2_b5(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b5', pretrained=pretrained, **kwargs) @register_model def hgnetv2_b6(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b6', pretrained=pretrained, **kwargs)
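

# Editor's note: the usage sketch below is an addition, not part of the original hgnet.py.
# It assumes a standard `timm` installation in which the entrypoints registered above
# ('hgnet_tiny', 'hgnetv2_b0', ...) resolve through `timm.create_model`; input resolution
# follows the 224x224 default_cfgs above.
if __name__ == '__main__':
    import torch
    import timm

    # classification: stem -> four HighPerfGpuStages -> ClassifierHead
    model = timm.create_model('hgnetv2_b0', pretrained=False, num_classes=10)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 10])

    # feature extraction uses the feature_info entries populated in HighPerfGpuNet.__init__
    features = timm.create_model('hgnetv2_b0', pretrained=False, features_only=True)
    for fmap in features(torch.randn(1, 3, 224, 224)):
        print(fmap.shape)  # four NCHW maps at strides 4, 8, 16, 32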
pytorch-image-models/timm/models/hgnet.py/0
{ "file_path": "pytorch-image-models/timm/models/hgnet.py", "repo_id": "pytorch-image-models", "token_count": 13284 }
191
""" Nested Transformer (NesT) in PyTorch A PyTorch implement of Aggregating Nested Transformers as described in: 'Aggregating Nested Transformers' - https://arxiv.org/abs/2105.12723 The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights have been converted with convert/convert_nest_flax.py Acknowledgments: * The paper authors for sharing their research, code, and model weights * Ross Wightman's existing code off which I based this Copyright 2021 Alexander Soare """ import collections.abc import logging import math from functools import partial import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Nest'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class Attention(nn.Module): """ This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with an extra "image block" dim """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): """ x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) """ B, T, N, C = x.shape # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) else: q = q * self.scale attn = q @ k.transpose(-2, -1) # (B, H, T, N, N) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v # (B, H, T, N, C'), permute -> (B, T, N, C', H) x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C) x = self.proj(x) x = self.proj_drop(x) return x # (B, T, N, C) class TransformerLayer(nn.Module): """ This is much like `.vision_transformer.Block` but: - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") - Uses modified Attention layer that handles the "block" dimension """ def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): y = self.norm1(x) x = x + self.drop_path(self.attn(y)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConvPool(nn.Module): def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): super().__init__() self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) self.norm = norm_layer(out_channels) self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) def forward(self, x): """ x is expected to have shape (B, C, H, W) """ _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') x = self.conv(x) # Layer norm done over channel dim only x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) x = self.pool(x) return x # (B, C, H//2, W//2) def blockify(x, block_size: int): """image to blocks Args: x (Tensor): with shape (B, H, W, C) block_size (int): edge length of a single square block in units of H, W """ B, H, W, C = x.shape _assert(H % block_size == 0, '`block_size` must divide input height evenly') _assert(W % block_size == 0, '`block_size` must divide input width evenly') grid_height = H // block_size grid_width = W // block_size x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) return x # (B, T, N, C) @register_notrace_function # reason: int receives Proxy def deblockify(x, block_size: int): """blocks to image Args: x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block block_size (int): edge length of a single square block in units of desired H, W """ B, T, _, C = x.shape grid_size = int(math.sqrt(T)) height = width = grid_size * block_size x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) x = x.transpose(2, 3).reshape(B, height, width, C) return x # (B, H, W, C) class NestLevel(nn.Module): """ Single hierarchical level of a Nested Transformer """ def __init__( self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, mlp_ratio=4., qkv_bias=True, proj_drop=0., attn_drop=0., drop_path=[], norm_layer=None, act_layer=None, pad_type='', ): super().__init__() self.block_size = block_size self.grad_checkpointing = False self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) if prev_embed_dim is not None: self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) else: self.pool = nn.Identity() # Transformer encoder if len(drop_path): assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers' self.transformer_encoder = nn.Sequential(*[ TransformerLayer( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) def forward(self, x): """ expects x as (B, C, H, W) """ x = self.pool(x) x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer x = blockify(x, self.block_size) # (B, T, N, C') x = x + self.pos_embed if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.transformer_encoder, x) else: x = 
self.transformer_encoder(x) # (B, T, N, C') x = deblockify(x, self.block_size) # (B, H', W', C') # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage return x.permute(0, 3, 1, 2) # (B, C, H', W') class Nest(nn.Module): """ Nested Transformer (NesT) A PyTorch impl of : `Aggregating Nested Transformers` - https://arxiv.org/abs/2105.12723 """ def __init__( self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, pad_type='', weight_init='', global_pool='avg', ): """ Args: img_size (int, tuple): input image size in_chans (int): number of input channels patch_size (int): patch size num_levels (int): number of block hierarchies (T_d in the paper) embed_dims (int, tuple): embedding dimensions of each level num_heads (int, tuple): number of attention heads for each level depths (int, tuple): number of transformer layers for each level num_classes (int): number of classes for classification head mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate norm_layer: (nn.Module): normalization layer for transformer layers act_layer: (nn.Module): activation layer in MLP of transformer layers pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME weight_init: (str): weight init scheme global_pool: (str): type of pooling operation to apply to final feature map Notes: - Default values follow NesT-B from the original Jax code. - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. - For those following the paper, Table A1 may have errors! - https://github.com/google-research/nested-transformer/issues/2 """ super().__init__() for param_name in ['embed_dims', 'num_heads', 'depths']: param_value = locals()[param_name] if isinstance(param_value, collections.abc.Sequence): assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' embed_dims = to_ntuple(num_levels)(embed_dims) num_heads = to_ntuple(num_levels)(num_heads) depths = to_ntuple(num_levels)(depths) self.num_classes = num_classes self.num_features = embed_dims[-1] self.feature_info = [] norm_layer = norm_layer or LayerNorm act_layer = act_layer or nn.GELU self.drop_rate = drop_rate self.num_levels = num_levels if isinstance(img_size, collections.abc.Sequence): assert img_size[0] == img_size[1], 'Model only handles square inputs' img_size = img_size[0] assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' self.patch_size = patch_size # Number of blocks at each level self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' # Block edge size in units of patches # Hint: (img_size // patch_size) gives number of patches along edge of image. 
sqrt(self.num_blocks[0]) is the # number of blocks along edge of image self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) # Patch embedding self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False, ) self.num_patches = self.patch_embed.num_patches self.seq_length = self.num_patches // self.num_blocks[0] # Build up each hierarchical level levels = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] prev_dim = None curr_stride = 4 for i in range(len(self.num_blocks)): dim = embed_dims[i] levels.append(NestLevel( self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dp_rates[i], norm_layer=norm_layer, act_layer=act_layer, pad_type=pad_type, )) self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] prev_dim = dim curr_stride *= 2 self.levels = nn.Sequential(*levels) # Final normalization layer self.norm = norm_layer(embed_dims[-1]) # Classifier global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert mode in ('nlhb', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. for level in self.levels: trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) named_apply(partial(_init_nest_weights, head_bias=head_bias), self) @torch.jit.ignore def no_weight_decay(self): return {f'level.{i}.pos_embed' for i in range(len(self.levels))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', # stem and embed blocks=[ (r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None), (r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)), (r'^norm', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.levels: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.head = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.patch_embed(x) x = self.levels(x) # Layer norm done over channel dim only (to NHWC and back) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): """ NesT weight initialization Can replicate Jax implementation. 
Otherwise follows vision_transformer.py """ if isinstance(module, nn.Linear): if name.startswith('head'): trunc_normal_(module.weight, std=.02, a=-2, b=2) nn.init.constant_(module.bias, head_bias) else: trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) def resize_pos_embed(posemb, posemb_new): """ Rescale the grid of position embeddings when loading from state_dict Expected shape of position embeddings is (1, T, N, C), and considers only square images """ _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) seq_length_old = posemb.shape[2] num_blocks_new, seq_length_new = posemb_new.shape[1:3] size_new = int(math.sqrt(num_blocks_new*seq_length_new)) # First change to (1, C, H, W) posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) # Now change to new (1, T, N, C) posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) return posemb def checkpoint_filter_fn(state_dict, model): """ resize positional embeddings of pretrained weights """ pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] for k in pos_embed_keys: if state_dict[k].shape != getattr(model, k).shape: state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) return state_dict def _create_nest(variant, pretrained=False, **kwargs): model = build_model_with_cfg( Nest, variant, pretrained, feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'nest_base.untrained': _cfg(), 'nest_small.untrained': _cfg(), 'nest_tiny.untrained': _cfg(), # (weights from official Google JAX impl, require 'SAME' padding) 'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def nest_base(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) return model @register_model def nest_base_jx(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = 
_create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small_jx(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny_jx(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs) return model register_model_deprecations(__name__, { 'jx_nest_base': 'nest_base_jx', 'jx_nest_small': 'nest_small_jx', 'jx_nest_tiny': 'nest_tiny_jx', })
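

# Editor's note: the sketch below is an addition, not part of the original nest.py. It assumes a
# standard `timm` install where the entrypoints above are registered. NesT default_cfgs declare
# `fixed_input_size=True`, so inputs must match the 224x224 configuration (each NestLevel's
# pos_embed is sized for a fixed block grid).
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('nest_tiny', pretrained=False, num_classes=10)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 10])

    # blockify/deblockify round-trip: (B, H, W, C) -> (B, T, N, C) -> (B, H, W, C)
    x = torch.randn(2, 56, 56, 96)
    blocks = blockify(x, block_size=14)  # (2, 16, 196, 96): a 4x4 grid of 14x14 blocks
    assert torch.equal(deblockify(blocks, block_size=14), x)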
pytorch-image-models/timm/models/nest.py/0
{ "file_path": "pytorch-image-models/timm/models/nest.py", "repo_id": "pytorch-image-models", "token_count": 10075 }
192
""" SEResNet implementation from Cadene's pretrained models https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py Additional credit to https://github.com/creafz Original model: https://github.com/hujie-frank/SENet ResNet code gently borrowed from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. """ import math from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SENet'] def _weight_init(m): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1.) nn.init.constant_(m.bias, 0.) class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = x.mean((2, 3), keepdim=True) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): """ Base class for bottlenecks that implements `forward()` method. """ def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SEBottleneck(Bottleneck): """ Bottleneck for SENet154. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d( planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): """ ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe implementation and uses `stride=stride` in `conv1` and not in `conv2` (the latter is used in the torchvision implementation of ResNet). 
""" expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): """ ResNeXt bottleneck type C with a Squeeze-and-Excitation module. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): super(SEResNeXtBottleneck, self).__init__() width = math.floor(planes * (base_width / 64)) * groups self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes, reduction=reduction) self.downsample = downsample self.stride = stride def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SENet(nn.Module): def __init__( self, block, layers, groups, reduction, drop_rate=0.2, in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=1000, global_pool='avg'): """ Parameters ---------- block (nn.Module): Bottleneck class. - For SENet154: SEBottleneck - For SE-ResNet models: SEResNetBottleneck - For SE-ResNeXt models: SEResNeXtBottleneck layers (list of ints): Number of residual blocks for 4 layers of the network (layer1...layer4). groups (int): Number of groups for the 3x3 convolution in each bottleneck block. - For SENet154: 64 - For SE-ResNet models: 1 - For SE-ResNeXt models: 32 reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - For all models: 16 dropout_p (float or None): Drop probability for the Dropout layer. If `None` the Dropout layer is not used. - For SENet154: 0.2 - For SE-ResNet models: None - For SE-ResNeXt models: None inplanes (int): Number of input channels for layer1. - For SENet154: 128 - For SE-ResNet models: 64 - For SE-ResNeXt models: 64 input_3x3 (bool): If `True`, use three 3x3 convolutions instead of a single 7x7 convolution in layer0. 
- For SENet154: True - For SE-ResNet models: False - For SE-ResNeXt models: False downsample_kernel_size (int): Kernel size for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 3 - For SE-ResNet models: 1 - For SE-ResNeXt models: 1 downsample_padding (int): Padding for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 1 - For SE-ResNet models: 0 - For SE-ResNeXt models: 0 num_classes (int): Number of outputs in `last_linear` layer. - For all models: 1000 """ super(SENet, self).__init__() self.inplanes = inplanes self.num_classes = num_classes self.drop_rate = drop_rate if input_3x3: layer0_modules = [ ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True)), ] else: layer0_modules = [ ('conv1', nn.Conv2d( in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True)), ] self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] self.layer1 = self._make_layer( block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0 ) self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] self.layer2 = self._make_layer( block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] self.layer3 = self._make_layer( block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] self.layer4 = self._make_layer( block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] self.num_features = 512 * block.expansion self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) for m in self.modules(): _weight_init(m) def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, 
coarse=False): matcher = dict(stem=r'^layer0', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.last_linear def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.layer0(x) x = self.pool0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_senet(variant, pretrained=False, **kwargs): return build_model_with_cfg(SENet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', **kwargs } default_cfgs = generate_default_cfgs({ 'legacy_senet154.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'), 'legacy_seresnet18.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', interpolation='bicubic'), 'legacy_seresnet34.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), 'legacy_seresnet50.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), 'legacy_seresnet101.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), 'legacy_seresnet152.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), 'legacy_seresnext26_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', interpolation='bicubic'), 'legacy_seresnext50_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'), 'legacy_seresnext101_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth'), }) @register_model def legacy_seresnet18(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet18', pretrained, **model_args) @register_model def legacy_seresnet34(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet34', pretrained, **model_args) @register_model def legacy_seresnet50(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet50', 
pretrained, **model_args) @register_model def legacy_seresnet101(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet101', pretrained, **model_args) @register_model def legacy_seresnet152(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet152', pretrained, **model_args) @register_model def legacy_senet154(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) return _create_senet('legacy_senet154', pretrained, **model_args) @register_model def legacy_seresnext26_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) @register_model def legacy_seresnext50_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) @register_model def legacy_seresnext101_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
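

# Editor's note: an illustrative usage sketch added by the editor, not part of the original senet.py.
# As the module docstring's FIXME notes, these `legacy_*` variants are kept mainly for checkpoint
# compatibility; the timm ResNet implementation is preferred for new work. Assumes a standard
# `timm` install.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('legacy_seresnet18', pretrained=False, num_classes=10)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 10])

    # pooled, pre-classifier features via forward_head(pre_logits=True)
    feats = model.forward_head(model.forward_features(torch.randn(1, 3, 224, 224)), pre_logits=True)
    print(feats.shape)  # torch.Size([1, 512]) for the SEResNetBlock-based seresnet18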
pytorch-image-models/timm/models/senet.py/0
{ "file_path": "pytorch-image-models/timm/models/senet.py", "repo_id": "pytorch-image-models", "token_count": 8344 }
193
""" Vision OutLOoker (VOLO) implementation Paper: `VOLO: Vision Outlooker for Visual Recognition` - https://arxiv.org/abs/2106.13112 Code adapted from official impl at https://github.com/sail-sg/volo, original copyright in comment below Modifications and additions for timm by / Copyright 2022, Ross Wightman """ # Copyright 2021 Sea Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_ from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['VOLO'] # model_registry will add each entrypoint fn to this class OutlookAttention(nn.Module): def __init__( self, dim, num_heads, kernel_size=3, padding=1, stride=1, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() head_dim = dim // num_heads self.num_heads = num_heads self.kernel_size = kernel_size self.padding = padding self.stride = stride self.scale = head_dim ** -0.5 self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride) self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) def forward(self, x): B, H, W, C = x.shape v = self.v(x).permute(0, 3, 1, 2) # B, C, H, W h, w = math.ceil(H / self.stride), math.ceil(W / self.stride) v = self.unfold(v).reshape( B, self.num_heads, C // self.num_heads, self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) # B,H,N,kxk,C/H attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) attn = self.attn(attn).reshape( B, h * w, self.num_heads, self.kernel_size * self.kernel_size, self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) # B,H,N,kxk,kxk attn = attn * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w) x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride) x = self.proj(x.permute(0, 2, 3, 1)) x = self.proj_drop(x) return x class Outlooker(nn.Module): def __init__( self, dim, kernel_size, padding, stride=1, num_heads=1, mlp_ratio=3., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, qkv_bias=False, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = OutlookAttention( dim, num_heads, kernel_size=kernel_size, padding=padding, stride=stride, qkv_bias=qkv_bias, attn_drop=attn_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, ) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, H, W, C = x.shape qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, H, W, C) x = self.proj(x) x = self.proj_drop(x) return x class Transformer(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ClassAttention(nn.Module): def __init__( self, dim, num_heads=8, head_dim=None, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads if head_dim is not None: self.head_dim = head_dim else: head_dim = dim // num_heads self.head_dim = head_dim self.scale = head_dim ** -0.5 self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias) self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(self.head_dim * self.num_heads, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim) attn = ((q * self.scale) @ k.transpose(-2, -1)) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads) cls_embed = self.proj(cls_embed) cls_embed = self.proj_drop(cls_embed) return cls_embed class ClassBlock(nn.Module): def __init__( self, dim, num_heads, head_dim=None, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttention( dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, ) # NOTE: drop path for stochastic depth self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, ) def forward(self, x): cls_embed = x[:, :1] cls_embed = cls_embed + self.drop_path(self.attn(self.norm1(x))) cls_embed = cls_embed + self.drop_path(self.mlp(self.norm2(cls_embed))) return torch.cat([cls_embed, x[:, 1:]], dim=1) def get_block(block_type, **kargs): if block_type == 'ca': return ClassBlock(**kargs) def rand_bbox(size, lam, scale=1): """ get bounding box as token labeling (https://github.com/zihangJiang/TokenLabeling) return: bounding box """ W = size[1] // scale H = size[2] // scale cut_rat = np.sqrt(1. - lam) cut_w = (W * cut_rat).astype(int) cut_h = (H * cut_rat).astype(int) # uniform cx = np.random.randint(W) cy = np.random.randint(H) bbx1 = np.clip(cx - cut_w // 2, 0, W) bby1 = np.clip(cy - cut_h // 2, 0, H) bbx2 = np.clip(cx + cut_w // 2, 0, W) bby2 = np.clip(cy + cut_h // 2, 0, H) return bbx1, bby1, bbx2, bby2 class PatchEmbed(nn.Module): """ Image to Patch Embedding. Different with ViT use 1 conv layer, we use 4 conv layers to do patch embedding """ def __init__( self, img_size=224, stem_conv=False, stem_stride=1, patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384, ): super().__init__() assert patch_size in [4, 8, 16] if stem_conv: self.conv = nn.Sequential( nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), ) else: self.conv = None self.proj = nn.Conv2d( hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride) self.num_patches = (img_size // patch_size) * (img_size // patch_size) def forward(self, x): if self.conv is not None: x = self.conv(x) x = self.proj(x) # B, C, H, W return x class Downsample(nn.Module): """ Image to Patch Embedding, downsampling between stage1 and stage2 """ def __init__(self, in_embed_dim, out_embed_dim, patch_size=2): super().__init__() self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.proj(x) # B, C, H, W x = x.permute(0, 2, 3, 1) return x def outlooker_blocks( block_fn, index, dim, layers, num_heads=1, kernel_size=3, padding=1, stride=2, mlp_ratio=3., qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs, ): """ generate outlooker layer in stage1 return: outlooker layers """ blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn( dim, kernel_size=kernel_size, padding=padding, stride=stride, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr, )) blocks = nn.Sequential(*blocks) return blocks def transformer_blocks( block_fn, index, dim, layers, num_heads, mlp_ratio=3., qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs, ): """ generate transformer layers in stage2 return: transformer layers """ blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn( dim, num_heads, 
mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr, )) blocks = nn.Sequential(*blocks) return blocks class VOLO(nn.Module): """ Vision Outlooker, the main class of our model """ def __init__( self, layers, img_size=224, in_chans=3, num_classes=1000, global_pool='token', patch_size=8, stem_hidden_dim=64, embed_dims=None, num_heads=None, downsamples=(True, False, False, False), outlook_attention=(True, False, False, False), mlp_ratio=3.0, qkv_bias=False, drop_rate=0., pos_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, post_layers=('ca', 'ca'), use_aux_head=True, use_mix_token=False, pooling_scale=2, ): super().__init__() num_layers = len(layers) mlp_ratio = to_ntuple(num_layers)(mlp_ratio) img_size = to_2tuple(img_size) self.num_classes = num_classes self.global_pool = global_pool self.mix_token = use_mix_token self.pooling_scale = pooling_scale self.num_features = embed_dims[-1] if use_mix_token: # enable token mixing, see token labeling for details. self.beta = 1.0 assert global_pool == 'token', "return all tokens if mix_token is enabled" self.grad_checkpointing = False self.patch_embed = PatchEmbed( stem_conv=True, stem_stride=2, patch_size=patch_size, in_chans=in_chans, hidden_dim=stem_hidden_dim, embed_dim=embed_dims[0], ) # inital positional encoding, we add positional encoding after outlooker blocks patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale) self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1])) self.pos_drop = nn.Dropout(p=pos_drop_rate) # set the main block in network network = [] for i in range(len(layers)): if outlook_attention[i]: # stage 1 stage = outlooker_blocks( Outlooker, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, norm_layer=norm_layer, ) network.append(stage) else: # stage 2 stage = transformer_blocks( Transformer, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, drop_path_rate=drop_path_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer, ) network.append(stage) if downsamples[i]: # downsampling between two stages network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2)) self.network = nn.ModuleList(network) # set post block, for example, class attention layers self.post_network = None if post_layers is not None: self.post_network = nn.ModuleList([ get_block( post_layers[i], dim=embed_dims[-1], num_heads=num_heads[-1], mlp_ratio=mlp_ratio[-1], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, drop_path=0., norm_layer=norm_layer) for i in range(len(post_layers)) ]) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1])) trunc_normal_(self.cls_token, std=.02) # set output type if use_aux_head: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() else: self.aux_head = None self.norm = norm_layer(self.num_features) # Classifier head self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.pos_embed, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( 
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[ (r'^network\.(\d+)\.(\d+)', None), (r'^network\.(\d+)', (0,)), ], blocks2=[ (r'^cls_token', (0,)), (r'^post_network\.(\d+)', None), (r'^norm', (99999,)) ], ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() if self.aux_head is not None: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_tokens(self, x): for idx, block in enumerate(self.network): if idx == 2: # add positional encoding after outlooker blocks x = x + self.pos_embed x = self.pos_drop(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(block, x) else: x = block(x) B, H, W, C = x.shape x = x.reshape(B, -1, C) return x def forward_cls(self, x): B, N, C = x.shape cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat([cls_tokens, x], dim=1) for block in self.post_network: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(block, x) else: x = block(x) return x def forward_train(self, x): """ A separate forward fn for training with mix_token (if a train script supports). Combining multiple modes in as single forward with different return types is torchscript hell. """ x = self.patch_embed(x) x = x.permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C # mix token, see token labeling for details. if self.mix_token and self.training: lam = np.random.beta(self.beta, self.beta) patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale) temp_x = x.clone() sbbx1, sbby1 = self.pooling_scale * bbx1, self.pooling_scale * bby1 sbbx2, sbby2 = self.pooling_scale * bbx2, self.pooling_scale * bby2 temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :] x = temp_x else: bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0 # step2: tokens learning in the two stages x = self.forward_tokens(x) # step3: post network, apply class attention or not if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) if self.global_pool == 'avg': x_cls = x.mean(dim=1) elif self.global_pool == 'token': x_cls = x[:, 0] else: x_cls = x if self.aux_head is None: return x_cls x_aux = self.aux_head(x[:, 1:]) # generate classes in all feature tokens, see token labeling if not self.training: return x_cls + 0.5 * x_aux.max(1)[0] if self.mix_token and self.training: # reverse "mix token", see token labeling for details. x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1]) temp_x = x_aux.clone() temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :] x_aux = temp_x x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1]) # return these: 1. class token, 2. classes from all feature tokens, 3. 
bounding box return x_cls, x_aux, (bbx1, bby1, bbx2, bby2) def forward_features(self, x): x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C # step2: tokens learning in the two stages x = self.forward_tokens(x) # step3: post network, apply class attention or not if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': out = x.mean(dim=1) elif self.global_pool == 'token': out = x[:, 0] else: out = x x = self.head_drop(x) if pre_logits: return out out = self.head(out) if self.aux_head is not None: # generate classes in all feature tokens, see token labeling aux = self.aux_head(x[:, 1:]) out = out + 0.5 * aux.max(1)[0] return out def forward(self, x): """ simplified forward (without mix token training) """ x = self.forward_features(x) x = self.forward_head(x) return x def _create_volo(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') return build_model_with_cfg( VOLO, variant, pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .96, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'), **kwargs } default_cfgs = generate_default_cfgs({ 'volo_d1_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar', crop_pct=0.96), 'volo_d1_384.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d2_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar', crop_pct=0.96), 'volo_d2_384.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d3_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar', crop_pct=0.96), 'volo_d3_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar', crop_pct=1.0, input_size=(3, 448, 448)), 'volo_d4_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar', crop_pct=0.96), 'volo_d4_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar', crop_pct=0.96), 'volo_d5_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_512.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar', crop_pct=1.15, input_size=(3, 512, 512)), }) @register_model def volo_d1_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D1 model, Params: 27M """ model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = 
_create_volo('volo_d1_224', pretrained=pretrained, **model_args) return model @register_model def volo_d1_384(pretrained=False, **kwargs) -> VOLO: """ VOLO-D1 model, Params: 27M """ model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args) return model @register_model def volo_d2_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D2 model, Params: 59M """ model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args) return model @register_model def volo_d2_384(pretrained=False, **kwargs) -> VOLO: """ VOLO-D2 model, Params: 59M """ model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args) return model @register_model def volo_d3_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D3 model, Params: 86M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args) return model @register_model def volo_d3_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D3 model, Params: 86M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args) return model @register_model def volo_d4_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D4 model, Params: 193M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args) return model @register_model def volo_d4_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D4 model, Params: 193M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args) return model @register_model def volo_d5_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args) return model @register_model def volo_d5_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args) return model @register_model def volo_d5_512(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args) return model
pytorch-image-models/timm/models/volo.py/0
{ "file_path": "pytorch-image-models/timm/models/volo.py", "repo_id": "pytorch-image-models", "token_count": 15802 }
194
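A short usage sketch for the VOLO factory functions registered above; it assumes a `timm` install that includes these `volo_*` entry points and feeds random data rather than a real image, so it is an illustration rather than part of the file itself.

```python
# Minimal sketch: instantiate a registered VOLO variant through the timm
# factory and run a forward pass. Assumes timm (with VOLO included) and
# torch are installed; the input tensor is random, not a real image.
import torch
import timm

model = timm.create_model("volo_d1_224", pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)  # the configs above are fixed-input-size
with torch.no_grad():
    logits = model(x)            # simplified forward: features + head (+ aux head)
print(logits.shape)              # torch.Size([1, 10])
```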
""" PyTorch MADGRAD optimizer MADGRAD: https://arxiv.org/abs/2101.11075 Code from: https://github.com/facebookresearch/madgrad """ # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import TYPE_CHECKING, Any, Callable, Optional import torch import torch.optim if TYPE_CHECKING: from torch.optim.optimizer import _params_t else: _params_t = Any class MADGRAD(torch.optim.Optimizer): """ MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic Optimization. .. _MADGRAD: https://arxiv.org/abs/2101.11075 MADGRAD is a general purpose optimizer that can be used in place of SGD or Adam may converge faster and generalize better. Currently GPU-only. Typically, the same learning rate schedule that is used for SGD or Adam may be used. The overall learning rate is not comparable to either method and should be determined by a hyper-parameter sweep. MADGRAD requires less weight decay than other methods, often as little as zero. Momentum values used for SGD or Adam's beta1 should work here also. On sparse problems both weight_decay and momentum should be set to 0. Arguments: params (iterable): Iterable of parameters to optimize or dicts defining parameter groups. lr (float): Learning rate (default: 1e-2). momentum (float): Momentum value in the range [0,1) (default: 0.9). weight_decay (float): Weight decay, i.e. a L2 penalty (default: 0). eps (float): Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). """ def __init__( self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9, weight_decay: float = 0, eps: float = 1e-6, decoupled_decay: bool = False, ): if momentum < 0 or momentum >= 1: raise ValueError(f"Momentum {momentum} must be in the range [0,1]") if lr <= 0: raise ValueError(f"Learning rate {lr} must be positive") if weight_decay < 0: raise ValueError(f"Weight decay {weight_decay} must be non-negative") if eps < 0: raise ValueError(f"Eps must be non-negative") defaults = dict( lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) super().__init__(params, defaults) @property def supports_memory_efficient_fp16(self) -> bool: return False @property def supports_flat_params(self) -> bool: return True @torch.no_grad() def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: eps = group['eps'] lr = group['lr'] + eps weight_decay = group['weight_decay'] momentum = group['momentum'] ck = 1 - momentum for p in group["params"]: if p.grad is None: continue grad = p.grad if momentum != 0.0 and grad.is_sparse: raise RuntimeError("momentum != 0 is not compatible with sparse gradients") state = self.state[p] if len(state) == 0: state['step'] = 0 state['grad_sum_sq'] = torch.zeros_like(p) state['s'] = torch.zeros_like(p) if momentum != 0: state['x0'] = torch.clone(p).detach() state['step'] += 1 grad_sum_sq = state['grad_sum_sq'] s = state['s'] lamb = lr * math.sqrt(state['step']) # Apply weight decay if weight_decay != 0: if group['decoupled_decay']: p.mul_(1.0 - group['lr'] * weight_decay) else: if grad.is_sparse: raise RuntimeError("weight_decay option is not compatible with sparse gradients") grad.add_(p, alpha=weight_decay) if grad.is_sparse: grad = grad.coalesce() grad_val = grad._values() p_masked = p.sparse_mask(grad) grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) s_masked = s.sparse_mask(grad) # Compute x_0 from other known quantities rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) # Dense + sparse op grad_sq = grad * grad grad_sum_sq.add_(grad_sq, alpha=lamb) grad_sum_sq_masked.add_(grad_sq, alpha=lamb) rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) s.add_(grad, alpha=lamb) s_masked._values().add_(grad_val, alpha=lamb) # update masked copy of p p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) # Copy updated masked p to dense p using an add operation p_masked._values().add_(p_kp1_masked_vals, alpha=-1) p.add_(p_masked, alpha=-1) else: if momentum == 0: # Compute x_0 from other known quantities rms = grad_sum_sq.pow(1 / 3).add_(eps) x0 = p.addcdiv(s, rms, value=1) else: x0 = state['x0'] # Accumulate second moments grad_sum_sq.addcmul_(grad, grad, value=lamb) rms = grad_sum_sq.pow(1 / 3).add_(eps) # Update s s.add_(grad, alpha=lamb) # Step if momentum == 0: p.copy_(x0.addcdiv(s, rms, value=-1)) else: z = x0.addcdiv(s, rms, value=-1) # p is a moving average of z p.mul_(1 - ck).add_(z, alpha=ck) return loss
pytorch-image-models/timm/optim/madgrad.py/0
{ "file_path": "pytorch-image-models/timm/optim/madgrad.py", "repo_id": "pytorch-image-models", "token_count": 3505 }
195
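The MADGRAD docstring above describes drop-in usage in place of SGD or Adam with little or no weight decay; below is a minimal, hedged training-step sketch. The `timm.optim` import path is an assumption about how the file above is packaged and may differ between versions.

```python
# Sketch of a single training step with MADGRAD. The import path is an
# assumption (recent timm versions re-export the optimizer); the model and
# data are toy placeholders.
import torch
import torch.nn.functional as F
from timm.optim import MADGRAD  # assumed re-export; adjust to your install

model = torch.nn.Linear(16, 4)
optimizer = MADGRAD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=0.0)

x, y = torch.randn(8, 16), torch.randint(0, 4, (8,))
optimizer.zero_grad()
loss = F.cross_entropy(model(x), y)
loss.backward()
optimizer.step()
```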
""" Step Scheduler Basic step LR schedule with warmup, noise. Hacked together by / Copyright 2020 Ross Wightman """ import math import torch from .scheduler import Scheduler class StepLRScheduler(Scheduler): """ """ def __init__( self, optimizer: torch.optim.Optimizer, decay_t: float, decay_rate: float = 1., warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] return lrs
pytorch-image-models/timm/scheduler/step_lr.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/step_lr.py", "repo_id": "pytorch-image-models", "token_count": 939 }
196
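The `_get_lr` method above combines a linear warmup with step decay; the following standalone sketch re-derives the same schedule values in plain Python. It does not import the class, and the example hyper-parameters are arbitrary.

```python
# Sketch of the schedule implemented by _get_lr above: linear warmup to the
# base LR, then decay by decay_rate every decay_t epochs. With
# warmup_prefix=True the warmup epochs are subtracted before counting decay
# steps, which is mirrored here.
def step_lr(t, base_lr=0.1, decay_t=30, decay_rate=0.1, warmup_t=5, warmup_lr_init=1e-4):
    if t < warmup_t:
        return warmup_lr_init + t * (base_lr - warmup_lr_init) / warmup_t
    t = t - warmup_t  # warmup_prefix=True behaviour
    return base_lr * decay_rate ** (t // decay_t)

print([round(step_lr(t), 4) for t in (0, 4, 5, 34, 35, 65)])
# warmup ramps up to 0.1, which drops to 0.01 at t=35 and 0.001 at t=65
```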
import random import numpy as np import torch def random_seed(seed=42, rank=0): torch.manual_seed(seed + rank) np.random.seed(seed + rank) random.seed(seed + rank)
pytorch-image-models/timm/utils/random.py/0
{ "file_path": "pytorch-image-models/timm/utils/random.py", "repo_id": "pytorch-image-models", "token_count": 68 }
197
use crate::app::Data; use tabled::settings::Merge; use tabled::{builder::Builder, settings::Style, Table}; #[allow(clippy::too_many_arguments)] pub(crate) fn parameters_table( tokenizer_name: String, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, frequency_penalty: Option<f32>, watermark: bool, do_sample: bool, ) -> Table { let mut builder = Builder::default(); builder.set_header(["Parameter", "Value"]); builder.push_record(["Model", &tokenizer_name]); builder.push_record(["Sequence Length", &sequence_length.to_string()]); builder.push_record(["Decode Length", &decode_length.to_string()]); builder.push_record(["Top N Tokens", &format!("{top_n_tokens:?}")]); builder.push_record(["N Runs", &n_runs.to_string()]); builder.push_record(["Warmups", &warmups.to_string()]); builder.push_record(["Temperature", &format!("{temperature:?}")]); builder.push_record(["Top K", &format!("{top_k:?}")]); builder.push_record(["Top P", &format!("{top_p:?}")]); builder.push_record(["Typical P", &format!("{typical_p:?}")]); builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]); builder.push_record(["Frequency Penalty", &format!("{frequency_penalty:?}")]); builder.push_record(["Watermark", &watermark.to_string()]); builder.push_record(["Do Sample", &do_sample.to_string()]); let mut table = builder.build(); table.with(Style::markdown()); table } pub(crate) fn latency_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header([ "Step", "Batch Size", "Average", "Lowest", "Highest", "p50", "p90", "p99", ]); add_latencies( &mut builder, "Prefill", &data.batch_size, &data.prefill_latencies, ); add_latencies( &mut builder, "Decode (token)", &data.batch_size, &data.decode_token_latencies, ); add_latencies( &mut builder, "Decode (total)", &data.batch_size, &data.decode_latencies, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } pub(crate) fn throughput_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header(["Step", "Batch Size", "Average", "Lowest", "Highest"]); add_throuhgputs( &mut builder, "Prefill", &data.batch_size, &data.prefill_throughputs, ); add_throuhgputs( &mut builder, "Decode", &data.batch_size, &data.decode_throughputs, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } fn add_latencies( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_latencies: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let latencies = &batch_latencies[i]; let (avg, min, max) = avg_min_max(latencies); let row = [ step, &b.to_string(), &format_value(avg, "ms"), &format_value(min, "ms"), &format_value(max, "ms"), &format_value(px(latencies, 50), "ms"), &format_value(px(latencies, 90), "ms"), &format_value(px(latencies, 99), "ms"), ]; builder.push_record(row); } } fn add_throuhgputs( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_throughputs: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let throughputs = &batch_throughputs[i]; let (avg, min, max) = avg_min_max(throughputs); let row = [ step, &b.to_string(), &format_value(avg, "tokens/secs"), &format_value(min, "tokens/secs"), &format_value(max, "tokens/secs"), ]; builder.push_record(row); } } fn avg_min_max(data: &Vec<f64>) -> (f64, f64, f64) { let average = 
data.iter().sum::<f64>() / data.len() as f64; let min = data .iter() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max = data .iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); (average, *min, *max) } fn px(data: &Vec<f64>, p: u32) -> f64 { let i = (f64::from(p) / 100.0 * data.len() as f64) as usize; *data.get(i).unwrap_or(&std::f64::NAN) } fn format_value(value: f64, unit: &'static str) -> String { format!("{:.2} {unit}", value) }
text-generation-inference/benchmark/src/table.rs/0
{ "file_path": "text-generation-inference/benchmark/src/table.rs", "repo_id": "text-generation-inference", "token_count": 2298 }
198
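The Rust helpers above reduce each latency vector to average/min/max plus positional percentiles; here is a small Python sketch of the same summary. The explicit sort is an added assumption made here for clarity, since positional percentile indexing is only meaningful on ordered samples.

```python
# Python sketch of the summary statistics used in the Rust table code above:
# average / min / max plus a positional percentile (p% of the way through the
# sorted list). Values are illustrative latencies in milliseconds.
def summarize(samples, percentiles=(50, 90, 99)):
    data = sorted(samples)
    stats = {"avg": sum(data) / len(data), "min": data[0], "max": data[-1]}
    for p in percentiles:
        i = min(int(p / 100 * len(data)), len(data) - 1)
        stats[f"p{p}"] = data[i]
    return stats

print(summarize([12.1, 9.8, 15.3, 10.5, 11.0]))
```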
from enum import Enum from pydantic import BaseModel, validator from typing import Optional, List, Union, Any from text_generation.errors import ValidationError # enum for grammar type class GrammarType(str, Enum): Json = "json" Regex = "regex" # Grammar type and value class Grammar(BaseModel): # Grammar type type: GrammarType # Grammar value value: Union[str, dict] class ToolCall(BaseModel): # Id of the tool call id: int # Type of the tool call type: str # Function details of the tool call function: dict class Message(BaseModel): # Role of the message sender role: str # Content of the message content: Optional[str] # Optional name of the message sender name: Optional[str] = None # Tool calls associated with the chat completion tool_calls: Optional[Any] = None class Tool(BaseModel): # Type of the tool type: str # Function details of the tool function: dict class ChatCompletionComplete(BaseModel): # Index of the chat completion index: int # Message associated with the chat completion message: Message # Log probabilities for the chat completion logprobs: Optional[Any] # Reason for completion finish_reason: str # Usage details of the chat completion usage: Any class Function(BaseModel): name: Optional[str] arguments: str class ChoiceDeltaToolCall(BaseModel): index: int id: str type: str function: Function class ChoiceDelta(BaseModel): role: str content: Optional[str] tool_calls: Optional[ChoiceDeltaToolCall] class Choice(BaseModel): index: int delta: ChoiceDelta logprobs: Optional[dict] = None finish_reason: Optional[str] = None class ChatCompletionChunk(BaseModel): id: str object: str created: int model: str system_fingerprint: str choices: List[Choice] class ChatComplete(BaseModel): # Chat completion details id: str object: str created: int model: str system_fingerprint: str choices: List[ChatCompletionComplete] usage: Any class ChatRequest(BaseModel): # Model identifier model: str # List of messages in the conversation messages: List[Message] # Penalty for frequency of new tokens frequency_penalty: Optional[float] = None # Bias values for token selection logit_bias: Optional[List[float]] = None # Whether to return log probabilities logprobs: Optional[bool] = None # Number of most likely tokens to return at each position top_logprobs: Optional[int] = None # Maximum number of tokens to generate max_tokens: Optional[int] = None # Number of chat completion choices to generate n: Optional[int] = None # Penalty for presence of new tokens presence_penalty: Optional[float] = None # Flag to indicate streaming response stream: bool = False # Random sampling seed seed: Optional[int] = None # Sampling temperature temperature: Optional[float] = None # Top-p value for nucleus sampling top_p: Optional[float] = None # List of tools to be used tools: Optional[List[Tool]] = None # Choice of tool to be used tool_choice: Optional[str] = None class Parameters(BaseModel): # Activate logits sampling do_sample: bool = False # Maximum number of generated tokens max_new_tokens: int = 20 # The parameter for repetition penalty. 1.0 means no penalty. # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. repetition_penalty: Optional[float] = None # Whether to prepend the prompt to the generated text return_full_text: bool = False # Stop generating tokens if a member of `stop_sequences` is generated stop: List[str] = [] # Random sampling seed seed: Optional[int] = None # The value used to module the logits distribution. 
temperature: Optional[float] = None # The number of highest probability vocabulary tokens to keep for top-k-filtering. top_k: Optional[int] = None # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or # higher are kept for generation. top_p: Optional[float] = None # truncate inputs tokens to the given size truncate: Optional[int] = None # Typical Decoding mass # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information typical_p: Optional[float] = None # Generate best_of sequences and return the one if the highest token logprobs best_of: Optional[int] = None # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) watermark: bool = False # Get generation details details: bool = False # Get decoder input token logprobs and ids decoder_input_details: bool = False # Return the N most likely tokens at each step top_n_tokens: Optional[int] = None # grammar to use for generation grammar: Optional[Grammar] = None @validator("best_of") def valid_best_of(cls, field_value, values): if field_value is not None: if field_value <= 0: raise ValidationError("`best_of` must be strictly positive") if field_value > 1 and values["seed"] is not None: raise ValidationError("`seed` must not be set when `best_of` is > 1") sampling = ( values["do_sample"] | (values["temperature"] is not None) | (values["top_k"] is not None) | (values["top_p"] is not None) | (values["typical_p"] is not None) ) if field_value > 1 and not sampling: raise ValidationError("you must use sampling when `best_of` is > 1") return field_value @validator("repetition_penalty") def valid_repetition_penalty(cls, v): if v is not None and v <= 0: raise ValidationError("`repetition_penalty` must be strictly positive") return v @validator("seed") def valid_seed(cls, v): if v is not None and v < 0: raise ValidationError("`seed` must be positive") return v @validator("temperature") def valid_temp(cls, v): if v is not None and v <= 0: raise ValidationError("`temperature` must be strictly positive") return v @validator("top_k") def valid_top_k(cls, v): if v is not None and v <= 0: raise ValidationError("`top_k` must be strictly positive") return v @validator("top_p") def valid_top_p(cls, v): if v is not None and (v <= 0 or v >= 1.0): raise ValidationError("`top_p` must be > 0.0 and < 1.0") return v @validator("truncate") def valid_truncate(cls, v): if v is not None and v <= 0: raise ValidationError("`truncate` must be strictly positive") return v @validator("typical_p") def valid_typical_p(cls, v): if v is not None and (v <= 0 or v >= 1.0): raise ValidationError("`typical_p` must be > 0.0 and < 1.0") return v @validator("top_n_tokens") def valid_top_n_tokens(cls, v): if v is not None and v <= 0: raise ValidationError("`top_n_tokens` must be strictly positive") return v @validator("grammar") def valid_grammar(cls, v): if v is not None: if v.type == GrammarType.Regex and not v.value: raise ValidationError("`value` cannot be empty for `regex` grammar") if v.type == GrammarType.Json and not v.value: raise ValidationError("`value` cannot be empty for `json` grammar") return v class Request(BaseModel): # Prompt inputs: str # Generation parameters parameters: Optional[Parameters] = None # Whether to stream output tokens stream: bool = False @validator("inputs") def valid_input(cls, v): if not v: raise ValidationError("`inputs` cannot be empty") return v @validator("stream") def valid_best_of_stream(cls, field_value, 
values): parameters = values["parameters"] if ( parameters is not None and parameters.best_of is not None and parameters.best_of > 1 and field_value ): raise ValidationError( "`best_of` != 1 is not supported when `stream` == True" ) return field_value # Decoder input tokens class InputToken(BaseModel): # Token ID from the model tokenizer id: int # Token text text: str # Logprob # Optional since the logprob of the first token cannot be computed logprob: Optional[float] = None # Generated tokens class Token(BaseModel): # Token ID from the model tokenizer id: int # Token text text: str # Logprob logprob: Optional[float] = None # Is the token a special token # Can be used to ignore tokens when concatenating special: bool # Generation finish reason class FinishReason(str, Enum): # number of generated tokens == `max_new_tokens` Length = "length" # the model generated its end of sequence token EndOfSequenceToken = "eos_token" # the model generated a text included in `stop_sequences` StopSequence = "stop_sequence" # Additional sequences when using the `best_of` parameter class BestOfSequence(BaseModel): # Generated text generated_text: str # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] = None # Decoder input tokens, empty if decoder_input_details is False prefill: List[InputToken] # Generated tokens tokens: List[Token] # Most likely tokens top_tokens: Optional[List[List[Token]]] = None # `generate` details class Details(BaseModel): # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] = None # Decoder input tokens, empty if decoder_input_details is False prefill: List[InputToken] # Generated tokens tokens: List[Token] # Most likely tokens top_tokens: Optional[List[List[Token]]] = None # Additional sequences when using the `best_of` parameter best_of_sequences: Optional[List[BestOfSequence]] = None # `generate` return value class Response(BaseModel): # Generated text generated_text: str # Generation details details: Details # `generate_stream` details class StreamDetails(BaseModel): # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] = None # `generate_stream` return value class StreamResponse(BaseModel): # Generated token token: Token # Most likely tokens top_tokens: Optional[List[Token]] = None # Complete generated text # Only available when the generation is finished generated_text: Optional[str] = None # Generation details # Only available when the generation is finished details: Optional[StreamDetails] = None # Inference API currently deployed model class DeployedModel(BaseModel): model_id: str sha: str
text-generation-inference/clients/python/text_generation/types.py/0
{ "file_path": "text-generation-inference/clients/python/text_generation/types.py", "repo_id": "text-generation-inference", "token_count": 4249 }
199
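The validators above encode the ranges TGI accepts for generation parameters; a short sketch of how they reject bad values at construction time, assuming the `text-generation` Python client is installed so these classes are importable.

```python
# Sketch: the pydantic validators above reject out-of-range values as soon as
# a Parameters object is built. Assumes the text-generation client package is
# installed; pydantic wraps the ValidationError raised by the validators.
from text_generation.types import Parameters

ok = Parameters(do_sample=True, temperature=0.7, top_p=0.9, max_new_tokens=32)
print(ok.temperature, ok.top_p)

try:
    Parameters(top_p=1.5)  # must be > 0.0 and < 1.0
except Exception as err:
    print(type(err).__name__, err)
```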
# Streaming ## What is Streaming? Token streaming is the mode in which the server returns the tokens one by one as the model generates them. This enables showing progressive generations to the user rather than waiting for the whole generation. Streaming is an essential aspect of the end-user experience as it reduces latency, one of the most critical aspects of a smooth experience. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual_360.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual-dark_360.gif" /> </div> With token streaming, the server can start returning the tokens one by one before having to generate the whole response. Users can have a sense of the generation's quality earlier than the end of the generation. This has different positive effects: * Users can get results orders of magnitude earlier for extremely long queries. * Seeing something in progress allows users to stop the generation if it's not going in the direction they expect. * Perceived latency is lower when results are shown in the early stages. * When used in conversational UIs, the experience feels more natural. For example, a system can generate 100 tokens per second. If the system generates 1000 tokens, with the non-streaming setup, users need to wait 10 seconds to get results. On the other hand, with the streaming setup, users get initial results immediately, and although end-to-end latency will be the same, they can see half of the generation after five seconds. Below you can see an interactive demo that shows non-streaming vs streaming side-by-side. Click **generate** below. <div class="block dark:hidden"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=light" width="850" height="350" ></iframe> </div> <div class="hidden dark:block"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=dark" width="850" height="350" ></iframe> </div> ## How to use Streaming? ### Streaming with Python To stream tokens with `InferenceClient`, simply pass `stream=True` and iterate over the response. ```python from huggingface_hub import InferenceClient client = InferenceClient("http://127.0.0.1:8080") for token in client.text_generation("How do you make cheese?", max_new_tokens=12, stream=True): print(token) # To # make # cheese #, # you # need # to # start # with # milk #. ``` If you want additional details, you can add `details=True`. In this case, you get a `TextGenerationStreamResponse` which contains additional information such as the probabilities and the tokens. For the final response in the stream, it also returns the full generated text. ```python for details in client.text_generation("How do you make cheese?", max_new_tokens=12, details=True, stream=True): print(details) #TextGenerationStreamResponse(token=Token(id=193, text='\n', logprob=-0.007358551, special=False), generated_text=None, details=None) #TextGenerationStreamResponse(token=Token(id=2044, text='To', logprob=-1.1357422, special=False), generated_text=None, details=None) #TextGenerationStreamResponse(token=Token(id=717, text=' make', logprob=-0.009841919, special=False), generated_text=None, details=None) #... 
#TextGenerationStreamResponse(token=Token(id=25, text='.', logprob=-1.3408203, special=False), generated_text='\nTo make cheese, you need to start with milk.', details=StreamDetails(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=12, seed=None)) ``` The `huggingface_hub` library also comes with an `AsyncInferenceClient` in case you need to handle the requests concurrently. ```python from huggingface_hub import AsyncInferenceClient client = AsyncInferenceClient("http://127.0.0.1:8080") async for token in await client.text_generation("How do you make cheese?", stream=True): print(token) # To # make # cheese #, # you # need # to # start # with # milk #. ``` ### Streaming with cURL To use the `generate_stream` endpoint with curl, you can add the `-N` flag, which disables curl default buffering and shows data as it arrives from the server. ```curl curl -N 127.0.0.1:8080/generate_stream \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` ### Streaming with JavaScript First, we need to install the `@huggingface/inference` library. `npm install @huggingface/inference` If you're using the free Inference API, you can use `HfInference`. If you're using inference endpoints, you can use `HfInferenceEndpoint`. We can create a `HfInferenceEndpoint` by providing our endpoint URL and credentials. ```js import { HfInferenceEndpoint } from '@huggingface/inference' const hf = new HfInferenceEndpoint('https://YOUR_ENDPOINT.endpoints.huggingface.cloud', 'hf_YOUR_TOKEN') // prompt const prompt = 'What can you do in Nuremberg, Germany? Give me 3 Tips' const stream = hf.textGenerationStream({ inputs: prompt }) for await (const r of stream) { // yield the generated token process.stdout.write(r.token.text) } ``` ## How does Streaming work under the hood? Under the hood, TGI uses Server-Sent Events (SSE). In an SSE setup, a client sends a request with the data, opening an HTTP connection and subscribing to updates. Afterward, the server sends data to the client. There is no need for further requests; the server will keep sending the data. SSEs are unidirectional, meaning the client does not send other requests to the server. SSE sends data over HTTP, making it easy to use. SSEs are different from: * Polling: where the client keeps calling the server to get data. This means that the server might return empty responses and cause overhead. * Webhooks: where there is a bi-directional connection. The server can send information to the client, but the client can also send data to the server after the first request. Webhooks are more complex to operate as they don’t only use HTTP. If there are too many requests at the same time, TGI returns an HTTP Error with an `overloaded` error type (`huggingface_hub` returns `OverloadedError`). This allows the client to manage the overloaded server (e.g., it could display a busy error to the user or retry with a new request). To configure the maximum number of concurrent requests, you can specify `--max_concurrent_requests`, allowing clients to handle backpressure.
text-generation-inference/docs/source/conceptual/streaming.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/streaming.md", "repo_id": "text-generation-inference", "token_count": 1972 }
200
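The document above notes that `huggingface_hub` raises `OverloadedError` when TGI is saturated and suggests retrying; below is a hedged retry sketch. The exception's import path, the endpoint URL, and the backoff values are assumptions, not something specified in the document.

```python
# Hedged sketch of handling backpressure as described above: retry with a
# short backoff when the server reports it is overloaded. The import path of
# OverloadedError may vary across huggingface_hub versions; the endpoint and
# backoff values are placeholders.
import time
from huggingface_hub import InferenceClient
from huggingface_hub.errors import OverloadedError  # assumed location

client = InferenceClient("http://127.0.0.1:8080")

def generate_with_retry(prompt, retries=3):
    for attempt in range(retries):
        try:
            return client.text_generation(prompt, max_new_tokens=20)
        except OverloadedError:
            time.sleep(2 ** attempt)  # simple exponential backoff
    raise RuntimeError("server stayed overloaded")

print(generate_with_retry("What is Deep Learning?"))
```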
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.6914062, "text": "What" }, { "id": 338, "logprob": -1.4746094, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8623047, "text": "Learning" }, { "id": 29973, "logprob": -0.7558594, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9228516, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4609375, "special": false, "text": "What" }, { "id": 338, "logprob": -0.57177734, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5722656, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5859375, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.02633667, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4335938, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15991211, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62060547, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq_sharded/test_flash_llama_awq_load_sharded.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq_sharded/test_flash_llama_awq_load_sharded.json", "repo_id": "text-generation-inference", "token_count": 5776 }
201
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.7890625, "text": "Test" }, { "id": 2009, "logprob": -9.625, "text": "request" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -2.3359375, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.8779297, "special": false, "text": "Test" }, { "id": 2009, "logprob": -1.2744141, "special": false, "text": " request" }, { "id": 13, "logprob": -1.6933594, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.4648438, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.15600586, "special": false, "text": " request" }, { "id": 13, "logprob": -0.8027344, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.23022461, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.0069885254, "special": false, "text": " request" }, { "id": 13, "logprob": -0.02218628, "special": false, "text": "\n" } ], "top_tokens": null }, "generated_text": "\nTest request\nTest request\nTest request\n" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json", "repo_id": "text-generation-inference", "token_count": 1046 }
202
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 2271, "logprob": null, "text": "Test" }, { "id": 1681, "logprob": -8.8515625, "text": " request" } ], "seed": null, "tokens": [ { "id": 198, "logprob": -2.9023438, "special": false, "text": "\n" }, { "id": 2, "logprob": -2.9160156, "special": false, "text": "#" }, { "id": 4230, "logprob": -3.1035156, "special": false, "text": " Create" }, { "id": 264, "logprob": -1.1025391, "special": false, "text": " a" }, { "id": 1681, "logprob": -1.6914062, "special": false, "text": " request" }, { "id": 198, "logprob": -1.1953125, "special": false, "text": "\n" }, { "id": 2035, "logprob": -1.3203125, "special": false, "text": "request" }, { "id": 284, "logprob": -0.13537598, "special": false, "text": " =" }, { "id": 7388, "logprob": -1.2402344, "special": false, "text": " requests" }, { "id": 670, "logprob": -0.2775879, "special": false, "text": ".get" } ], "top_tokens": null }, "generated_text": "\n# Create a request\nrequest = requests.get" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2.json", "repo_id": "text-generation-inference", "token_count": 988 }
203
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -6.9804688, "text": "User" }, { "id": 29901, "logprob": -0.006122589, "text": ":" }, { "id": 32000, "logprob": -0.8417969, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -9.918213e-05, "text": "<image>" }, { "id": 32000, "logprob": -2.3841858e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.1679688, "text": "Can" }, { "id": 366, "logprob": -0.014091492, "text": "you" }, { "id": 2649, "logprob": -4.4726562, "text": "tell" }, { "id": 592, "logprob": -0.2998047, "text": "me" }, { "id": 263, "logprob": -4.15625, "text": "a" }, { "id": 1407, "logprob": -9.3828125, "text": "very" }, { "id": 3273, "logprob": -1.9716797, "text": "short" }, { "id": 5828, "logprob": -0.27734375, "text": "story" }, { "id": 2729, "logprob": -3.5605469, "text": "based" }, { "id": 373, "logprob": -0.00064468384, "text": "on" }, { "id": 278, "logprob": -0.14160156, "text": "the" }, { "id": 1967, "logprob": -0.06915283, "text": "image" }, { "id": 29973, "logprob": -0.16381836, "text": "?" } ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0026664734, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.583069e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.8119812e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -2.9802322e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.2186508e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1865234, "special": false, "text": " stands" } ] }, "generated_text": " \nAssistant: A rooster stands" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -6.9804688, "text": "User" }, { "id": 29901, "logprob": -0.006122589, "text": ":" }, { "id": 32000, "logprob": -0.8417969, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -9.942055e-05, "text": "<image>" }, { "id": 32000, "logprob": -2.3841858e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.1679688, "text": "Can" }, { "id": 366, "logprob": -0.014091492, "text": "you" }, { "id": 2649, "logprob": -4.4726562, "text": "tell" }, { "id": 592, "logprob": -0.2998047, "text": "me" }, { "id": 263, "logprob": -4.15625, "text": "a" }, { "id": 1407, "logprob": -9.3828125, "text": "very" }, { "id": 3273, "logprob": -1.9716797, "text": "short" }, { "id": 5828, "logprob": -0.27734375, "text": "story" }, { "id": 2729, "logprob": -3.5605469, "text": "based" }, { "id": 373, "logprob": -0.0006451607, "text": "on" }, { "id": 278, "logprob": -0.14160156, "text": "the" }, { "id": 1967, "logprob": -0.06915283, "text": "image" }, { "id": 29973, "logprob": -0.16381836, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0026664734, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.571148e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.8119812e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.0994415e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1865234, "special": false, "text": " stands" } ] }, "generated_text": " \nAssistant: A rooster stands" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -6.9804688, "text": "User" }, { "id": 29901, "logprob": -0.006122589, "text": ":" }, { "id": 32000, "logprob": -0.8417969, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -9.918213e-05, "text": "<image>" }, { "id": 32000, "logprob": -2.3841858e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.1679688, "text": "Can" }, { "id": 366, "logprob": -0.014091492, "text": "you" }, { "id": 2649, "logprob": -4.4726562, "text": "tell" }, { "id": 592, "logprob": -0.2998047, "text": "me" }, { "id": 263, "logprob": -4.15625, "text": "a" }, { "id": 1407, "logprob": -9.3828125, "text": "very" }, { "id": 3273, "logprob": -1.9716797, "text": "short" }, { "id": 5828, "logprob": -0.27734375, "text": "story" }, { "id": 2729, "logprob": -3.5605469, "text": "based" }, { "id": 373, "logprob": -0.00064468384, "text": "on" }, { "id": 278, "logprob": -0.14160156, "text": "the" }, { "id": 1967, "logprob": -0.06915283, "text": "image" }, { "id": 29973, "logprob": -0.16381836, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0026664734, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.59499e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.8119812e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.0994415e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1865234, "special": false, "text": " stands" } ] }, "generated_text": " \nAssistant: A rooster stands" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -6.9804688, "text": "User" }, { "id": 29901, "logprob": -0.006122589, "text": ":" }, { "id": 32000, "logprob": -0.8417969, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -9.942055e-05, "text": "<image>" }, { "id": 32000, "logprob": -2.3841858e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.1679688, "text": "Can" }, { "id": 366, "logprob": -0.014091492, "text": "you" }, { "id": 2649, "logprob": -4.4726562, "text": "tell" }, { "id": 592, "logprob": -0.2998047, "text": "me" }, { "id": 263, "logprob": -4.15625, "text": "a" }, { "id": 1407, "logprob": -9.3828125, "text": "very" }, { "id": 3273, "logprob": -1.9716797, "text": "short" }, { "id": 5828, "logprob": -0.27734375, "text": "story" }, { "id": 2729, "logprob": -3.5605469, "text": "based" }, { "id": 373, "logprob": -0.0006451607, "text": "on" }, { "id": 278, "logprob": -0.14160156, "text": "the" }, { "id": 1967, "logprob": -0.06915283, "text": "image" }, { "id": 29973, "logprob": -0.16381836, "text": "?" } ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0026664734, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.571148e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.8119812e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.0994415e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1865234, "special": false, "text": " stands" } ] }, "generated_text": " \nAssistant: A rooster stands" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json", "repo_id": "text-generation-inference", "token_count": 9573 }
204
{ "choices": [ { "finish_reason": "eos_token", "index": 0, "logprobs": null, "message": { "content": null, "name": null, "role": "assistant", "tool_calls": { "function": { "description": null, "name": "tools", "parameters": { "format": "celsius", "location": "New York, NY", "num_days": 14 } }, "id": 0, "type": "function" } }, "usage": null } ], "created": 1709079417, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "1.4.3-native", "usage": { "completion_tokens": 29, "prompt_tokens": 316, "total_tokens": 345 } }
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools.json", "repo_id": "text-generation-inference", "token_count": 469 }
205
import pytest @pytest.fixture(scope="module") def flash_neox_sharded_handle(launcher): with launcher("OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_neox_sharded(flash_neox_sharded_handle): await flash_neox_sharded_handle.health(300) return flash_neox_sharded_handle.client @pytest.mark.asyncio async def test_flash_neox(flash_neox_sharded, response_snapshot): response = await flash_neox_sharded.generate( "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_neox_load(flash_neox_sharded, generate_load, response_snapshot): responses = await generate_load( flash_neox_sharded, "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_neox_sharded.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_neox_sharded.py", "repo_id": "text-generation-inference", "token_count": 491 }
206
[package] name = "text-generation-client" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [dependencies] futures = "^0.3" grpc-metadata = { path = "../grpc-metadata" } prost = "^0.12" thiserror = "^1.0" tokio = { version = "^1.32", features = ["sync"] } tonic = "^0.10" tower = "^0.4" tracing = "^0.1" [build-dependencies] tonic-build = "0.10.1" prost-build = "0.12.1"
text-generation-inference/router/client/Cargo.toml/0
{ "file_path": "text-generation-inference/router/client/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 180 }
207
#!/bin/bash if [[ -z "${HF_MODEL_ID}" ]]; then echo "HF_MODEL_ID must be set" exit 1 fi export MODEL_ID="${HF_MODEL_ID}" if [[ -n "${HF_MODEL_REVISION}" ]]; then export REVISION="${HF_MODEL_REVISION}" fi if [[ -n "${SM_NUM_GPUS}" ]]; then export NUM_SHARD="${SM_NUM_GPUS}" fi if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then export QUANTIZE="${HF_MODEL_QUANTIZE}" fi if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}" fi text-generation-launcher --port 8080
text-generation-inference/sagemaker-entrypoint.sh/0
{ "file_path": "text-generation-inference/sagemaker-entrypoint.sh", "repo_id": "text-generation-inference", "token_count": 239 }
208
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #include "column_remap.cuh" #include "../util.cuh" const int SHUF_BLOCKSIZE_X = 256; const int SHUF_BLOCKSIZE_Y = 16; __global__ void column_remap_kernel ( const half* __restrict__ x, half* __restrict__ x_new, const int x_width, const int x_height, const uint32_t* x_map ) { int x_column = SHUF_BLOCKSIZE_X * blockIdx.x + threadIdx.x; int x_row = SHUF_BLOCKSIZE_Y * blockIdx.y; int x_stride = x_width; int x_idx = x_row * x_stride + x_column; int x_row_end = min(x_row + SHUF_BLOCKSIZE_Y, x_height); int x_idx_end = x_row_end * x_stride + x_column; int s_column = x_map[x_column]; int s_idx = x_row * x_stride + s_column; while (x_idx < x_idx_end) { x_new[x_idx] = x[s_idx]; x_idx += x_stride; s_idx += x_stride; } } // Remap columns in x to correspond to sequential group index before matmul // // perform x -> seq_x such that seq_x @ seq_w == x @ w void column_remap_cuda ( const half* x, half* x_new, const int x_height, const int x_width, const uint32_t* x_map ) { dim3 threads(SHUF_BLOCKSIZE_X, 1, 1); dim3 blocks ( (x_width + SHUF_BLOCKSIZE_X - 1) / SHUF_BLOCKSIZE_X, (x_height + SHUF_BLOCKSIZE_Y - 1) / SHUF_BLOCKSIZE_Y, 1 ); column_remap_kernel<<<blocks, threads>>>(x, x_new, x_width, x_height, x_map); }
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu", "repo_id": "text-generation-inference", "token_count": 696 }
209
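The kernel comment above states the invariant `seq_x @ seq_w == x @ w`; this small PyTorch sketch checks that identity on random CPU tensors, purely to illustrate the math the CUDA kernel implements for half-precision weights.

```python
# Sketch verifying the identity from the kernel comment above: remapping the
# columns of x with x_map and the rows of w with the same permutation leaves
# the matmul result unchanged. Plain float32 PyTorch on CPU, illustration only.
import torch

rows, k, n = 4, 8, 5
x = torch.randn(rows, k)
w = torch.randn(k, n)
x_map = torch.randperm(k)   # column permutation, analogous to the kernel's x_map

seq_x = x[:, x_map]         # what column_remap_cuda produces
seq_w = w[x_map, :]         # weights stored in the permuted (sequential-group) order
assert torch.allclose(seq_x @ seq_w, x @ w, atol=1e-5)
print("remapped matmul matches the original")
```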
#include "q_gemm.cuh" #include "util.cuh" #include "matrix_view.cuh" #include "../config.h" #include "quant/qdq_2.cuh" #include "quant/qdq_3.cuh" #include "quant/qdq_4.cuh" #include "quant/qdq_5.cuh" #include "quant/qdq_6.cuh" #include "quant/qdq_8.cuh" #define GPTQ_BLOCK_KN_SIZE 128 #define GPTQ_BLOCK_M_SIZE_MAX 8 #define GPTQ_MAX_GROUPS_IN_BLOCK (GPTQ_BLOCK_KN_SIZE / 32) #define EXL2_BLOCK_KN_SIZE 64 #define EXL2_BLOCK_M_SIZE_MAX 8 #define EXL2_MAX_GROUPS_IN_BLOCK (EXL2_BLOCK_KN_SIZE / 32) #define CLEAR_N_SIZE 256 #include "q_gemm_kernel.cuh" #include "q_gemm_kernel_gptq.cuh" void gemm_half_q_half_cuda_part ( const half* a, QMatrix* b, half* c, int size_m, int size_n, int size_k, int m_count, bool clear, const half* r_weights, int r_weights_stride, bool mul_r_weights ) { const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (!b->is_gptq) { dim3 blockDim, gridDim; blockDim.x = EXL2_BLOCK_KN_SIZE; blockDim.y = 1; blockDim.z = 1; gridDim.x = DIVIDE(size_n, EXL2_BLOCK_KN_SIZE * 4); gridDim.y = DIVIDE(size_m, m_count); gridDim.z = DIVIDE(size_k, EXL2_BLOCK_KN_SIZE); fp_gemm_half_q_half_kernel kernel = pick_gemm_half_q_half_kernel(m_count, r_weights != NULL, mul_r_weights); kernel<<<gridDim, blockDim, 0, stream>>> ( a, b->cuda_q_weight, b->cuda_q_scale, b->cuda_q_scale_max, c, size_m, size_n, size_k, b->groups, b->cuda_q_group_map, b->cuda_q_perm, b->rows_8, b->rows_6, b->rows_5, b->rows_4, b->rows_3, b->rows_2, clear, r_weights, r_weights_stride ); } else { dim3 blockDim, gridDim; blockDim.x = GPTQ_BLOCK_KN_SIZE; blockDim.y = 1; blockDim.z = 1; gridDim.x = DIVIDE(size_n, GPTQ_BLOCK_KN_SIZE * 4); gridDim.y = DIVIDE(size_m, m_count); gridDim.z = DIVIDE(size_k, GPTQ_BLOCK_KN_SIZE); fp_gemm_half_q_half_gptq_kernel kernel = pick_gemm_half_q_half_gptq_kernel(m_count, r_weights != NULL, mul_r_weights); // DBGX((uint64_t) r_weights); // if (r_weights) // print_global_mem(r_weights, 1, 1, 1); // DBGI(r_weights_stride); kernel<<<gridDim, blockDim, 0, stream>>> ( a, b->cuda_q_weight, b->cuda_gptq_qzeros, b->cuda_gptq_scales, c, size_m, size_n, size_k, b->groups, b->gptq_groupsize, b->cuda_q_perm, b->rows_4, clear, r_weights, r_weights_stride ); } } void gemm_half_q_half_cuda ( cublasHandle_t cublas_handle, const half* a, QMatrix* b, half* c, int size_m, int size_n, int size_k, bool clear, half* temp_dq, bool force_cuda, const half* r_weights, const int r_weights_stride, bool mul_r_weights ) { if (size_m > MAX_Q_GEMM_ROWS && !force_cuda) { // Reconstruct FP16 matrix, then cuBLAS if (!temp_dq) temp_dq = b->temp_dq; b->reconstruct(temp_dq); //cublasSetMathMode(cublas_handle, CUBLAS_TENSOR_OP_MATH); const half alpha = __float2half(1.0f); const half beta = clear ? __float2half(0.0f) : __float2half(1.0f); cublasHgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, size_n, size_m, size_k, &alpha, temp_dq, size_n, a, size_k, &beta, c, size_n); //const float alpha = 1.0f; //const float beta = clear ? 0.0f : 1.0f; //cublasSgemmEx(cublas_handle, // CUBLAS_OP_N, // CUBLAS_OP_N, // size_n, size_m, size_k, // &alpha, temp_dq, CUDA_R_16F, size_n, // a, CUDA_R_16F, size_k, // &beta, c, CUDA_R_16F, size_n); //const float alpha = 1.0f; //const float beta = clear ? 0.0f : 1.0f; //cublasGemmEx(cublas_handle, // CUBLAS_OP_N, CUBLAS_OP_N, // size_n, size_m, size_k, // &alpha, temp_dq, CUDA_R_16F, size_n, // a, CUDA_R_16F, size_k, // &beta, c, CUDA_R_16F, size_n, // CUDA_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP); } else { // Quantized matmul int block_m_size_max = b->is_gptq ? 
GPTQ_BLOCK_M_SIZE_MAX : EXL2_BLOCK_M_SIZE_MAX; int max_chunks = size_m / block_m_size_max; int last_chunk = max_chunks * block_m_size_max; int last_chunk_size = size_m - last_chunk; if (max_chunks) { gemm_half_q_half_cuda_part(a, b, c, last_chunk, size_n, size_k, block_m_size_max, clear, r_weights, r_weights_stride, mul_r_weights); } if (last_chunk_size) { gemm_half_q_half_cuda_part(a + last_chunk * size_k, b, c + last_chunk * size_n, last_chunk_size, size_n, size_k, last_chunk_size, clear, r_weights, r_weights_stride, mul_r_weights); } } } __global__ void clear_kernel ( half* __restrict__ c, const int size_m, const int size_n ) { int m = blockIdx.y; int n = (blockIdx.x * CLEAR_N_SIZE + threadIdx.x) * 8; if (n >= size_n) return; int4* c_ptr = (int4*)(c + m * size_n + n); *c_ptr = {}; } void clear_tensor_cuda ( half* c, int size_m, int size_n ) { // dim3 blockDim, gridDim; // blockDim.x = CLEAR_N_SIZE; // blockDim.y = 1; // gridDim.x = DIVIDE(size_n / 8, CLEAR_N_SIZE); // gridDim.y = size_m; // clear_kernel<<<gridDim, blockDim>>>(c, size_m, size_n); }
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu", "repo_id": "text-generation-inference", "token_count": 3563 }
210
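A note on the quantized matmul dispatch in gemm_half_q_half_cuda above: when size_m is small enough to skip the cuBLAS reconstruction path, the rows of A are processed as full chunks of block_m_size_max (GPTQ_BLOCK_M_SIZE_MAX or EXL2_BLOCK_M_SIZE_MAX, both 8 here) followed by one smaller launch for the remainder. The Python sketch below only restates that splitting arithmetic for illustration; the names mirror the CUDA code but are not part of any kernel API.

def split_m_dimension(size_m: int, block_m_size_max: int):
    """Illustrative only: how the rows of A are split before launching
    gemm_half_q_half_cuda_part (full chunks first, then the remainder)."""
    max_chunks = size_m // block_m_size_max
    full_rows = max_chunks * block_m_size_max   # "last_chunk" in the CUDA code
    tail_rows = size_m - full_rows              # "last_chunk_size"
    launches = []
    if max_chunks:
        # One launch covering all full chunks, with m_count = block_m_size_max.
        launches.append((0, full_rows, block_m_size_max))
    if tail_rows:
        # A second launch for the remaining rows, with m_count = tail_rows.
        launches.append((full_rows, tail_rows, tail_rows))
    return launches  # (row offset, rows in launch, m_count)

# Example: 21 rows with a block size of 8 -> [(0, 16, 8), (16, 5, 5)]
print(split_m_dimension(21, 8))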
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastRMSNorm, ) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=True, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" w = [ weights.get_sharded(f"{p}.bias", dim=0) for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"] ] bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device) return TensorParallelColumnLinear( get_linear(weight, bias=bias, quantize=config.quantize) ) class Qwen2Attention(torch.nn.Module): def __init__( self, prefix: str, config, weights, ): super().__init__() self.max_past = ( config.sliding_window if config.sliding_window is not None else -1 ) self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.head_size, base=config.rope_theta, device=weights.device, ) self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o_proj", weights=weights, bias=False, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): qkv = self.query_key_value(hidden_states) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) 
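        # After the fused QKV projection: `query` is (num_tokens, num_heads, head_size)
        # and `kv` is (num_tokens, 2, num_key_value_heads, head_size), where kv[:, 0]
        # holds the keys and kv[:, 1] the values. With grouped-query attention the
        # number of key/value heads can be smaller than the number of query heads.
        # The rotary embedding call below operates on the query and on the key half
        # (torch.select(kv, dim=1, index=0)) using the precomputed cos/sin tables.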
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv paged_attention.reshape_and_cache( kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots ) # output tensor attn_output = torch.empty_like(query) # Prefill if cu_seqlen_prefill is not None: # flash attention flash_attn.attention( query, torch.select(kv, dim=1, index=0), torch.select(kv, dim=1, index=1), attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, window_size_left=self.max_past, ) # Decode else: paged_attention.attention( attn_output, query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class Qwen2MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) # Fuse gate and up proj self.gate_up_proj = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"], weights=weights, dim=0, bias=False, ) self.down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.down_proj", weights=weights, bias=False, ) self.intermediate_size = ( config.intermediate_size // weights.process_group.size() ) def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) class Qwen2Layer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() prefix = f"model.layers.{layer_id}" self.self_attn = Qwen2Attention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.mlp = Qwen2MLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps ) self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) # faster post attention rms norm normed_attn_res_output, attn_res = self.post_attention_layernorm( attn_output, res ) mlp_output = self.mlp(normed_attn_res_output) return mlp_output, attn_res class Qwen2Model(torch.nn.Module): def __init__(self, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding( prefix="model.embed_tokens", weights=weights ) self.layers = nn.ModuleList( [ Qwen2Layer( layer_id, config, weights, ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = FastRMSNorm.load( prefix="model.norm", weights=weights, eps=config.rms_norm_eps ) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads 
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( position_ids, true_max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class Qwen2ForCausalLM(torch.nn.Module): def __init__(self, config, weights): super().__init__() self.model = Qwen2Model(config, weights) self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights, ) self.max_past = config.sliding_window self.max_past_tensor = ( torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, prefill_cache_indices: Optional[torch.Tensor] = None, lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values input_lengths = torch.clamp(input_lengths, max=self.max_past_tensor) hidden_states = self.model( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, true_max_s, prefill_cache_indices, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py", "repo_id": "text-generation-inference", "token_count": 6592 }
211
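As a side note on the grouped-query attention wiring in Qwen2Attention above: kv_head_mapping is built by repeat-interleaving the key/value head indices so that, during paged attention, every query head knows which KV head it shares. The snippet below is a standalone illustration with made-up per-shard head counts; the numbers are not taken from any real checkpoint or configuration.

import torch

num_heads = 8            # hypothetical query heads on this shard
num_key_value_heads = 2  # hypothetical KV heads on this shard
num_groups = num_heads // num_key_value_heads  # 4 query heads share each KV head

# Same construction as in Qwen2Attention.__init__ (device argument omitted here):
kv_head_mapping = torch.arange(0, num_key_value_heads, dtype=torch.int32).repeat_interleave(num_groups)
print(kv_head_mapping)  # tensor([0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.int32)
# Query heads 0-3 read KV head 0, query heads 4-7 read KV head 1.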
import math import os import time import itertools import torch import torch.distributed import numpy as np from loguru import logger from dataclasses import dataclass from opentelemetry import trace from transformers import PreTrainedTokenizerBase from typing import Optional, Tuple, List, Type, Dict from text_generation_server.models import Model from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.utils.speculate import get_speculate from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.models.cache_manager import ( get_cache_manager, set_cache_manager, BLOCK_SIZE, ) from text_generation_server.pb import generate_pb2 from text_generation_server.models.globals import MEM_POOL, ENABLE_CUDA_GRAPHS from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser from text_generation_server.utils.dist import MEMORY_FRACTION tracer = trace.get_tracer(__name__) @dataclass class FlashCausalLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] # request id -> idx in list mapping requests_idx_mapping: Dict[int, int] # Decoder values input_ids: torch.Tensor position_ids: torch.Tensor speculative_ids: torch.Tensor # Flash Attention values # tensor of length b containing the cumulative sequence lengths of the sequences in the batch, only used in prefill cu_seqlen_prefill: Optional[torch.Tensor] # Paged Attention values # Set when creating the batch # CPU tensor of length b indicating the start of each sequence in slots start_slots: torch.Tensor # tensor of indices of the currently used slots, length = \sum_{i=0}^{b} s_i in prefill, length = b in decode slot_indices: torch.Tensor # List of tuple of ints representing the number of blocks and slots needed by each sequence needed_blocks_slots: Optional[List[Tuple[int, int]]] # Set in prefill by the CacheManager # list of length b of list of length s_i // block_size block_tables: Optional[List[List[int]]] # tensor of size [b, max_total_seqlen // block_size] holding the paged attention block tables for all sequences block_tables_tensor: Optional[torch.Tensor] # tensor of length \sum_{i=0}^{b} max_s_i holding the paged attention slots for all sequences slots: Optional[torch.Tensor] max_seqlen: int # Prefill metadata tensors to efficiently compute logprobs prefill_head_indices: Optional[torch.Tensor] prefill_next_token_indices: Optional[torch.tensor] prefill_cu_outlens: Optional[List[int]] # All tokens all_input_ids: List[List[int]] all_input_ids_tensor: torch.Tensor # Lengths of all generations present in the batch input_lengths: List[int] input_lengths_tensor: torch.Tensor prefix_offsets: List[Optional[int]] read_offsets: List[Optional[int]] # Generation helpers next_token_chooser: HeterogeneousNextTokenChooser stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor # Number of blocks in this batch blocks: int # Maximum number of blocks max_blocks: int def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch( id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.blocks * BLOCK_SIZE, ) @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "FlashCausalLMBatch": batch_inputs = [] max_truncation = 0 for r in pb.requests: batch_inputs.append(r.inputs) max_truncation = max(max_truncation, r.truncate) batch_tokenized_inputs = tokenizer( 
batch_inputs, truncation=True, max_length=max_truncation )["input_ids"] position_ids = [] speculative_ids = [] cu_seqlen_prefill = [0] needed_blocks_slots = [] start_slots = [] slot_indices = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] requests_idx_mapping = {} all_prefill_logprobs = True no_prefill_logprobs = True prefill_head_indices = [] prefill_next_token_indices = [] prefill_cu_outlens = [0] next_token_chooser_parameters = [] stopping_criterias = [] top_n_tokens = [] # Cumulative length cumulative_length = 0 cumulative_max_length = 0 prefill_out_cumulative_length = 0 blocks = 0 max_seqlen = 0 max_length = 0 max_blocks = 0 # Parse batch for i, (r, tokenized_input) in enumerate( zip(pb.requests, batch_tokenized_inputs) ): # request id -> idx in list mapping requests_idx_mapping[r.id] = i tokenized_input = tokenized_input[-r.truncate :] input_length = len(tokenized_input) input_lengths.append(input_length) prefix_offsets.append(input_length - 5) read_offsets.append(input_length) all_input_ids.append(tokenized_input) # Position ids request_position_ids = torch.arange(0, input_length, dtype=torch.int32) position_ids.append(request_position_ids) # Add cumulative lengths of all previous inputs cu_seqlen_prefill.append(cumulative_length + input_length) next_token_chooser_parameters.append(r.parameters) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) max_new_tokens = stopping_criteria.max_new_tokens stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) # Paged attention # Remove one as the first token des not have a past speculative_length = get_speculate() total_tokens = input_length + max_new_tokens - 1 + speculative_length needed_blocks = math.ceil(total_tokens / BLOCK_SIZE) blocks += needed_blocks needed_blocks_slots.append((needed_blocks, total_tokens)) start_slots.append(cumulative_max_length) request_slot_indices = torch.arange( cumulative_max_length, cumulative_max_length + input_length, dtype=torch.int64, ) slot_indices.append(request_slot_indices) all_prefill_logprobs = all_prefill_logprobs and r.prefill_logprobs no_prefill_logprobs = no_prefill_logprobs and not r.prefill_logprobs if r.prefill_logprobs: prefill_head_indices.append(request_position_ids + cumulative_length) prefill_next_token_indices.append( prefill_out_cumulative_length + input_length - 1 ) prefill_cu_outlens.append(prefill_out_cumulative_length + input_length) prefill_out_cumulative_length += input_length else: prefill_head_indices.append( torch.tensor( [cumulative_length + input_length - 1], dtype=torch.int32 ) ) prefill_next_token_indices.append(prefill_out_cumulative_length) prefill_cu_outlens.append(prefill_out_cumulative_length + 1) prefill_out_cumulative_length += 1 # Update cumulative_length += input_length cumulative_max_length += total_tokens max_seqlen = max(max_seqlen, input_length) max_blocks = max(max_blocks, needed_blocks) max_length = max( max_length, input_length + max_new_tokens + speculative_length ) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( next_token_chooser_parameters, dtype, device, tokenizer ) start_slots = torch.tensor(start_slots, dtype=torch.int64) # Padded all_input_ids_tensor all_input_ids_tensor = np.zeros( (len(all_input_ids), max_length), dtype=np.int64 ) for i, input_ids in enumerate(all_input_ids): all_input_ids_tensor[i, : len(input_ids)] = input_ids # Create tensors on device all_input_ids_tensor = torch.tensor( all_input_ids_tensor, dtype=torch.int64, device=device ) if 
len(pb.requests) > 1: input_ids = np.concatenate(all_input_ids, dtype=np.int64) position_ids = torch.cat(position_ids) slot_indices = torch.cat(slot_indices) else: input_ids = all_input_ids[0] position_ids = position_ids[0] slot_indices = slot_indices[0] cu_seqlen_prefill = torch.tensor( cu_seqlen_prefill, device=device, dtype=torch.int32 ) position_ids = position_ids.to(device) slot_indices = slot_indices.to(device) input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device) input_lengths_tensor = torch.tensor( input_lengths, dtype=torch.int32, device=device ) if all_prefill_logprobs: prefill_head_indices = None prefill_next_token_indices = cu_seqlen_prefill[1:] - 1 elif no_prefill_logprobs: prefill_head_indices = cu_seqlen_prefill[1:] - 1 prefill_next_token_indices = None else: prefill_head_indices = torch.tensor( torch.cat(prefill_head_indices), dtype=torch.int64, device=device ) prefill_next_token_indices = torch.tensor( prefill_next_token_indices, dtype=torch.int64, device=device ) top_n_tokens_tensor = torch.tensor( top_n_tokens, device=device, dtype=torch.int64 ) return cls( batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, start_slots=start_slots, slot_indices=slot_indices, needed_blocks_slots=needed_blocks_slots, block_tables=None, block_tables_tensor=None, slots=None, max_seqlen=max_seqlen, prefill_head_indices=prefill_head_indices, prefill_next_token_indices=prefill_next_token_indices, prefill_cu_outlens=prefill_cu_outlens, input_lengths=input_lengths, input_lengths_tensor=input_lengths_tensor, prefix_offsets=prefix_offsets, read_offsets=read_offsets, all_input_ids=all_input_ids, all_input_ids_tensor=all_input_ids_tensor, next_token_chooser=next_token_chooser, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, blocks=blocks, max_blocks=max_blocks, speculative_ids=None, ) @tracer.start_as_current_span("filter") def filter(self, request_ids: List[int]) -> "FlashCausalLMBatch": if len(request_ids) == 0: raise ValueError("Batch must have at least one request") # We assume that if len(requests) == len(self) then the requests are the same if len(request_ids) == len(self): return self device = self.input_ids.device # New values after filtering requests_idx_mapping = {} # Used to index into tensors indices = [] # slots to keep after filtering slot_filtering_indices = torch.zeros( self.slots.shape[0], dtype=torch.bool, device=device ) # Create on CPU to only move to GPU once instead of at every copy slot_indices = torch.empty(len(request_ids), dtype=torch.int64) max_seqlen = 0 requests = [] start_slots = [] block_tables = [] all_input_ids = [] input_lengths = [] prefix_offsets = [] read_offsets = [] stopping_criterias = [] top_n_tokens = [] blocks = 0 max_blocks = 0 # Cumulative length cumulative_max_length = 0 for i, request_id in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] indices.append(idx) requests_idx_mapping[request_id] = i requests.append(self.requests[idx]) # Get length request_input_length = self.input_lengths[idx] max_seqlen = max(max_seqlen, request_input_length) all_input_ids.append(self.all_input_ids[idx]) input_lengths.append(request_input_length) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) 
top_n_tokens.append(self.top_n_tokens[idx]) remaining_tokens = ( stopping_criteria.max_new_tokens - stopping_criteria.current_tokens ) request_block_table = self.block_tables[idx] blocks += len(request_block_table) block_tables.append(request_block_table) start_slots.append(cumulative_max_length) # Copy to tensor (CPU) slot_indices[i] = cumulative_max_length + request_input_length - 1 # Set slice slot_filtering_indices[ self.start_slots[idx] : self.start_slots[idx] + request_input_length + remaining_tokens - 1 ] = True cumulative_max_length += request_input_length + remaining_tokens - 1 max_blocks = max(max_blocks, len(request_block_table)) block_indices_to_free = [] # Iterate on all requests for i, r in enumerate(self.requests): # Filter requests that are not part of the new batch if r.id not in requests_idx_mapping.keys(): block_indices_to_free.extend(self.block_tables[i]) # Free blocks get_cache_manager().free(block_indices_to_free) # Needed to avoid dropping blocks when the batches will go out of scope self.block_tables = None # Index into tensors input_ids = self.input_ids[indices] position_ids = self.position_ids[indices] all_input_ids_tensor = self.all_input_ids_tensor[indices] block_tables_tensor = self.block_tables_tensor[indices] input_lengths_tensor = self.input_lengths_tensor[indices] slots = self.slots[slot_filtering_indices] next_token_chooser = self.next_token_chooser.filter(indices) top_n_tokens_tensor = self.top_n_tokens_tensor[indices] speculative_ids = ( self.speculative_ids[indices] if self.speculative_ids is not None else None ) start_slots = torch.tensor(start_slots, dtype=torch.int64) # Move to GPU now that we have the whole tensor slot_indices = slot_indices.to(device) return type(self)( batch_id=self.batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, start_slots=start_slots, slot_indices=slot_indices, needed_blocks_slots=None, block_tables=block_tables, block_tables_tensor=block_tables_tensor, slots=slots, max_seqlen=max_seqlen, prefill_head_indices=None, prefill_next_token_indices=None, prefill_cu_outlens=None, input_lengths=input_lengths, input_lengths_tensor=input_lengths_tensor, prefix_offsets=prefix_offsets, read_offsets=read_offsets, all_input_ids=all_input_ids, all_input_ids_tensor=all_input_ids_tensor, next_token_chooser=next_token_chooser, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, blocks=blocks, max_blocks=max_blocks, speculative_ids=speculative_ids, ) @classmethod @tracer.start_as_current_span("concatenate") def concatenate(cls, batches: List["FlashCausalLMBatch"]) -> "FlashCausalLMBatch": # Batch attributes requests = [] requests_idx_mapping = {} blocks = 0 total_batch_size = 0 total_slots = 0 max_blocks = 0 max_length = 0 max_seqlen = 0 for b in batches: total_batch_size += len(b) total_slots += len(b.slots) blocks += b.blocks speculative_length = ( b.speculative_ids.shape[1] if b.speculative_ids is not None else 0 ) max_blocks = max(max_blocks, b.max_blocks) max_seqlen = max(max_seqlen, b.max_seqlen) max_length = max( max_length, max( input_length + stopping_criteria.max_new_tokens + speculative_length - stopping_criteria.current_tokens for input_length, stopping_criteria in zip( b.input_lengths, b.stopping_criterias ) ), ) input_ids = batches[0].input_ids.new_empty(total_batch_size) position_ids = batches[0].position_ids.new_empty(total_batch_size) slots = batches[0].slots.new_empty(total_slots) 
slot_indices = batches[0].slot_indices.new_empty(total_batch_size) input_lengths_tensor = batches[0].input_lengths_tensor.new_empty( total_batch_size ) block_tables_tensor = batches[0].block_tables_tensor.new_zeros( (total_batch_size, max_blocks) ) all_input_ids_tensor = batches[0].all_input_ids_tensor.new_zeros( (total_batch_size, max_length) ) top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros( total_batch_size, ) start_slots = [] block_tables = [] all_input_ids = [] input_lengths = [] prefix_offsets = [] read_offsets = [] next_token_chooser_parameters = [] fsm_grammar_states = [] stopping_criterias = [] top_n_tokens = [] # Cumulative length cumulative_batch_size = 0 cumulative_slots = 0 for i, batch in enumerate(batches): requests.extend(batch.requests) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: # We need to offset the mapping for each batch by the cumulative batch size for k, v in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + cumulative_batch_size start_index = cumulative_batch_size end_index = cumulative_batch_size + len(batch) slots_start_index = cumulative_slots slots_end_index = cumulative_slots + len(batch.slots) # Copy tensors (GPU) input_ids[start_index:end_index] = batch.input_ids position_ids[start_index:end_index] = batch.position_ids slot_indices[start_index:end_index] = batch.slot_indices + cumulative_slots input_lengths_tensor[start_index:end_index] = batch.input_lengths_tensor top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor slots[slots_start_index:slots_end_index] = batch.slots all_input_ids_tensor[ start_index:end_index, : batch.all_input_ids_tensor.shape[1] ] = batch.all_input_ids_tensor[:, :max_length] block_tables_tensor[ start_index:end_index, : batch.block_tables_tensor.shape[1] ] = batch.block_tables_tensor[:, :max_blocks] start_slots.append(batch.start_slots + cumulative_slots) block_tables.extend(batch.block_tables) all_input_ids.extend(batch.all_input_ids) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) next_token_chooser_parameters.extend([r.parameters for r in batch.requests]) fsm_grammar_states.extend(batch.next_token_chooser.fsm_grammar_states) stopping_criterias.extend(batch.stopping_criterias) top_n_tokens.extend(batch.top_n_tokens) # Update cumulative_batch_size += len(batch) cumulative_slots += len(batch.slots) start_slots = torch.concat(start_slots) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( next_token_chooser_parameters, dtype=batches[0].next_token_chooser.dtype, device=batches[0].next_token_chooser.device, tokenizer=batches[0].next_token_chooser.tokenizer, fsm_grammar_states=fsm_grammar_states, ) speculative_ids = ( torch.cat([b.speculative_ids for b in batches], dim=0) if batches[0].speculative_ids is not None else None ) # Needed to avoid dropping blocks when the batches will go out of scope for b in batches: b.block_tables = None del b return cls( batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, start_slots=start_slots, slot_indices=slot_indices, needed_blocks_slots=None, block_tables=block_tables, block_tables_tensor=block_tables_tensor, slots=slots, max_seqlen=max_seqlen, prefill_head_indices=None, prefill_next_token_indices=None, prefill_cu_outlens=None, input_lengths=input_lengths, input_lengths_tensor=input_lengths_tensor, prefix_offsets=prefix_offsets, 
read_offsets=read_offsets, all_input_ids=all_input_ids, all_input_ids_tensor=all_input_ids_tensor, next_token_chooser=next_token_chooser, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, blocks=blocks, max_blocks=max_blocks, speculative_ids=speculative_ids, ) def __del__(self): if self.block_tables is not None and self.block_tables: # Free blocks get_cache_manager().free( list(itertools.chain.from_iterable(self.block_tables)) ) def __len__(self): return len(self.requests) class FlashCausalLM(Model): def __init__( self, model: torch.nn.Module, tokenizer: PreTrainedTokenizerBase, num_layers: int, num_kv_heads: int, head_size: int, dtype: torch.dtype, device: torch.device, rank: int = 0, world_size: int = 1, sliding_window: Optional[int] = None, ): self.num_layers = num_layers self.num_kv_heads = num_kv_heads self.head_size = head_size self.cuda_graphs = {} super(FlashCausalLM, self).__init__( model=model, tokenizer=tokenizer, requires_padding=False, dtype=dtype, device=device, rank=rank, world_size=world_size, sliding_window=sliding_window, ) @property def batch_type(self) -> Type[FlashCausalLMBatch]: return FlashCausalLMBatch def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int): input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device) position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device) slots = torch.arange(bs, dtype=torch.int32, device=self.device) input_lengths = torch.ones(bs, dtype=torch.int32, device=self.device) * max_s block_tables = ( torch.arange(max_bt, dtype=torch.int32, device=self.device) .repeat(bs) .reshape((bs, max_bt)) ) kv_cache = get_cache_manager().kv_cache self.cuda_graphs[bs] = { "input_ids": input_ids, "position_ids": position_ids, "kv_cache": kv_cache, "block_tables": block_tables, "slots": slots, "input_lengths": input_lengths, } graph = torch.cuda.CUDAGraph() self.cuda_graphs[bs]["graph"] = graph torch.cuda.synchronize() # Run once outside to warmup self.model.forward( input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=kv_cache, block_tables=block_tables, slots=slots, input_lengths=input_lengths, max_s=max_s, lm_head_indices=None, ) torch.cuda.synchronize() with torch.cuda.graph(graph, pool=MEM_POOL): logits, speculative_logits = self.model.forward( input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=kv_cache, block_tables=block_tables, slots=slots, input_lengths=input_lengths, max_s=max_s, lm_head_indices=None, ) self.cuda_graphs[bs]["logits"] = logits self.cuda_graphs[bs]["speculative_logits"] = speculative_logits torch.cuda.synchronize() def warmup(self, batch: FlashCausalLMBatch): # The warmup batch is the biggest batch we could ever receive torch.cuda.empty_cache() try: cache_manager = set_cache_manager( batch.blocks, self.num_layers, self.num_kv_heads, self.head_size, self.sliding_window is not None, self.dtype, self.device, ) max_bt = batch.max_blocks max_s = max_bt * get_cache_manager().block_size _, batch, _ = self.generate_token(batch) except torch.cuda.OutOfMemoryError as e: raise RuntimeError( f"Not enough memory to handle {len(batch.input_ids)} prefill tokens. 
" f"You need to decrease `--max-batch-prefill-tokens`" ) from e torch.cuda.synchronize(self.device) # Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm) # Calculate the number of blocks that can be allocated with the free memory dtype_size = torch.tensor([], dtype=self.dtype).element_size() cache_block_size = BLOCK_SIZE * self.num_kv_heads * self.head_size total_cache_size = self.num_layers * cache_block_size * 2 * dtype_size total_free_memory, _ = torch.cuda.mem_get_info(self.device) total_gpu_memory = torch.cuda.get_device_properties(self.device).total_memory free_memory = max( 0, total_free_memory - (1 - MEMORY_FRACTION) * total_gpu_memory ) num_blocks = ( # Leave 5% for some wiggle room int((free_memory * 0.95) // total_cache_size) # Add batch.blocks as we allocated it above, so it is included in the peak memory. + cache_manager.num_blocks ) del batch del cache_manager set_cache_manager( num_blocks, self.num_layers, self.num_kv_heads, self.head_size, self.sliding_window is not None, self.dtype, self.device, ) if ENABLE_CUDA_GRAPHS: try: logger.info("Experimental support for Cuda Graphs is enabled") # Warmup cuda graphs for bs in [1, 2, 4] + [8 * i for i in range(8)]: if self.speculate is None or self.speculate + 1 <= bs: self.cuda_graph_warmup(bs, max_s, max_bt) except Exception: logger.exception(f"Decode cuda graph warmup failed") return int(num_blocks * BLOCK_SIZE) def forward( self, batch: FlashCausalLMBatch ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # Model Forward if batch.speculative_ids is not None: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = get_cache_manager().kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids B, speculative_length = speculative_ids.shape new_length = speculative_length + 1 new_input_ids = torch.cat( [input_ids.unsqueeze(-1), speculative_ids], dim=1 ).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) new_position_ids = ( position_ids.unsqueeze(-1).expand(B, new_length) + arange ).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) input_lengths = ( input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int ).view(-1) # Add Copy the block tables for all members block_tables = ( block_tables.unsqueeze(1) .expand(B, new_length, -1) .reshape(B * new_length, -1) .contiguous() ) max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = get_cache_manager().kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices bs = input_ids.shape[0] padded_bs = bs if bs == 3: padded_bs = 4 elif 3 < bs <= 8: padded_bs = 8 elif bs > 8: padded_bs = (bs + 7) // 8 * 8 # Try to find an associated cuda graph cuda_graph = self.cuda_graphs.get(padded_bs, None) if ( cu_seqlen_prefill is not None or cuda_graph is None or batch.speculative_ids is not None ): return self.model.forward( input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, 
kv_cache=kv_cache, block_tables=block_tables, slots=slots, input_lengths=input_lengths, max_s=max_s, lm_head_indices=lm_head_indices, ) # Copy inputs to the static inputs of the cuda graph # Static inputs are potentially padded cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids cuda_graph["block_tables"][ : block_tables.shape[0], : block_tables.shape[1] ] = block_tables cuda_graph["slots"].fill_(-1) cuda_graph["slots"][: slots.shape[0]] = slots cuda_graph["input_lengths"].zero_() cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths # Replay the graph cuda_graph["graph"].replay() # Slice output to the correct shape speculative_logits = ( cuda_graph["speculative_logits"][:bs] if cuda_graph["speculative_logits"] is not None else None ) logits = cuda_graph["logits"][:bs] return logits, speculative_logits @tracer.start_as_current_span("generate_token") def generate_token( self, batch: FlashCausalLMBatch ) -> Tuple[List[Generation], Optional[FlashCausalLMBatch], Tuple[int, int]]: start = time.time_ns() prefill = batch.cu_seqlen_prefill is not None prefill_logprobs = batch.prefill_next_token_indices is not None if batch.needed_blocks_slots: # Allocate blocks to this batch block_tables, block_tables_tensor, slots = get_cache_manager().allocate( batch.needed_blocks_slots, batch.blocks, batch.max_blocks, batch.input_ids.device, ) batch.needed_blocks_slots = None batch.block_tables = block_tables batch.block_tables_tensor = block_tables_tensor batch.slots = slots try: out, speculative_logits = self.forward(batch) except Exception as e: del batch raise e if prefill: next_token_logits = ( out[batch.prefill_next_token_indices] if prefill_logprobs else out ) if speculative_logits is not None: speculative_logits = ( speculative_logits[batch.prefill_next_token_indices] if prefill_logprobs else speculative_logits ) else: next_token_logits = out speculate = get_speculate() ( next_input_ids, next_token_logprobs, logprobs, accepted_ids, speculative_ids, ) = batch.next_token_chooser( batch.all_input_ids_tensor[:, : batch.max_seqlen], next_token_logits, speculate, batch.speculative_ids, speculative_logits, ) batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs, accepted_ids ) if prefill: if len(batch) > 1 and prefill_logprobs: # We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs # When batch == 1, we will just use the batch.input_ids values directly prefill_tokens_indices = batch.input_ids.new_zeros(len(out)) next_position_ids = batch.position_ids.new_empty(len(batch)) batch.slot_indices = batch.slot_indices[batch.cu_seqlen_prefill[1:] - 1] # We do not need cu_seqlen_prefill anymore batch.cu_seqlen_prefill = None else: prefill_logprobs = None next_position_ids = batch.position_ids # Cumulative length cumulative_length = 0 # Results generations: List[Generation] = [] stopped = True # Zipped iterator iterator = zip(batch.input_lengths, batch.all_input_ids, accepted_ids) # We do two for loops as the first one can run completely asynchronously from the GPU while for the second # one, we need to first do a GPU <-> CPU sync # It is faster if we delay this sync for the maximum amount of time # For each member of the batch index = 0 for i, (input_length, all_input_ids, n_accepted_ids) in enumerate(iterator): # Indexing metadata start_index = cumulative_length end_index = cumulative_length + input_length if prefill: # Indexing 
metadata out_start_index = batch.prefill_cu_outlens[i] out_end_index = batch.prefill_cu_outlens[i + 1] out_length = out_end_index - out_start_index # Initialize position_ids # In decode, we do not need this as we can just increment position ids next_position_ids[i] = batch.position_ids[end_index - 1] # Used to gather prefill logprobs # Copy batch.input_ids to prefill_token_indices if prefill_logprobs: if len(batch) > 1: prefill_tokens_indices[out_start_index : out_end_index - 1] = ( batch.input_ids[start_index + 1 : start_index + out_length] ) else: # Set prefill_tokens_indices to the correct slice prefill_tokens_indices = batch.input_ids[ start_index + 1 : start_index + out_length ] for j in range(n_accepted_ids): batch.all_input_ids_tensor[i, input_length + j] = next_input_ids[index] index += 1 cumulative_length += input_length # Update values batch.input_ids = next_input_ids[accepted_ids.cumsum(dim=-1) - 1] batch.speculative_ids = speculative_ids batch.position_ids = next_position_ids + accepted_ids batch.input_lengths_tensor += accepted_ids batch.slot_indices += accepted_ids if prefill and prefill_logprobs: # Get prefill logprobs prefill_logprobs_tensor = torch.log_softmax(out, -1) prefill_logprobs = torch.gather( prefill_logprobs_tensor, 1, prefill_tokens_indices.view(-1, 1) ) # GPU <-> CPU sync prefill_logprobs = prefill_logprobs.view(-1).tolist() # GPU <-> CPU sync next_token_logprobs = next_token_logprobs.tolist() next_token_ids = next_input_ids.tolist() accepted_ids = accepted_ids.tolist() start_decode = time.time_ns() # Zipped iterator iterator = zip( batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, batch.stopping_criterias, batch.all_input_ids, batch.next_token_chooser.do_sample, batch.next_token_chooser.seeds, batch.top_n_tokens, accepted_ids, batch_top_token_ids, batch_top_token_logprobs, ) # For each member of the batch index = 0 for i, ( request, input_length, prefix_offset, read_offset, stopping_criteria, all_input_ids, do_sample, seed, top_n_tokens, n_accepted_ids, top_token_ids, top_token_logprobs, ) in enumerate(iterator): # Append next token to all tokens next_token_texts = [] left = 0 current_stopped = False for j in range(index, index + n_accepted_ids): # Generated token next_token_id = next_token_ids[j] all_input_ids.append(next_token_id) next_token_text, prefix_offset, read_offset = self.decode_token( all_input_ids, prefix_offset, read_offset, ) next_token_texts.append(next_token_text) stop, reason = stopping_criteria( next_token_id, next_token_text, ) if stop: left = index + n_accepted_ids - j - 1 current_stopped = True break else: current_stopped = False stopped = stopped and current_stopped _next_token_ids = next_token_ids[index : index + n_accepted_ids - left] _next_token_logprobs = next_token_logprobs[ index : index + n_accepted_ids - left ] index += n_accepted_ids # Shard generations # All generations will be appended in the rust sharded client if i % self.world_size == self.rank: if stop: # Decode generated tokens output_text, _, _ = self.decode_token( all_input_ids, prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True, ) generated_text = GeneratedText( output_text, stopping_criteria.current_tokens, reason, seed if do_sample else None, ) else: generated_text = None # Prefill if prefill and request.prefill_logprobs: out_start_index = batch.prefill_cu_outlens[i] out_end_index = batch.prefill_cu_outlens[i + 1] # Remove 
generated token to only have prefill and add nan for first prompt token request_prefill_logprobs = [float("nan")] + prefill_logprobs[ out_start_index : out_end_index - 1 ] prefill_token_ids = all_input_ids[:-1] prefill_texts = self.tokenizer.batch_decode( prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) prefill_tokens = Tokens( prefill_token_ids, request_prefill_logprobs, prefill_texts, is_special=[], ) else: prefill_tokens = None if top_n_tokens > 0: all_top_tokens = [] for top_token_ids, top_token_logprobs in zip( top_token_ids, top_token_logprobs ): toptoken_texts = self.tokenizer.batch_decode( top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) special_toptokens = [ token_id in self.all_special_ids for token_id in top_token_ids ] top_tokens = Tokens( top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens, ) all_top_tokens.append(top_tokens) top_tokens = all_top_tokens else: top_tokens = None generation = Generation( request.id, prefill_tokens, Tokens( _next_token_ids, _next_token_logprobs, next_token_texts, [nid in self.all_special_ids for nid in _next_token_ids], ), generated_text, top_tokens, ) generations.append(generation) # accept each new token for this specific request since we may # have more than one new token per request with speculative decoding for next_token_id in _next_token_ids: batch.next_token_chooser = ( batch.next_token_chooser.advance_grammar_single(i, next_token_id) ) # Update values batch.input_lengths[i] = input_length + n_accepted_ids if batch.input_lengths[i] > batch.max_seqlen: batch.max_seqlen = batch.input_lengths[i] batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.all_input_ids[i] = all_input_ids if stopped: del batch # No need to return a batch if we know that all requests stopped forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, None, (forward_ns, decode_ns) batch.prefill_cu_outlens = None batch.prefill_head_indices = None batch.prefill_next_token_indices = None forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, batch, (forward_ns, decode_ns)
text-generation-inference/server/text_generation_server/models/flash_causal_lm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/flash_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 23791 }
212
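Two pieces of bookkeeping in flash_causal_lm.py above are easy to miss: FlashCausalLMBatch.from_pb reserves ceil(total_tokens / BLOCK_SIZE) paged-attention blocks per request (total_tokens counts the prompt, the decode budget minus one, and the speculative length), and FlashCausalLM.warmup turns the measured free GPU memory into a global block count with 5% wiggle room. The sketch below reproduces only that arithmetic; all numbers are hypothetical placeholders, and the free memory is taken as a given input rather than queried from CUDA.

import math

BLOCK_SIZE = 16  # assumed block size; the real value comes from the cache manager

def blocks_for_request(input_length: int, max_new_tokens: int, speculative_length: int = 0) -> int:
    # Mirrors from_pb: the first token has no past, hence the "- 1".
    total_tokens = input_length + max_new_tokens - 1 + speculative_length
    return math.ceil(total_tokens / BLOCK_SIZE)

def kv_cache_blocks(free_memory_bytes: int, num_layers: int, num_kv_heads: int,
                    head_size: int, dtype_size: int, already_allocated: int) -> int:
    # Mirrors warmup: each block stores BLOCK_SIZE slots of K and V per layer.
    cache_block_size = BLOCK_SIZE * num_kv_heads * head_size
    total_cache_size = num_layers * cache_block_size * 2 * dtype_size
    # Keep 5% of the free memory as wiggle room, then add back the blocks that
    # were already allocated for the warmup batch.
    return int((free_memory_bytes * 0.95) // total_cache_size) + already_allocated

# Hypothetical example: a 512-token prompt generating at most 128 new tokens.
print(blocks_for_request(512, 128))  # 40 blocks of 16 slots each
print(kv_cache_blocks(20 * 1024**3, num_layers=32, num_kv_heads=8,
                      head_size=128, dtype_size=2, already_allocated=40))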
import torch import torch.distributed from transformers import AutoTokenizer, PreTrainedTokenizerBase from typing import Optional import os from text_generation_server.models.custom_modeling.mamba_modeling import ( MambaConfig, ) from loguru import logger from text_generation_server.pb import generate_pb2 from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) from text_generation_server.models.globals import ENABLE_CUDA_GRAPHS, MEM_POOL import time from text_generation_server.models.custom_modeling.mamba_modeling import ( MambaModel, InferenceParams, ) from text_generation_server.models import Model from typing import Any, List, Optional, Tuple, Type, Dict from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.utils.tokens import batch_top_tokens, Sampling from dataclasses import dataclass from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling def new_inference_params( n_blocks: int, batch_size: int, d_inner: int, d_conv: int, d_state: int, seqlen_offset: int, dtype: torch.dtype, device: torch.device, ): max_seqlen = 0 conv_states = torch.zeros( ( n_blocks, batch_size, d_inner, d_conv, ), device=device, dtype=dtype, ) ssm_states = torch.zeros( ( n_blocks, batch_size, d_inner, d_state, ), device=device, dtype=dtype, ) inference_params = InferenceParams( max_seqlen=max_seqlen, max_batch_size=batch_size, seqlen_offset=seqlen_offset, conv_states=conv_states, ssm_states=ssm_states, ) return inference_params @dataclass class MambaBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] # Decoder values input_ids: torch.Tensor # All tokens all_input_ids: List[torch.Tensor] # Lengths of all generations present in the batch input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] # Generation helpers next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor # Metadata used for padding max_input_length: int padding_right_offset: int # Maximum number of tokens this batch will grow to max_tokens: int # Past metadata keys_head_dim_last: bool = True # Inference params inference_params: Optional[Dict[str, Any]] = None def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch( id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens, ) @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "MambaBatch": inputs = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {} # Parse batch max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for i, r in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(r.inputs) next_token_choosers.append( NextTokenChooser.from_pb(r.parameters, device, tokenizer) ) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max( padding_right_offset, stopping_criteria.max_new_tokens ) tokenized_inputs = tokenizer( inputs, return_tensors="pt", padding=True, return_token_type_ids=False, truncation=True, 
max_length=max_truncation, ).to(device) for _ in pb.requests: input_len = tokenized_inputs["input_ids"].shape[1] prefix_offsets.append(input_len - 5) read_offsets.append(input_len) input_lengths = tokenized_inputs["attention_mask"].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs["input_ids"] all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1) top_n_tokens_tensor = torch.tensor( top_n_tokens, device=device, dtype=torch.int64 ) max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls( batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, # past_input_ids=None, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens, ) def filter(self, request_ids: List[int]) -> Optional["MambaBatch"]: if len(request_ids) == 0: raise ValueError("Batch must have at least one request") if len(request_ids) == len(self): return self keep_indices = [] # New values after filtering requests_idx_mapping = {} requests = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] max_input_length = 0 next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] total_remaining_decode_tokens = 0 new_padding_right_offset = 0 indices = [] for i, request_id in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) all_input_ids.append(self.all_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) indices.append(idx) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) top_n_tokens.append(self.top_n_tokens[idx]) remaining_decode_tokens = ( stopping_criteria.max_new_tokens - stopping_criteria.current_tokens ) total_remaining_decode_tokens += remaining_decode_tokens new_padding_right_offset = max( new_padding_right_offset, remaining_decode_tokens ) # Apply indices to input_ids, attention mask, past key values and other items that need to be cached input_ids = self.input_ids[keep_indices] top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices] max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = input_ids self.all_input_ids = all_input_ids self.input_lengths = input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.top_n_tokens = top_n_tokens self.top_n_tokens_tensor = top_n_tokens_tensor self.max_input_length = max_input_length self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens # TODO # Kept it simple by just updating the state, maybe updating the other CPU values is necessary. 
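        # conv_states has shape (n_blocks, batch_size, d_inner, d_conv) and ssm_states
        # has shape (n_blocks, batch_size, d_inner, d_state) (see new_inference_params
        # above), so indexing dim 1 below keeps only the state of the surviving requests.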
self.inference_params.conv_states = self.inference_params.conv_states[ :, indices ] self.inference_params.ssm_states = self.inference_params.ssm_states[:, indices] return self @classmethod def concatenate(cls, batches: List["MambaBatch"]) -> "MambaBatch": # Used for padding total_batch_size = 0 max_input_length = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) # Batch attributes requests = [] requests_idx_mapping = {} input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] max_tokens = 0 max_seqlen = 0 seqlen_offset = 0 (n_blocks, _, d_inner, d_conv) = batches[0].inference_params.conv_states.shape (_, _, _, d_state) = batches[0].inference_params.ssm_states.shape dtype = batches[0].inference_params.conv_states.dtype device = batches[0].inference_params.conv_states.device inference_params = new_inference_params( n_blocks=n_blocks, batch_size=total_batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=device, dtype=dtype, ) # Batch tensors input_ids = None top_n_tokens_tensor = None # Used for slicing correctly inside the tensors # Equivalent to a cumsum on batch sizes start_index = 0 for i, batch in enumerate(batches): requests.extend(batch.requests) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) all_input_ids.extend(batch.all_input_ids) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) top_n_tokens.extend(batch.top_n_tokens) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: # We need to offset the mapping for each batch by the cumulative batch size for k, v in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + start_index # Slicing end index for this batch end_index = start_index + len(batch) # Create empty tensor # input_ids is always of shape [batch_size, 1] # We do not need to pad it if input_ids is None: input_ids = batch.input_ids.new_empty((total_batch_size, 1)) # Copy to correct indices input_ids[start_index:end_index] = batch.input_ids if top_n_tokens_tensor is None: top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros( total_batch_size, ) top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor # Add eventual padding tokens that were added while concatenating max_tokens += batch.max_tokens + ( max_input_length - batch.max_input_length ) * len(batch) inference_params.max_seqlen = max( inference_params.max_seqlen, batch.inference_params.max_seqlen ) assert batch.inference_params.seqlen_offset != 0, "Invalid seqlen offset" inference_params.seqlen_offset = max( inference_params.seqlen_offset, batch.inference_params.seqlen_offset ) inference_params.conv_states[:, start_index:end_index] = ( batch.inference_params.conv_states ) inference_params.ssm_states[:, start_index:end_index] = ( batch.inference_params.ssm_states ) start_index = end_index return cls( batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, all_input_ids=all_input_ids, input_lengths=input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, 
top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length, padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens, inference_params=inference_params, ) def __len__(self): return len(self.requests) class Mamba(Model): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, _rank, world_size = initialize_torch_distributed() if world_size > 1: raise RuntimeError("Mamba does not support Tensor Parallelism (TP)") self.cuda_graphs = {} if torch.cuda.is_available(): device = torch.device("cuda") # Bf16 is important. In f16 accumulations in the matmul are causing # differences while the server is under load. # This is detectable by the integration load test dtype = torch.bfloat16 if dtype is None else dtype else: if quantize: raise ValueError("quantization is not available on CPU") device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/gpt-neox-20b", revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) config = MambaConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) tokenizer.bos_token_id = config.bos_token_id tokenizer.eos_token_id = config.eos_token_id tokenizer.pad_token = tokenizer.eos_token config.quantize = quantize config.use_medusa = use_medusa torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) model = MambaModel(config, weights) torch.distributed.barrier(group=self.process_group) super(Mamba, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, ) @property def batch_type(self) -> Type[MambaBatch]: return MambaBatch def warmup(self, batch) -> Optional[int]: # TODO: implement warmup for Mamba if needed if ENABLE_CUDA_GRAPHS: if self.speculate is None or self.speculate == 0: try: logger.info("Experimental support for Cuda Graphs is enabled") # Warmup cuda graphs for bs in [1, 2, 4] + [8 * i for i in range(1, 9)]: self.cuda_graph_warmup(bs) except Exception: logger.exception(f"Decode cuda graph warmup failed") return None def cuda_graph_warmup(self, batch_size: int): input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device) n_blocks = len(self.model.blocks) d_state = self.model.config.d_state d_conv = self.model.config.d_conv # Inner takes the expand multiplication d_inner = self.model.config.d_inner # Important seqlen_offset to go through the update mecanism with the state seqlen_offset = 1 inference_params = new_inference_params( n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype, ) graph = torch.cuda.CUDAGraph() torch.cuda.synchronize() # Run once outside to warmup self.model.forward(input_ids=input_ids, inference_params=inference_params) torch.cuda.synchronize() with torch.cuda.graph(graph, pool=MEM_POOL): logits, speculative_logits = self.model.forward( input_ids=input_ids, inference_params=inference_params ) torch.cuda.synchronize() graph_dict = { "input_ids": input_ids, "inference_params": inference_params, "graph": graph, "logits": logits, 
"speculative_logits": speculative_logits, } self.cuda_graphs[batch_size] = graph_dict def forward( self, input_ids: torch.Tensor, inference_params: Any ) -> Tuple[torch.Tensor, torch.Tensor]: bs = input_ids.shape[0] padded_bs = bs if bs == 3: padded_bs = 4 elif 3 < bs <= 8: padded_bs = 8 elif bs > 8: padded_bs = (bs + 7) // 8 * 8 # Try to find an associated cuda graph cuda_graph = self.cuda_graphs.get(padded_bs, None) is_prefill = inference_params is None or inference_params.seqlen_offset == 0 if is_prefill or cuda_graph is None: return self.model( input_ids, inference_params=inference_params, ) # Copy inputs to the static inputs of the cuda graph # Static inputs are potentially padded cuda_graph["input_ids"][:bs] = input_ids cuda_graph["inference_params"].conv_states[ :, :bs ] = inference_params.conv_states cuda_graph["inference_params"].ssm_states[:, :bs] = inference_params.ssm_states # Replay the graph cuda_graph["graph"].replay() inference_params.conv_states.copy_( cuda_graph["inference_params"].conv_states[:, :bs] ) inference_params.ssm_states.copy_( cuda_graph["inference_params"].ssm_states[:, :bs] ) # Slice output to the correct shape speculative_logits = ( cuda_graph["speculative_logits"][:bs] if cuda_graph["speculative_logits"] is not None else None ) logits = cuda_graph["logits"][:bs] return logits, speculative_logits def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]: start = time.time_ns() input_ids = ( batch.input_ids ) # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids batch_size, max_seqlen = input_ids.shape # Inference params if batch.inference_params is None: # 0 is important here seqlen_offset = 0 n_blocks = len(self.model.blocks) d_state = self.model.config.d_state d_conv = self.model.config.d_conv d_inner = self.model.config.d_inner inference_params = new_inference_params( n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype, ) batch.inference_params = inference_params # Forward pass logits, speculative_logits = self.forward( input_ids, inference_params=batch.inference_params ) # batch.inference_params = new_inference_params # Results generations: List[Generation] = [] stopped = True # Speculation is not active for causal accepted_ids = torch.ones_like(batch.input_ids)[:, 0] batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, batch.top_n_tokens_tensor, torch.log_softmax(logits[:, -1], -1), accepted_ids, ) start_decode = time.time_ns() # Zipped iterator iterator = zip( batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, logits, batch.next_token_choosers, batch.stopping_criterias, batch.all_input_ids, batch.top_n_tokens, batch_top_token_ids, batch_top_token_logprobs, ) # For each member of the batch for i, ( request, input_length, prefix_offset, read_offset, logits, next_token_chooser, stopping_criteria, all_input_ids, top_n_tokens, top_token_ids, top_token_logprobs, ) in enumerate(iterator): # Select next token next_token_id, logprobs = next_token_chooser( all_input_ids.view(1, -1), logits[-1:, :] ) # Append next token to all tokens all_input_ids = torch.cat([all_input_ids, next_token_id]) new_input_length = input_length + 1 # Generated token next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() next_token_text, prefix_offset, read_offset = self.decode_token( all_input_ids[:, 0], prefix_offset, 
read_offset ) # Evaluate stopping criteria stop, reason = stopping_criteria( next_token_id_squeezed, next_token_text, ) if not stop: stopped = False # Shard generations # All generations will be appended in the rust sharded client if i % self.world_size == self.rank: if stop: # Decode generated tokens output_text, _, _ = self.decode_token( all_input_ids[:, 0], prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True, ) # Get seed if isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText( output_text, stopping_criteria.current_tokens, reason, seed ) else: generated_text = None if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: # Remove generated token to only have prefill and add nan for first prompt token prefill_logprobs = [float("nan")] + torch.log_softmax( logits, -1 ).gather(1, all_input_ids[1:]).squeeze(1)[ -new_input_length:-1 ].tolist() prefill_token_ids = all_input_ids[-new_input_length:-1] prefill_texts = self.tokenizer.batch_decode( prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) prefill_tokens = Tokens( prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[], ) else: prefill_tokens = None if top_n_tokens > 0: toptoken_texts = self.tokenizer.batch_decode( top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) special_toptokens = [ token_id in self.all_special_ids for token_id in top_token_ids ] top_tokens = Tokens( top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens, ) else: top_tokens = None generation = Generation( request.id, prefill_tokens, Tokens( [next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids], ), generated_text, top_tokens, ) generations.append(generation) # Update values batch.next_token_choosers[i] = batch.next_token_choosers[ i ].advance_grammar(next_token_id_squeezed.item()) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, new_input_length) # We finished all generations in the batch; there is no next batch if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, None, (forward_ns, decode_ns) # Slice unused values from prefill batch.input_ids = batch.input_ids[:, :1] forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, batch, (forward_ns, decode_ns)
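
# --- Added annotation (not part of the upstream file) -------------------------
# The CUDA-graph path in `Mamba.forward` only replays graphs captured for a
# fixed set of batch sizes (1, 2, 4 and multiples of 8 up to 64, per `warmup`),
# so the live batch size is rounded up to the nearest captured bucket and the
# extra rows act as padding. The helper below is an illustrative, standalone
# restatement of that rounding rule; it is not used by the model code above.
def _illustrative_padded_batch_size(bs: int) -> int:
    if bs == 3:
        return 4
    if 3 < bs <= 8:
        return 8
    if bs > 8:
        # Round up to the next multiple of 8, matching the warmup buckets.
        return (bs + 7) // 8 * 8
    return bs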
text-generation-inference/server/text_generation_server/models/mamba.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/mamba.py", "repo_id": "text-generation-inference", "token_count": 14236 }
213
import datetime import torch import os from loguru import logger from pathlib import Path from safetensors.torch import save_file, load_file, _find_shared_tensors, _is_complete from typing import List, Dict from collections import defaultdict def _remove_duplicate_names( state_dict: Dict[str, torch.Tensor], *, preferred_names: List[str] = None, discard_names: List[str] = None, ) -> Dict[str, List[str]]: if preferred_names is None: preferred_names = [] preferred_names = set(preferred_names) if discard_names is None: discard_names = [] discard_names = set(discard_names) shareds = _find_shared_tensors(state_dict) to_remove = defaultdict(list) for shared in shareds: complete_names = set( [name for name in shared if _is_complete(state_dict[name])] ) if not complete_names: if len(shared) == 1: # Force contiguous name = list(shared)[0] state_dict[name] = state_dict[name].clone() complete_names = {name} else: raise RuntimeError( f"Error while trying to find names to remove to save state dict, but found no suitable name to keep for saving amongst: {shared}. None is covering the entire storage.Refusing to save/load the model since you could be storing much more memory than needed. Please refer to https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an issue." ) keep_name = sorted(list(complete_names))[0] # Mecanism to preferentially select keys to keep # coming from the on-disk file to allow # loading models saved with a different choice # of keep_name preferred = complete_names.difference(discard_names) if preferred: keep_name = sorted(list(preferred))[0] if preferred_names: preferred = preferred_names.intersection(complete_names) if preferred: keep_name = sorted(list(preferred))[0] for name in sorted(shared): if name != keep_name: to_remove[keep_name].append(name) return to_remove def convert_file(pt_file: Path, sf_file: Path, discard_names: List[str]): """ Convert a pytorch file to a safetensors file This will remove duplicate tensors from the file. Unfortunately, this might not respect *transformers* convention. Forcing us to check for potentially different keys during load when looking for specific tensors (making tensor sharing explicit). 
""" loaded = torch.load(pt_file, map_location="cpu") if "state_dict" in loaded: loaded = loaded["state_dict"] to_removes = _remove_duplicate_names(loaded, discard_names=discard_names) metadata = {"format": "pt"} for kept_name, to_remove_group in to_removes.items(): for to_remove in to_remove_group: if to_remove not in metadata: metadata[to_remove] = kept_name del loaded[to_remove] # Force tensors to be contiguous loaded = {k: v.contiguous() for k, v in loaded.items()} dirname = os.path.dirname(sf_file) os.makedirs(dirname, exist_ok=True) save_file(loaded, sf_file, metadata=metadata) reloaded = load_file(sf_file) for k in loaded: pt_tensor = loaded[k] sf_tensor = reloaded[k] if not torch.equal(pt_tensor, sf_tensor): raise RuntimeError(f"The output tensors do not match for key {k}") def convert_files(pt_files: List[Path], sf_files: List[Path], discard_names: List[str]): assert len(pt_files) == len(sf_files) N = len(pt_files) # We do this instead of using tqdm because we want to parse the logs with the launcher for i, (pt_file, sf_file) in enumerate(zip(pt_files, sf_files)): # Skip blacklisted files if ( "arguments" in pt_file.name or "args" in pt_file.name or "training" in pt_file.name ): continue start = datetime.datetime.now() convert_file(pt_file, sf_file, discard_names) elapsed = datetime.datetime.now() - start logger.info(f"Convert: [{i + 1}/{N}] -- Took: {elapsed}")
text-generation-inference/server/text_generation_server/utils/convert.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/convert.py", "repo_id": "text-generation-inference", "token_count": 1769 }
214
import re from typing import List, Optional, Tuple import math import torch from text_generation_server.pb import generate_pb2 from text_generation_server.pb.generate_pb2 import FinishReason, GrammarType from text_generation_server.utils.logits_process import ( FrequencyPenaltyLogitsProcessor, GrammarLogitProcessor, HeterogeneousProcessorWrapper, HeterogeneousRepetitionPenaltyLogitsProcessor, HeterogeneousFrequencyPenaltyLogitsProcessor, HeterogeneousTemperatureLogitsWarper, HeterogeneousTopKLogitsWarper, HeterogeneousTopPLogitsWarper, HeterogeneousTypicalLogitsWarper, HeterogeneousGrammarLogitProcessor, static_warper, ) from text_generation_server.utils.watermark import WatermarkLogitsProcessor from transformers import PreTrainedTokenizerBase, RepetitionPenaltyLogitsProcessor class NextTokenChooser: def __init__( self, watermark: bool = False, temperature: float = 1.0, repetition_penalty: float = 1.0, frequency_penalty: float = 0.0, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, do_sample: bool = False, seed: int = 0, device: str = "cpu", tokenizer: Optional[PreTrainedTokenizerBase] = None, grammar: str = "", grammar_type: GrammarType = GrammarType.GRAMMAR_TYPE_NONE, fsm_grammar_state: int = 0, ): self.watermark_processor = ( WatermarkLogitsProcessor(device=device) if watermark else None ) self.repetition_processor = ( RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty) if repetition_penalty and repetition_penalty != 1.0 else None ) self.frequency_processor = ( FrequencyPenaltyLogitsProcessor(penalty=frequency_penalty) if frequency_penalty and frequency_penalty != 0.0 else None ) self.grammar_processor = ( GrammarLogitProcessor(tokenizer, device, grammar, grammar_type) if grammar != "" else None ) self.tokenizer = tokenizer has_warpers = ( (temperature is not None and temperature != 1.0) or (top_k is not None and top_k != 0) or (top_p is not None and top_p < 1.0) or (typical_p is not None and typical_p < 1.0) ) if has_warpers: self.static_warper = static_warper( temperature=temperature, top_k=top_k, top_p=top_p, typical_p=typical_p ) else: self.static_warper = None sampling = do_sample or has_warpers self.choice = Sampling(seed, device) if sampling else Greedy() self.fsm_grammar_state = fsm_grammar_state self.grammar = grammar def __call__(self, input_ids, scores): if self.watermark_processor is not None: scores = self.watermark_processor(input_ids, scores) if self.repetition_processor is not None: scores = self.repetition_processor(input_ids, scores) if self.frequency_processor is not None: scores = self.frequency_processor(input_ids, scores) if self.grammar_processor is not None: scores = self.grammar_processor(scores, self.fsm_grammar_state) if self.static_warper is None: next_logprob = torch.log_softmax(scores, -1) else: scores, next_logprob = self.static_warper(scores) next_id = self.choice(scores[-1]).view(1, 1) return next_id, next_logprob def advance_grammar(self, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_state = self.grammar_processor.advance( next_id, self.fsm_grammar_state ) return self @classmethod def from_pb( cls, pb: generate_pb2.NextTokenChooserParameters, device: torch.device, tokenizer: PreTrainedTokenizerBase, ) -> "NextTokenChooser": return NextTokenChooser( watermark=pb.watermark, temperature=pb.temperature, repetition_penalty=pb.repetition_penalty, frequency_penalty=pb.frequency_penalty, top_k=pb.top_k, top_p=pb.top_p, typical_p=pb.typical_p, do_sample=pb.do_sample, seed=pb.seed, 
device=device, tokenizer=tokenizer, grammar=pb.grammar, grammar_type=pb.grammar_type, ) class StopSequenceCriteria: def __init__(self, stop_sequence: str): stop_sequence = re.escape(stop_sequence) self.regex = re.compile(f"{stop_sequence}$") def __call__(self, output: str) -> bool: if self.regex.findall(output): return True return False class StoppingCriteria: def __init__( self, eos_token_id: int, stop_sequence_criterias: List[StopSequenceCriteria], max_new_tokens: int = 20, ignore_eos_token: bool = False, ): self.eos_token_id = eos_token_id self.stop_sequence_criterias = stop_sequence_criterias self.max_new_tokens = max_new_tokens self.current_tokens = 0 self.current_output = "" self.ignore_eos_token = ignore_eos_token def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]: self.current_tokens += 1 if self.current_tokens >= self.max_new_tokens: return True, FinishReason.FINISH_REASON_LENGTH if not self.ignore_eos_token and last_token == self.eos_token_id: return True, FinishReason.FINISH_REASON_EOS_TOKEN if self.stop_sequence_criterias: self.current_output += last_output # There is no need to keep an output that is too long if len(self.current_output) > 300: # Slice to -200 to avoid doing it all the time self.current_output = self.current_output[-200:] for stop_sequence_criteria in self.stop_sequence_criterias: if stop_sequence_criteria(self.current_output): return True, FinishReason.FINISH_REASON_STOP_SEQUENCE return False, None @classmethod def from_pb( cls, pb: generate_pb2.StoppingCriteriaParameters, tokenizer: PreTrainedTokenizerBase, ) -> "StoppingCriteria": stop_sequence_criterias = [ StopSequenceCriteria(sequence) for sequence in pb.stop_sequences ] return StoppingCriteria( tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens, pb.ignore_eos_token, ) def create_n_gram_speculation( input_ids: torch.Tensor, next_ids: torch.Tensor, accepted_ids: torch.Tensor, speculate: int, verbose: bool, ): # Very trivial approach, find first match in the string. # This is much less refined than actual n-gram but seems to work # relatively OK in grounded mode and is by far much faster with # much less worst case complexity as everything happens on device. 
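    # Added annotation (best-effort reading of the implementation below): for
    # each sequence, the last accepted token acts as a "seed"; we locate a
    # previous occurrence of that seed in `input_ids` and propose the
    # `speculate` tokens that followed it as the speculative continuation,
    # clamping indices so the gather never reads past the end of the prompt.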
B = accepted_ids.shape[0] device = input_ids.device seeds = next_ids[accepted_ids.cumsum(dim=-1) - 1] indices = (input_ids == seeds.unsqueeze(-1)).max(dim=1).indices + 1 all_indices = indices.unsqueeze(-1).expand(B, speculate) + torch.arange( speculate, device=device ) all_indices = torch.clamp(all_indices, max=input_ids.shape[1] - 1) speculative_ids = input_ids.gather(dim=-1, index=all_indices) return speculative_ids class HeterogeneousNextTokenChooser: def __init__( self, dtype: torch.dtype, device: torch.device, watermark: List[bool], temperature: List[float], repetition_penalty: List[float], frequency_penalty: List[float], top_k: List[int], top_p: List[float], typical_p: List[float], do_sample: List[bool], seeds: List[int], tokenizer: PreTrainedTokenizerBase, grammars: List[str], grammar_types: List[int], fsm_grammar_states=List[int], ): warpers = [] self.watermark_processor = ( HeterogeneousProcessorWrapper( { i: WatermarkLogitsProcessor(device=device) for i, do_watermark in enumerate(watermark) if do_watermark } ) if any(watermark) else None ) self.repetition_processor = ( HeterogeneousRepetitionPenaltyLogitsProcessor( repetition_penalty, dtype, device ) if any([x != 1.0 for x in repetition_penalty]) else None ) self.frequency_processor = ( HeterogeneousFrequencyPenaltyLogitsProcessor( frequency_penalty, dtype, device ) if any([x != 0.0 for x in frequency_penalty]) else None ) self.grammar_processor = ( HeterogeneousGrammarLogitProcessor( tokenizer, device, grammars, grammar_types ) if any([grammar != "" for grammar in grammars]) else None ) if any([x != 1.0 for x in temperature]): do_sample = [ sample or x != 1.0 for x, sample in zip(temperature, do_sample) ] warpers.append( HeterogeneousTemperatureLogitsWarper(temperature, dtype, device) ) if any([x != 0 for x in top_k]): do_sample = [sample or x != 0 for x, sample in zip(top_k, do_sample)] warpers.append(HeterogeneousTopKLogitsWarper(top_k, device)) if any([x < 1.0 for x in top_p]): do_sample = [sample or x < 1.0 for x, sample in zip(top_p, do_sample)] warpers.append(HeterogeneousTopPLogitsWarper(top_p, dtype, device)) if any([x < 1.0 for x in typical_p]): do_sample = [sample or x < 1.0 for x, sample in zip(typical_p, do_sample)] warpers.append(HeterogeneousTypicalLogitsWarper(typical_p, dtype, device)) self.warpers = warpers if any(do_sample): self.choice = HeterogeneousSampling(do_sample, seeds, device) else: self.choice = Greedy() self.seeds = seeds self.do_sample = do_sample self.dtype = dtype self.device = device self.tokenizer = tokenizer self.fsm_grammar_states = fsm_grammar_states self.grammars = grammars self.grammar_types = grammar_types def __call__( self, input_ids: torch.Tensor, scores: torch.Tensor, speculate: int, speculated_ids: Optional[torch.Tensor] = None, speculative_scores: Optional[torch.Tensor] = None, verbose=False, ): if speculated_ids is not None: B = scores.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 scores = scores.view(B, S, -1) else: B = scores.shape[0] S = 1 scores = scores.view(B, S, -1) next_ids = torch.zeros((B, S), device=scores.device, dtype=torch.long) for j in range(S): _scores = scores[:, j] if self.watermark_processor is not None: _scores = self.watermark_processor(input_ids, _scores) if self.repetition_processor is not None: _scores = self.repetition_processor(input_ids, _scores) if self.frequency_processor is not None: _scores = self.frequency_processor(input_ids, _scores) if self.grammar_processor is not None: _scores = self.grammar_processor(_scores, 
self.fsm_grammar_states) for warper in self.warpers: _scores = warper(input_ids, _scores) _next_ids = self.choice(_scores) scores[:, j] = _scores next_ids[:, j] = _next_ids next_ids = next_ids.view(B * S) allscores = scores.view(B * S, -1) alllogprobs = torch.log_softmax(allscores, -1) if speculated_ids is not None: accepted_ids = [] B = next_ids.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 indices = [] for i in range(B): _next_ids = next_ids[i * S : (i + 1) * S] _speculated_ids = speculated_ids[i] validate_speculative = _next_ids[:-1] == _speculated_ids index = i * S accepted = 1 # First is always valid indices.append(index) for valid in validate_speculative.tolist(): if valid: index += 1 accepted += 1 indices.append(index) else: break accepted_ids.append(accepted) accepted_ids = torch.tensor( accepted_ids, device=input_ids.device, dtype=input_ids.dtype ) next_ids = next_ids[indices] logprobs = alllogprobs[indices] indices = torch.arange(B, device=input_ids.device) * S if speculative_scores is not None: speculative_scores = speculative_scores[indices + accepted_ids - 1] else: accepted_ids = torch.ones_like(next_ids) logprobs = alllogprobs next_logprobs = torch.gather(logprobs, 1, next_ids.view(-1, 1)).view(-1) if speculate > 0: if speculative_scores is not None: # Medusa provided some scores speculative_ids = Greedy()(speculative_scores) else: # n-gram speculative_ids = create_n_gram_speculation( input_ids, next_ids, accepted_ids, speculate, verbose ) else: speculative_ids = None return next_ids, next_logprobs, alllogprobs, accepted_ids, speculative_ids def advance_grammar(self, next_ids: List[int]): if self.grammar_processor is not None: other_new_states = self.grammar_processor.advance_batch( next_ids, self.fsm_grammar_states ) self.fsm_grammar_states = other_new_states return self def advance_grammar_single(self, grammar_state_index: int, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_states[grammar_state_index] = ( self.grammar_processor.advance_at_index( next_id, self.fsm_grammar_states[grammar_state_index], grammar_state_index, ) ) return self def filter(self, indices): if self.watermark_processor is not None: self.watermark_processor = self.watermark_processor.filter(indices) if self.repetition_processor is not None: self.repetition_processor = self.repetition_processor.filter(indices) if self.frequency_processor is not None: self.frequency_processor = self.frequency_processor.filter(indices) if self.grammar_processor is not None: self.grammar_processor = self.grammar_processor.filter(indices) filtered_warpers = [] for warper in self.warpers: filtered_warper = warper.filter(indices) if filtered_warper is not None: filtered_warpers.append(filtered_warper) self.warpers = filtered_warpers self.seeds = [self.seeds[i] for i in indices] self.do_sample = [self.do_sample[i] for i in indices] new_grammars = [] new_fsm_grammar_states = [] new_grammar_types = [] for i in indices: new_grammars.append(self.grammars[i]) new_fsm_grammar_states.append(self.fsm_grammar_states[i]) new_grammar_types.append(self.grammar_types[i]) self.grammars = new_grammars self.fsm_grammar_states = new_fsm_grammar_states self.grammar_types = new_grammar_types if any(self.do_sample): self.choice.filter(indices) else: self.choice = Greedy() return self @classmethod def from_pb( cls, pb: List[generate_pb2.NextTokenChooserParameters], dtype: torch.dtype, device: torch.device, tokenizer: PreTrainedTokenizerBase, fsm_grammar_states: Optional[List[int]] = None, ) -> 
"HeterogeneousNextTokenChooser": return HeterogeneousNextTokenChooser( watermark=[pb_.watermark for pb_ in pb], temperature=[pb_.temperature for pb_ in pb], repetition_penalty=[pb_.repetition_penalty for pb_ in pb], frequency_penalty=[pb_.frequency_penalty for pb_ in pb], top_k=[pb_.top_k for pb_ in pb], top_p=[pb_.top_p for pb_ in pb], typical_p=[pb_.typical_p for pb_ in pb], do_sample=[pb_.do_sample for pb_ in pb], seeds=[pb_.seed for pb_ in pb], device=device, dtype=dtype, tokenizer=tokenizer, grammars=[pb_.grammar for pb_ in pb], grammar_types=[pb_.grammar_type for pb_ in pb], fsm_grammar_states=( fsm_grammar_states if fsm_grammar_states else [0] * len(pb) ), ) class Sampling: def __init__(self, seed: int, device: str = "cpu"): self.generator = torch.Generator(device) self.generator.manual_seed(seed) self.seed = seed def __call__(self, logits): probs = torch.nn.functional.softmax(logits, -1) # Avoid GPU<->CPU sync done by torch multinomial # See: https://github.com/pytorch/pytorch/blob/925a3788ec5c06db62ca732a0e9425a26a00916f/aten/src/ATen/native/Distributions.cpp#L631-L637 q = torch.empty_like(probs).exponential_(1, generator=self.generator) return probs.div_(q).argmax() class Greedy: def __call__(self, logits): return logits.argmax(dim=-1) class HeterogeneousSampling: r""" Mixed greedy and probabilistic sampling. Compute both and pick the right one for each sample. """ def __init__(self, do_sample: List[bool], seeds: List[int], device: torch.device): self.seeds = seeds self.greedy_indices = [] self.sampling_mapping = {} for i, (sample, seed) in enumerate(zip(do_sample, seeds)): if sample: self.sampling_mapping[i] = Sampling(seed, device) else: self.greedy_indices.append(i) self.greedy = Greedy() def __call__(self, logits): out = torch.empty(logits.shape[0], dtype=torch.int64, device=logits.device) if self.greedy_indices: # Computing for all indices is faster than slicing torch.argmax(logits, -1, out=out) for i, sampling in self.sampling_mapping.items(): out[i] = sampling(logits[i]) return out def filter(self, indices): new_greedy_indices = [] new_sampling_mapping = {} for i, idx in enumerate(indices): if idx in self.sampling_mapping: new_sampling_mapping[i] = self.sampling_mapping[idx] else: new_greedy_indices.append(i) self.greedy_indices = new_greedy_indices self.sampling_mapping = new_sampling_mapping return self def batch_top_tokens( top_n_tokens: List[int], top_n_tokens_tensor: torch.Tensor, logprobs: torch.Tensor, accepted_ids: torch.Tensor, ) -> Tuple[List[List[List[int]]], List[List[List[float]]]]: """Find the top n most likely tokens for a batch of generations. When multiple tokens have equal probabilities and they don't all fit, the remaining tokens are also returned. 
""" max_top_n = max(top_n_tokens) # Early exit when top_n_tokens is not used if max_top_n == 0: return [[[]]] * len(top_n_tokens), [[[]]] * len(top_n_tokens) batch_size = accepted_ids.shape[0] speculate_size = logprobs.shape[0] // batch_size top_n_tokens_tensor = top_n_tokens_tensor.repeat_interleave(speculate_size) # Ensure top_n doesn't exceed vocab size top_n_tokens = [ min(tok, logprobs.size(-1)) for tok in top_n_tokens for _ in range(speculate_size) ] # Parallel kthvalue adapted from https://discuss.pytorch.org/t/how-to-efficiently-get-the-k-th-largest-values-in-parallel/160529/2 # Sorted topk is faster than torch.sort() since we only need a small subset sorted_top_k = torch.topk(logprobs, k=max_top_n, dim=-1, sorted=True).values nth_highest = torch.gather( sorted_top_k, 1, (top_n_tokens_tensor - 1).clip(min=0).unsqueeze(1) ) nth_highest[nth_highest == -float("inf")] = torch.finfo(logprobs.dtype).min # Find the new "fuzzy" top n values top_n_indices = (logprobs >= nth_highest).nonzero() _, top_n_ishes = torch.unique_consecutive(top_n_indices[:, 0], return_counts=True) k = 1 if top_n_ishes.numel() == 0 else top_n_ishes.max() # Take a new topk for these new max n values top_k = torch.topk(logprobs, k=k, dim=1, sorted=True) top_n_ishes = top_n_ishes.tolist() top_indices = top_k.indices.tolist() top_values = top_k.values.tolist() batch_top_token_ids = [] batch_top_token_logprobs = [] accepted_ids_list = accepted_ids.tolist() for i, n_accepted_ids in enumerate(accepted_ids_list): start = speculate_size * i stop = speculate_size * (i + 1) _top_indices = top_indices[start:stop] _top_values = top_values[start:stop] _top_n_ishes = top_n_ishes[start:stop] _top_n_tokens = top_n_tokens[start:stop] _top_indices = _top_indices[:n_accepted_ids] _top_values = _top_values[:n_accepted_ids] _top_n_ishes = _top_n_ishes[:n_accepted_ids] _top_n_tokens = _top_n_tokens[:n_accepted_ids] row_top_token_ids = [] row_top_token_logprobs = [] for idxs, vals, n, req_n in zip( _top_indices, _top_values, _top_n_ishes, _top_n_tokens ): indices = idxs[:n] if req_n > 0 else [] values = vals[:n] if req_n > 0 else [] row_top_token_ids.append(indices) row_top_token_logprobs.append(values) batch_top_token_ids.append(row_top_token_ids) batch_top_token_logprobs.append(row_top_token_logprobs) return batch_top_token_ids, batch_top_token_logprobs
text-generation-inference/server/text_generation_server/utils/tokens.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/tokens.py", "repo_id": "text-generation-inference", "token_count": 10984 }
215
.PHONY: style check-style test DATA_DIR = data dir_guard=@mkdir -p $(@D) # Format source code automatically style: npm run lint # Check the source code is formatted correctly check-style: npm run lint-check TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json $(DATA_DIR)/tokenizer-wiki.json $(DATA_DIR)/bert-wiki.json # Launch the test suite test: $(TESTS_RESOURCES) npm run test $(DATA_DIR)/big.txt : $(dir_guard) wget https://norvig.com/big.txt -O $@ $(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt head -100 $(DATA_DIR)/big.txt > $@ $(DATA_DIR)/roberta.json : $(dir_guard) wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@ $(DATA_DIR)/tokenizer-wiki.json : $(dir_guard) wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json -O $@ $(DATA_DIR)/bert-wiki.json : $(dir_guard) wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json -O $@
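
# Added note (not part of the original Makefile): `make test` fetches the
# $(TESTS_RESOURCES) files into $(DATA_DIR)/ on first run via the wget rules
# above, so the first invocation needs network access.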
tokenizers/bindings/node/Makefile/0
{ "file_path": "tokenizers/bindings/node/Makefile", "repo_id": "tokenizers", "token_count": 406 }
216
import {
  byteLevelPreTokenizer,
  metaspacePreTokenizer,
  punctuationPreTokenizer,
  sequencePreTokenizer,
  splitPreTokenizer,
  whitespaceSplitPreTokenizer,
} from '../../'

describe('byteLevelPreTokenizer', () => {
  it('instantiates correctly', () => {
    const processor = byteLevelPreTokenizer()
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })
})

describe('metaspacePreTokenizer', () => {
  it('instantiates correctly without any parameter', () => {
    const processor = metaspacePreTokenizer()
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })

  it('accepts `undefined` as first parameter', () => {
    expect(metaspacePreTokenizer(undefined)).toBeDefined()
  })

  it('accepts `undefined` as second parameter', () => {
    expect(metaspacePreTokenizer('t', undefined)).toBeDefined()
  })

  it('can pre-tokenize strings', () => {
    const pretok = metaspacePreTokenizer()
    expect(pretok.preTokenizeString('Hello there friend')).toEqual([
      ['▁Hello', [0, 5]],
      ['▁there', [5, 11]],
      ['▁friend', [11, 18]],
    ])
  })
})

describe('punctuationPreTokenizer', () => {
  it('instantiates correctly without any parameter', () => {
    const processor = punctuationPreTokenizer()
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })

  it('instantiates correctly with non-default split delimiter', () => {
    const processor = punctuationPreTokenizer('removed')
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })
})

describe('splitPreTokenizer', () => {
  it('instantiates correctly with invert parameter', () => {
    const processor = splitPreTokenizer(' ', 'mergedWithPrevious', false)
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })
})

describe('sequencePreTokenizer', () => {
  it('instantiates correctly', () => {
    const punctuation = punctuationPreTokenizer()
    const whitespace = whitespaceSplitPreTokenizer()
    const sequence2 = sequencePreTokenizer([])
    expect(sequence2.constructor.name).toEqual('PreTokenizer')
    const sequence3 = sequencePreTokenizer([punctuation, whitespace])
    expect(sequence3.constructor.name).toEqual('PreTokenizer')
  })
})
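
// Added sketch (not part of the original suite): `whitespaceSplitPreTokenizer`
// exposes the same `preTokenizeString` helper used above. The offsets shown are
// what whitespace splitting should produce, but they have not been verified
// against a specific tokenizers release.
describe('whitespaceSplitPreTokenizer', () => {
  it('can pre-tokenize strings', () => {
    const pretok = whitespaceSplitPreTokenizer()
    expect(pretok.preTokenizeString('Hello there')).toEqual([
      ['Hello', [0, 5]],
      ['there', [6, 11]],
    ])
  })
})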
tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts", "repo_id": "tokenizers", "token_count": 728 }
217
{ "name": "tokenizers-linux-arm64-gnu", "version": "0.13.4-rc1", "os": [ "linux" ], "cpu": [ "arm64" ], "main": "tokenizers.linux-arm64-gnu.node", "files": [ "tokenizers.linux-arm64-gnu.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers", "libc": [ "glibc" ] }
tokenizers/bindings/node/npm/linux-arm64-gnu/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm64-gnu/package.json", "repo_id": "tokenizers", "token_count": 289 }
218
use crate::arc_rwlock_serde; use serde::{Deserialize, Serialize}; extern crate tokenizers as tk; use napi::bindgen_prelude::*; use napi_derive::napi; use std::sync::{Arc, RwLock}; use tk::decoders::DecoderWrapper; /// Decoder #[derive(Clone, Serialize, Deserialize)] #[napi] pub struct Decoder { #[serde(flatten, with = "arc_rwlock_serde")] decoder: Option<Arc<RwLock<DecoderWrapper>>>, } #[napi] impl Decoder { #[napi] pub fn decode(&self, tokens: Vec<String>) -> Result<String> { use tk::Decoder; self .decoder .as_ref() .unwrap() .read() .unwrap() .decode(tokens) .map_err(|e| Error::from_reason(format!("{}", e))) } } impl tk::Decoder for Decoder { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { self .decoder .as_ref() .ok_or("Uninitialized Decoder")? .read() .unwrap() .decode_chain(tokens) } } #[napi] pub fn bpe_decoder(suffix: Option<String>) -> Decoder { let suffix = suffix.unwrap_or("</w>".to_string()); let decoder = Some(Arc::new(RwLock::new( tk::decoders::bpe::BPEDecoder::new(suffix).into(), ))); Decoder { decoder } } #[napi] pub fn byte_fallback_decoder() -> Decoder { Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::byte_fallback::ByteFallback::new().into(), ))), } } #[napi] pub fn ctc_decoder( #[napi(ts_arg_type = "string = '<pad>'")] pad_token: Option<String>, word_delimiter_token: Option<String>, cleanup: Option<bool>, ) -> Decoder { let pad_token = pad_token.unwrap_or("<pad>".to_string()); let word_delimiter_token = word_delimiter_token.unwrap_or("|".to_string()); let cleanup = cleanup.unwrap_or(true); let decoder = Some(Arc::new(RwLock::new( tk::decoders::ctc::CTC::new(pad_token, word_delimiter_token, cleanup).into(), ))); Decoder { decoder } } #[napi] pub fn fuse_decoder() -> Decoder { Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::fuse::Fuse::new().into(), ))), } } #[napi] pub fn metaspace_decoder( #[napi(ts_arg_type = "string = '▁'")] replacement: Option<String>, #[napi(ts_arg_type = "bool = true")] add_prefix_space: Option<bool>, ) -> Result<Decoder> { let add_prefix_space = add_prefix_space.unwrap_or(true); let replacement = replacement.unwrap_or("▁".to_string()); if replacement.chars().count() != 1 { return Err(Error::from_reason( "replacement is supposed to be a single char", )); } let replacement = replacement.chars().next().unwrap(); Ok(Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::metaspace::Metaspace::new(replacement, add_prefix_space).into(), ))), }) } #[napi] pub fn replace_decoder(pattern: String, content: String) -> Result<Decoder> { Ok(Decoder { decoder: Some(Arc::new(RwLock::new( tk::normalizers::replace::Replace::new(pattern, content) .map_err(|e| Error::from_reason(e.to_string()))? 
.into(), ))), }) } #[napi] pub fn sequence_decoder(decoders: Vec<&Decoder>) -> Decoder { let sequence: Vec<tk::DecoderWrapper> = decoders .into_iter() .filter_map(|decoder| { decoder .decoder .as_ref() .map(|decoder| (**decoder).read().unwrap().clone()) }) .clone() .collect(); Decoder { decoder: Some(Arc::new(RwLock::new(tk::DecoderWrapper::Sequence( tk::decoders::sequence::Sequence::new(sequence), )))), } } #[napi] pub fn strip_decoder(content: String, left: u32, right: u32) -> Result<Decoder> { let content: char = content.chars().next().ok_or(Error::from_reason( "Expected non empty string for strip pattern", ))?; Ok(Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::strip::Strip::new(content, left as usize, right as usize).into(), ))), }) } #[napi] pub fn word_piece_decoder( #[napi(ts_arg_type = "string = '##'")] prefix: Option<String>, #[napi(ts_arg_type = "bool = true")] cleanup: Option<bool>, ) -> Decoder { let prefix = prefix.unwrap_or("##".to_string()); let cleanup = cleanup.unwrap_or(true); Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::wordpiece::WordPiece::new(prefix, cleanup).into(), ))), } }
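
// Added annotation (not part of the upstream file): each constructor above
// (`bpe_decoder`, `ctc_decoder`, `metaspace_decoder`, ...) wraps the matching
// `tk::decoders::*` type in an `Arc<RwLock<DecoderWrapper>>`, so a single
// `Decoder` handle can be cheaply cloned and shared with the JavaScript side
// while still satisfying the `tk::Decoder` trait implemented for it.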
tokenizers/bindings/node/src/decoders.rs/0
{ "file_path": "tokenizers/bindings/node/src/decoders.rs", "repo_id": "tokenizers", "token_count": 1821 }
219
[target.x86_64-apple-darwin] rustflags = [ "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", "-C", "link-arg=-mmacosx-version-min=10.11", ] [target.aarch64-apple-darwin] rustflags = [ "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", "-C", "link-arg=-mmacosx-version-min=10.11", ]
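
# Added note (best-effort explanation): `-undefined dynamic_lookup` lets the
# compiled extension leave Python's symbols unresolved at link time and bind
# them when the interpreter loads the module, which is the usual setup for
# building Python extension wheels on macOS.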
tokenizers/bindings/python/.cargo/config.toml/0
{ "file_path": "tokenizers/bindings/python/.cargo/config.toml", "repo_id": "tokenizers", "token_count": 146 }
220
from .. import decoders Decoder = decoders.Decoder ByteLevel = decoders.ByteLevel Replace = decoders.Replace WordPiece = decoders.WordPiece ByteFallback = decoders.ByteFallback Fuse = decoders.Fuse Strip = decoders.Strip Metaspace = decoders.Metaspace BPEDecoder = decoders.BPEDecoder CTC = decoders.CTC Sequence = decoders.Sequence
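
# Illustrative usage (added; not part of the upstream module). The byte tokens
# below are assumptions chosen so the example decodes to readable text:
#
#   from tokenizers import decoders
#   decoder = decoders.Sequence([decoders.ByteFallback(), decoders.Fuse()])
#   decoder.decode(["<0x48>", "<0x69>"])  # -> "Hi"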
tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py", "repo_id": "tokenizers", "token_count": 128 }
221
# Generated content DO NOT EDIT class PostProcessor: """ Base class for all post-processors This class is not supposed to be instantiated directly. Instead, any implementation of a PostProcessor will return an instance of this class when instantiated. """ def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class BertProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Bert model: - a SEP token - a CLS token Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id """ def __init__(self, sep, cls): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class ByteLevel(PostProcessor): """ This post-processor takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't want the offsets to include these whitespaces, then this PostProcessor must be used. Args: trim_offsets (:obj:`bool`): Whether to trim the whitespaces from the produced offsets. """ def __init__(self, trim_offsets=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class RobertaProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Roberta model: - a SEP token - a CLS token It also takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. 
If you don't want the offsets to include these whitespaces, then this PostProcessor should be initialized with :obj:`trim_offsets=True` Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to trim the whitespaces from the produced offsets. add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether the add_prefix_space option was enabled during pre-tokenization. This is relevant because it defines the way the offsets are trimmed out. """ def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class Sequence(PostProcessor): """ Sequence Processor Args: processors (:obj:`List[PostProcessor]`) The processors that need to be chained """ def __init__(self, processors): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class TemplateProcessing(PostProcessor): """ Provides a way to specify templates in order to add the special tokens to each input sequence as relevant. Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair sequences. The final result looks like this: - Single sequence: :obj:`[CLS] Hello there [SEP]` - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` With the type ids as following:: [CLS] ... [SEP] ... [SEP] 0 0 0 1 1 You can achieve such behavior using a TemplateProcessing:: TemplateProcessing( single="[CLS] $0 [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) In this example, each input sequence is identified using a ``$`` construct. This identifier lets us specify each input sequence, and the type_id to use. When nothing is specified, it uses the default values. 
Here are the different ways to specify it: - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... - Specifying both: ``$A:0``, ``$B:1``, ... The same construct is used for special tokens: ``<identifier>(:<type_id>)?``. **Warning**: You must ensure that you are giving the correct tokens/ids as these will be added to the Encoding without any further check. If the given ids correspond to something totally different in a `Tokenizer` using this `PostProcessor`, it might lead to unexpected results. Args: single (:obj:`Template`): The template used for single sequences pair (:obj:`Template`): The template used when both sequences are specified special_tokens (:obj:`Tokens`): The list of special tokens used in each sequences Types: Template (:obj:`str` or :obj:`List`): - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens - If a :obj:`List[str]` is provided, a list of tokens Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): - A :obj:`Tuple` with both a token and its associated ID, in any order - A :obj:`dict` with the following keys: - "id": :obj:`str` => The special token id, as specified in the Template - "ids": :obj:`List[int]` => The associated IDs - "tokens": :obj:`List[str]` => The associated tokens The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have the same length. """ def __init__(self, single, pair, special_tokens): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass
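
# Illustrative usage (added; not part of the generated stub). The token ids 101
# and 102 are placeholders for whatever the vocabulary actually assigns:
#
#   from tokenizers.processors import TemplateProcessing
#   processor = TemplateProcessing(
#       single="[CLS] $A [SEP]",
#       pair="[CLS] $A [SEP] $B:1 [SEP]:1",
#       special_tokens=[("[CLS]", 101), ("[SEP]", 102)],
#   )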
tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi", "repo_id": "tokenizers", "token_count": 4779 }
222
use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::{Arc, RwLock}; use crate::token::PyToken; use crate::trainers::PyTrainer; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::{Deserialize, Serialize}; use tk::models::bpe::{BpeBuilder, Merges, Vocab, BPE}; use tk::models::unigram::Unigram; use tk::models::wordlevel::WordLevel; use tk::models::wordpiece::{WordPiece, WordPieceBuilder}; use tk::models::ModelWrapper; use tk::{Model, Token}; use tokenizers as tk; use super::error::{deprecation_warning, ToPyResult}; /// Base class for all models /// /// The model represents the actual tokenization algorithm. This is the part that /// will contain and manage the learned vocabulary. /// /// This class cannot be constructed directly. Please use one of the concrete models. #[pyclass(module = "tokenizers.models", name = "Model", subclass)] #[derive(Clone, Serialize, Deserialize)] pub struct PyModel { #[serde(flatten)] pub model: Arc<RwLock<ModelWrapper>>, } impl PyModel { pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match *self.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => Py::new(py, (PyBPE {}, base))?.into_py(py), ModelWrapper::WordPiece(_) => Py::new(py, (PyWordPiece {}, base))?.into_py(py), ModelWrapper::WordLevel(_) => Py::new(py, (PyWordLevel {}, base))?.into_py(py), ModelWrapper::Unigram(_) => Py::new(py, (PyUnigram {}, base))?.into_py(py), }) } } impl Model for PyModel { type Trainer = PyTrainer; fn tokenize(&self, tokens: &str) -> tk::Result<Vec<Token>> { self.model.read().unwrap().tokenize(tokens) } fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } fn get_vocab(&self) -> HashMap<String, u32> { self.model.read().unwrap().get_vocab() } fn get_vocab_size(&self) -> usize { self.model.read().unwrap().get_vocab_size() } fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> { self.model.read().unwrap().save(folder, name) } fn get_trainer(&self) -> Self::Trainer { self.model.read().unwrap().get_trainer().into() } } impl<I> From<I> for PyModel where I: Into<ModelWrapper>, { fn from(model: I) -> Self { Self { model: Arc::new(RwLock::new(model.into())), } } } #[pymethods] impl PyModel { #[new] #[pyo3(text_signature = None)] fn __new__() -> Self { // Instantiate a default empty model. This doesn't really make sense, but we need // to be able to instantiate an empty model for pickle capabilities. 
PyModel { model: Arc::new(RwLock::new(BPE::default().into())), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.model).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Model: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.model = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Model: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Tokenize a sequence /// /// Args: /// sequence (:obj:`str`): /// A sequence to tokenize /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens #[pyo3(text_signature = "(self, sequence)")] fn tokenize(&self, sequence: &str) -> PyResult<Vec<PyToken>> { Ok(ToPyResult(self.model.read().unwrap().tokenize(sequence)) .into_py()? .into_iter() .map(|t| t.into()) .collect()) } /// Get the ID associated to a token /// /// Args: /// token (:obj:`str`): /// A token to convert to an ID /// /// Returns: /// :obj:`int`: The ID associated to the token #[pyo3(text_signature = "(self, tokens)")] fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } /// Get the token associated to an ID /// /// Args: /// id (:obj:`int`): /// An ID to convert to a token /// /// Returns: /// :obj:`str`: The token associated to the ID #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } /// Save the current model /// /// Save the current model in the given folder, using the given prefix for the various /// files that will get created. /// Any file with the same name that already exists in this folder will be overwritten. /// /// Args: /// folder (:obj:`str`): /// The path to the target folder in which to save the various files /// /// prefix (:obj:`str`, `optional`): /// An optional prefix, used to prefix each file name /// /// Returns: /// :obj:`List[str]`: The list of saved files #[pyo3(text_signature = "(self, folder, prefix)")] fn save<'a>( &self, py: Python<'_>, folder: &str, mut prefix: Option<&'a str>, name: Option<&'a str>, ) -> PyResult<Vec<String>> { if name.is_some() { deprecation_warning( py, "0.10.0", "Parameter `name` of Model.save has been renamed `prefix`", )?; if prefix.is_none() { prefix = name; } } let saved: PyResult<Vec<_>> = ToPyResult(self.model.read().unwrap().save(Path::new(folder), prefix)).into(); Ok(saved? .into_iter() .map(|path| path.to_string_lossy().into_owned()) .collect()) } /// Get the associated :class:`~tokenizers.trainers.Trainer` /// /// Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this /// :class:`~tokenizers.models.Model`. 
/// /// Returns: /// :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model #[pyo3(text_signature = "(self)")] fn get_trainer(&self, py: Python<'_>) -> PyResult<PyObject> { PyTrainer::from(self.model.read().unwrap().get_trainer()).get_as_subtype(py) } } /// An implementation of the BPE (Byte-Pair Encoding) algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// merges (:obj:`List[Tuple[str, str]]`, `optional`): /// A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` /// /// cache_capacity (:obj:`int`, `optional`): /// The number of words that the BPE cache can contain. The cache allows /// to speed-up the process by keeping the result of the merge operations /// for a number of words. /// /// dropout (:obj:`float`, `optional`): /// A float between 0 and 1 that represents the BPE dropout to use. /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// The prefix to attach to subword units that don't represent a beginning of word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// The suffix to attach to subword units that represent an end of word. /// /// fuse_unk (:obj:`bool`, `optional`): /// Whether to fuse any subsequent unknown tokens into a single one /// /// byte_fallback (:obj:`bool`, `optional`): /// Whether to use spm byte-fallback trick (defaults to False) #[pyclass(extends=PyModel, module = "tokenizers.models", name = "BPE")] pub struct PyBPE {} impl PyBPE { fn with_builder(mut builder: BpeBuilder, kwargs: Option<&PyDict>) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "cache_capacity" => builder = builder.cache_capacity(value.extract()?), "dropout" => { if let Some(dropout) = value.extract()? { builder = builder.dropout(dropout); } } "unk_token" => { if let Some(unk) = value.extract()? { builder = builder.unk_token(unk); } } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(value.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(value.extract()?), "fuse_unk" => builder = builder.fuse_unk(value.extract()?), "byte_fallback" => builder = builder.byte_fallback(value.extract()?), _ => println!("Ignored unknown kwarg option {}", key), }; } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing BPE: {}", e ))), Ok(bpe) => Ok((PyBPE {}, bpe.into())), } } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); let model = super_.model.read().unwrap(); if let ModelWrapper::$variant(ref mo) = *model { mo.$($name)+ } else { unreachable!() } }}; } macro_rules! 
setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); let mut model = super_.model.write().unwrap(); if let ModelWrapper::$variant(ref mut mo) = *model { mo.$name = $value; } }}; } #[derive(FromPyObject)] enum PyVocab<'a> { Vocab(Vocab), Filename(&'a str), } #[derive(FromPyObject)] enum PyMerges<'a> { Merges(Merges), Filename(&'a str), } #[pymethods] impl PyBPE { #[getter] fn get_dropout(self_: PyRef<Self>) -> Option<f32> { getter!(self_, BPE, dropout) } #[setter] fn set_dropout(self_: PyRef<Self>, dropout: Option<f32>) { setter!(self_, BPE, dropout, dropout); } #[getter] fn get_unk_token(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: Option<String>) { setter!(self_, BPE, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix( self_: PyRef<Self>, continuing_subword_prefix: Option<String>, ) { setter!( self_, BPE, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, end_of_word_suffix.clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, end_of_word_suffix: Option<String>) { setter!(self_, BPE, end_of_word_suffix, end_of_word_suffix); } #[getter] fn get_fuse_unk(self_: PyRef<Self>) -> bool { getter!(self_, BPE, fuse_unk) } #[setter] fn set_fuse_unk(self_: PyRef<Self>, fuse_unk: bool) { setter!(self_, BPE, fuse_unk, fuse_unk); } #[getter] fn get_byte_fallback(self_: PyRef<Self>) -> bool { getter!(self_, BPE, byte_fallback) } #[setter] fn set_byte_fallback(self_: PyRef<Self>, byte_fallback: bool) { setter!(self_, BPE, byte_fallback, byte_fallback); } #[new] #[pyo3( signature = (vocab=None, merges=None, **kwargs), text_signature = "(self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, merges: Option<PyMerges>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if (vocab.is_some() && merges.is_none()) || (vocab.is_none() && merges.is_some()) { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both specified", )); } let mut builder = BPE::builder(); if let (Some(vocab), Some(merges)) = (vocab, merges) { match (vocab, merges) { (PyVocab::Vocab(vocab), PyMerges::Merges(merges)) => { builder = builder.vocab_and_merges(vocab, merges); } (PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => { deprecation_warning( py, "0.9.0", "BPE.__init__ will not create from files anymore, try `BPE.from_file` instead", )?; builder = builder.files(vocab_filename.to_string(), merges_filename.to_string()); } _ => { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both be from memory or both filenames", )); } } } PyBPE::with_builder(builder, kwargs) } /// Read a :obj:`vocab.json` and a :obj:`merges.txt` files /// /// This method provides a way to read and parse the content of these files, /// returning the relevant data structures. If you want to instantiate some BPE models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// A :obj:`Tuple` with the vocab and the merges: /// The vocabulary and merges loaded into memory #[staticmethod] #[pyo3(text_signature = "(vocab, merges)")] fn read_file(vocab: &str, merges: &str) -> PyResult<(Vocab, Merges)> { BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!( "Error while reading vocab & merges files: {}", e )) }) } /// Instantiate a BPE model from the given files. /// /// This method is roughly equivalent to doing:: /// /// vocab, merges = BPE.read_file(vocab_filename, merges_filename) /// bpe = BPE(vocab, merges) /// /// If you don't need to keep the :obj:`vocab, merges` values lying around, /// this method is more optimized than manually calling /// :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files #[classmethod] #[pyo3(signature = (vocab, merges, **kwargs))] #[pyo3(text_signature = "(cls, vocab, merges, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, merges: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let (vocab, merges) = BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading BPE files: {}", e)) })?; Py::new( py, PyBPE::new( py, Some(PyVocab::Vocab(vocab)), Some(PyMerges::Merges(merges)), kwargs, )?, ) } } /// An implementation of the WordPiece algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// max_input_chars_per_word (:obj:`int`, `optional`): /// The maximum number of characters to authorize in a single word.
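///
/// Example (illustrative only; the vocabulary below is made up)::
///
///     from tokenizers.models import WordPiece
///
///     vocab = {"[UNK]": 0, "my": 1, "##name": 2}
///     wordpiece = WordPiece(vocab, unk_token="[UNK]", max_input_chars_per_word=100)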
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordPiece")] pub struct PyWordPiece {} impl PyWordPiece { fn with_builder( mut builder: WordPieceBuilder, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "unk_token" => { builder = builder.unk_token(val.extract()?); } "max_input_chars_per_word" => { builder = builder.max_input_chars_per_word(val.extract()?); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?); } _ => println!("Ignored unknown kwargs option {}", key), } } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing WordPiece: {}", e ))), Ok(wordpiece) => Ok((PyWordPiece {}, wordpiece.into())), } } } #[pymethods] impl PyWordPiece { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordPiece, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, continuing_subword_prefix: String) { setter!( self_, WordPiece, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_max_input_chars_per_word(self_: PyRef<Self>) -> usize { getter!(self_, WordPiece, max_input_chars_per_word) } #[setter] fn set_max_input_chars_per_word(self_: PyRef<Self>, max: usize) { setter!(self_, WordPiece, max_input_chars_per_word, max); } #[new] #[pyo3(signature = (vocab=None, **kwargs), text_signature = "(self, vocab, unk_token, max_input_chars_per_word)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordPiece::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordPiece.__init__ will not create from files anymore, try `WordPiece.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } } } PyWordPiece::with_builder(builder, kwargs) } /// Read a :obj:`vocab.txt` file /// /// This method provides a way to read and parse the content of a standard `vocab.txt` /// file as used by the WordPiece Model, returning the relevant data structures. If you /// want to instantiate some WordPiece models from memory, this method gives you the /// expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) }) } /// Instantiate a WordPiece model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordPiece.read_file(vocab_filename) /// wordpiece = WordPiece(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to /// initialize a :class:`~tokenizers.models.WordPiece` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file #[classmethod] #[pyo3(signature = (vocab, **kwargs))] #[pyo3(text_signature = "(vocab, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let vocab = WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) })?; Py::new( py, PyWordPiece::new(py, Some(PyVocab::Vocab(vocab)), kwargs)?, ) } } /// An implementation of the WordLevel algorithm /// /// Most simple tokenizer model based on mapping tokens to their corresponding id. /// /// Args: /// vocab (:obj:`str`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. #[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordLevel")] pub struct PyWordLevel {} #[pymethods] impl PyWordLevel { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordLevel, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordLevel, unk_token, unk_token); } #[new] #[pyo3(signature = (vocab=None, unk_token = None), text_signature = "(self, vocab, unk_token)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, unk_token: Option<String>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordLevel::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordLevel.__init__ will not create from files anymore, \ try `WordLevel.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } }; } if let Some(unk_token) = unk_token { builder = builder.unk_token(unk_token); } Ok(( PyWordLevel {}, builder .build() .map_err(|e| exceptions::PyException::new_err(e.to_string()))? .into(), )) } /// Read a :obj:`vocab.json` /// /// This method provides a way to read and parse the content of a vocabulary file, /// returning the relevant data structures. If you want to instantiate some WordLevel models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) }) } /// Instantiate a WordLevel model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordLevel.read_file(vocab_filename) /// wordlevel = WordLevel(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to /// initialize a :class:`~tokenizers.models.WordLevel` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file #[classmethod] #[pyo3(signature = (vocab, unk_token = None))] #[pyo3(text_signature = "(vocab, unk_token)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, unk_token: Option<String>, ) -> PyResult<Py<Self>> { let vocab = WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) })?; Py::new( py, PyWordLevel::new(py, Some(PyVocab::Vocab(vocab)), unk_token)?, ) } } /// An implementation of the Unigram algorithm /// /// Args: /// vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): /// A list of vocabulary items and their relative score [("am", -0.2442),...] #[pyclass(extends=PyModel, module = "tokenizers.models", name = "Unigram")] pub struct PyUnigram {} #[pymethods] impl PyUnigram { #[new] #[pyo3(text_signature = "(self, vocab, unk_id, byte_fallback)")] fn new( vocab: Option<Vec<(String, f64)>>, unk_id: Option<usize>, byte_fallback: Option<bool>, ) -> PyResult<(Self, PyModel)> { match (vocab, unk_id, byte_fallback) { (Some(vocab), unk_id, byte_fallback) => { let model = Unigram::from(vocab, unk_id, byte_fallback.unwrap_or(false)).map_err(|e| { exceptions::PyException::new_err(format!( "Error while loading Unigram: {}", e )) })?; Ok((PyUnigram {}, model.into())) } (None, None, _) => Ok((PyUnigram {}, Unigram::default().into())), _ => Err(exceptions::PyValueError::new_err( "`vocab` and `unk_id` must be both specified", )), } } } /// Models Module #[pymodule] pub fn models(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyModel>()?; m.add_class::<PyBPE>()?; m.add_class::<PyWordPiece>()?; m.add_class::<PyWordLevel>()?; m.add_class::<PyUnigram>()?; Ok(()) } #[cfg(test)] mod test { use crate::models::PyModel; use pyo3::prelude::*; use tk::models::bpe::BPE; use tk::models::ModelWrapper; #[test] fn get_subtype() { Python::with_gil(|py| { let py_model = PyModel::from(BPE::default()); let py_bpe = py_model.get_as_subtype(py).unwrap(); assert_eq!("BPE", py_bpe.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let rs_bpe = BPE::default(); let rs_bpe_ser = serde_json::to_string(&rs_bpe).unwrap(); let rs_wrapper: ModelWrapper = rs_bpe.into(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_model = PyModel::from(rs_wrapper); let py_ser = serde_json::to_string(&py_model).unwrap(); assert_eq!(py_ser, rs_bpe_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_model: PyModel = serde_json::from_str(&rs_bpe_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => 
(), _ => panic!("Expected BPE model."), }; let py_model: PyModel = serde_json::from_str(&rs_wrapper_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => (), _ => panic!("Expected BPE model."), }; } }
tokenizers/bindings/python/src/models.rs/0
{ "file_path": "tokenizers/bindings/python/src/models.rs", "repo_id": "tokenizers", "token_count": 14445 }
223
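The bindings above expose `BPE`, `WordPiece`, `WordLevel` and `Unigram` to Python as `tokenizers.models`. As a rough, illustrative sketch of how these constructors are reached from the Python side (the toy vocabularies, merges and scores below are invented, not taken from any real tokenizer):

```python
from tokenizers.models import BPE, WordLevel, Unigram

# BPE wants `vocab` and `merges` together, matching the check in PyBPE::new
bpe = BPE(
    vocab={"[UNK]": 0, "a": 1, "b": 2, "ab": 3},
    merges=[("a", "b")],
    unk_token="[UNK]",
)

# WordLevel is a plain token -> id mapping with an unknown token
word_level = WordLevel(vocab={"[UNK]": 0, "hello": 1}, unk_token="[UNK]")

# Unigram takes (piece, log-probability) pairs plus the index of the unknown piece
unigram = Unigram([("[UNK]", 0.0), ("hello", -2.3)], 0)
```

Any of these model instances can then be handed to a `Tokenizer` for encoding.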
import json import pickle import pytest from tokenizers.decoders import ( CTC, BPEDecoder, ByteLevel, Decoder, Metaspace, Sequence, WordPiece, ByteFallback, Replace, Strip, Fuse, ) class TestByteLevel: def test_instantiate(self): assert ByteLevel() is not None assert isinstance(ByteLevel(), Decoder) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_decoding(self): decoder = ByteLevel() assert decoder.decode(["My", "Ġname", "Ġis", "ĠJohn"]) == "My name is John" def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestReplace: def test_instantiate(self): assert Replace("_", " ") is not None assert isinstance(Replace("_", " "), Decoder) assert isinstance(Replace("_", " "), Replace) # assert isinstance(pickle.loads(pickle.dumps(Replace("_", " "))), Replace) def test_decoding(self): decoder = Replace("_", " ") assert decoder.decode(["My", "_name", "_is", "_John"]) == "My name is John" class TestWordPiece: def test_instantiate(self): assert WordPiece() is not None assert WordPiece(prefix="__") is not None assert WordPiece(cleanup=True) is not None assert isinstance(WordPiece(), Decoder) assert isinstance(WordPiece(), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece())), WordPiece) def test_decoding(self): decoder = WordPiece() assert decoder.decode(["My", "na", "##me", "is", "Jo", "##hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "##hn"]) == "I'm John" decoder = WordPiece(prefix="__", cleanup=False) assert decoder.decode(["My", "na", "__me", "is", "Jo", "__hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "__hn"]) == "I 'm John" def test_can_modify(self): decoder = WordPiece(prefix="$$", cleanup=False) assert decoder.prefix == "$$" assert decoder.cleanup == False # Modify these decoder.prefix = "__" assert decoder.prefix == "__" decoder.cleanup = True assert decoder.cleanup == True class TestByteFallback: def test_instantiate(self): assert ByteFallback() is not None assert isinstance(ByteFallback(), Decoder) assert isinstance(ByteFallback(), ByteFallback) assert isinstance(pickle.loads(pickle.dumps(ByteFallback())), ByteFallback) def test_decoding(self): decoder = ByteFallback() assert decoder.decode(["My", " na", "me"]) == "My name" assert decoder.decode(["<0x61>"]) == "a" assert decoder.decode(["<0xE5>"]) == "�" assert decoder.decode(["<0xE5>", "<0x8f>"]) == "��" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>"]) == "叫" assert decoder.decode(["<0xE5>", "<0x8f>", "a"]) == "��a" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>", "a"]) == "叫a" class TestFuse: def test_instantiate(self): assert Fuse() is not None assert isinstance(Fuse(), Decoder) assert isinstance(Fuse(), Fuse) assert isinstance(pickle.loads(pickle.dumps(Fuse())), Fuse) def test_decoding(self): decoder = Fuse() assert decoder.decode(["My", " na", "me"]) == "My name" class TestStrip: def test_instantiate(self): assert Strip(left=0, right=0) is not None assert isinstance(Strip(content="_", left=0, right=0), Decoder) assert isinstance(Strip(content="_", left=0, right=0), Strip) assert isinstance(pickle.loads(pickle.dumps(Strip(content="_", left=0, right=0))), Strip) def test_decoding(self): decoder = Strip(content="_", left=1, right=0) assert decoder.decode(["_My", " na", "me", " _-", "__-"]) == "My name _-_-" class TestMetaspace: def test_instantiate(self): assert Metaspace() is not 
None assert Metaspace(replacement="-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): Metaspace(replacement="") assert Metaspace(add_prefix_space=True) is not None assert isinstance(Metaspace(), Decoder) assert isinstance(Metaspace(), Metaspace) assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace) def test_decoding(self): decoder = Metaspace() assert decoder.decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John" decoder = Metaspace(replacement="-", add_prefix_space=False) assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John" def test_can_modify(self): decoder = Metaspace(replacement="*", add_prefix_space=False) assert decoder.replacement == "*" assert decoder.add_prefix_space == False # Modify these decoder.replacement = "&" assert decoder.replacement == "&" decoder.add_prefix_space = True assert decoder.add_prefix_space == True class TestBPEDecoder: def test_instantiate(self): assert BPEDecoder() is not None assert BPEDecoder(suffix="_") is not None assert isinstance(BPEDecoder(), Decoder) assert isinstance(BPEDecoder(), BPEDecoder) assert isinstance(pickle.loads(pickle.dumps(BPEDecoder())), BPEDecoder) def test_decoding(self): decoder = BPEDecoder() assert decoder.decode(["My</w>", "na", "me</w>", "is</w>", "Jo", "hn</w>"]) == "My name is John" decoder = BPEDecoder(suffix="_") assert decoder.decode(["My_", "na", "me_", "is_", "Jo", "hn_"]) == "My name is John" def test_can_modify(self): decoder = BPEDecoder(suffix="123") assert decoder.suffix == "123" # Modify these decoder.suffix = "</w>" assert decoder.suffix == "</w>" class TestCTCDecoder: def test_instantiate(self): assert CTC() is not None assert CTC(pad_token="[PAD]") is not None assert isinstance(CTC(), Decoder) assert isinstance(CTC(), CTC) assert isinstance(pickle.loads(pickle.dumps(CTC())), CTC) def test_decoding(self): decoder = CTC() assert ( decoder.decode(["<pad>", "<pad>", "h", "e", "e", "l", "l", "<pad>", "l", "o", "o", "o", "<pad>"]) == "hello" ) decoder = CTC(pad_token="[PAD]") assert ( decoder.decode(["[PAD]", "[PAD]", "h", "e", "e", "l", "l", "[PAD]", "l", "o", "o", "o", "[PAD]"]) == "hello" ) def test_can_modify(self): decoder = CTC(pad_token="[PAD]") assert decoder.pad_token == "[PAD]" assert decoder.word_delimiter_token == "|" assert decoder.cleanup == True # Modify these decoder.pad_token = "{pad}" assert decoder.pad_token == "{pad}" decoder.word_delimiter_token = "_" assert decoder.word_delimiter_token == "_" decoder.cleanup = False assert decoder.cleanup == False class TestSequenceDecoder: def test_instantiate(self): assert Sequence([]) is not None assert Sequence([CTC()]) is not None assert isinstance(Sequence([]), Decoder) assert isinstance(Sequence([]), Sequence) serialized = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(serialized), Sequence) def test_decoding(self): decoder = Sequence([CTC(), Metaspace()]) initial = ["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"] expected = "Hi you" assert decoder.decode(initial) == expected
tokenizers/bindings/python/tests/bindings/test_decoders.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_decoders.py", "repo_id": "tokenizers", "token_count": 3521 }
224
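The decoder tests above double as usage documentation. A minimal sketch of the same API from user code (the token lists are made up, chosen to mirror the expectations in the tests):

```python
from tokenizers.decoders import CTC, Metaspace, Sequence, WordPiece

# WordPiece strips the "##" continuation prefix and re-inserts spaces
assert WordPiece().decode(["My", "na", "##me"]) == "My name"

# Sequence chains decoders in order, as in TestSequenceDecoder above
decoder = Sequence([CTC(), Metaspace()])
print(decoder.decode(["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"]))  # "Hi you"
```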
from tokenizers import CharBPETokenizer from ..utils import data_dir, multiprocessing_with_parallelism, openai_files class TestCharBPETokenizer: def test_basic_encode(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"]) output = tokenizer.encode("My name is John", "pair") assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688] assert output.tokens == [ "<unk>", "y</w>", "name</w>", "is</w>", "<unk>", "o", "hn</w>", "pair</w>", ] assert output.offsets == [ (0, 1), (1, 2), (3, 7), (8, 10), (11, 12), (12, 13), (13, 15), (0, 4), ] assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1] def test_lowercase(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True) output = tokenizer.encode("My name is John", "pair", add_special_tokens=False) assert output.ids == [547, 1362, 544, 2476, 2688] assert output.tokens == ["my</w>", "name</w>", "is</w>", "john</w>", "pair</w>"] assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)] assert output.type_ids == [0, 0, 0, 0, 1] def test_decoding(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True) decoded = tokenizer.decode(tokenizer.encode("my name is john").ids) assert decoded == "my name is john" def test_multiprocessing_with_parallelism(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"]) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = CharBPETokenizer() tokenizer.train_from_iterator(text, show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["A</w>", "sentence</w>"]
tokenizers/bindings/python/tests/implementations/test_char_bpe.py/0
{ "file_path": "tokenizers/bindings/python/tests/implementations/test_char_bpe.py", "repo_id": "tokenizers", "token_count": 1094 }
225
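Outside of pytest, the same flow takes only a few lines; this sketch simply mirrors `test_train_from_iterator` above, so the expected tokens come straight from that test:

```python
from tokenizers import CharBPETokenizer

# Train a small character-level BPE from an in-memory iterator
tokenizer = CharBPETokenizer()
tokenizer.train_from_iterator(
    ["A first sentence", "Another sentence", "And a last one"],
    show_progress=False,
)
print(tokenizer.encode("A sentence").tokens)  # ['A</w>', 'sentence</w>']
```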
# Tokenizer <tokenizerslangcontent> <python> ## Tokenizer [[autodoc]] tokenizers.Tokenizer - all - decoder - model - normalizer - padding - post_processor - pre_tokenizer - truncation </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/tokenizer.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/tokenizer.mdx", "repo_id": "tokenizers", "token_count": 156 }
226
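The members listed for autodoc above are ordinary properties on `Tokenizer`. A small, illustrative sketch (the empty `BPE()` model is just a placeholder):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase
from tokenizers.pre_tokenizers import Whitespace

tokenizer = Tokenizer(BPE())            # `model` is fixed at construction
tokenizer.normalizer = Lowercase()      # `normalizer` is a settable property
tokenizer.pre_tokenizer = Whitespace()  # and so is `pre_tokenizer`
print(tokenizer.padding, tokenizer.truncation)  # both None until enabled
```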
.highlight .c1, .highlight .sd{ color: #999 } .highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc, .highlight .kt { color: #FB8D68; } .highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow, .highlight .kd, .highlight .kr, .highlight .s { color: #6670FF; } .highlight .gp { color: #FB8D68; }
tokenizers/docs/source/_static/css/code-snippets.css/0
{ "file_path": "tokenizers/docs/source/_static/css/code-snippets.css", "repo_id": "tokenizers", "token_count": 166 }
227
Quicktour ==================================================================================================== Let's have a quick look at the 🤗 Tokenizers library features. The library provides an implementation of today's most used tokenizers that is both easy to use and blazing fast. .. only:: python It can be used to instantiate a :ref:`pretrained tokenizer <pretrained>` but we will start our quicktour by building one from scratch and see how we can train it. Build a tokenizer from scratch ---------------------------------------------------------------------------------------------------- To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on `wikitext-103 <https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/>`__ (516M of text) in just a few seconds. First things first, you will need to download this dataset and unzip it with: .. code-block:: bash wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip unzip wikitext-103-raw-v1.zip Training the tokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. entities:: python BpeTrainer :class:`~tokenizers.trainers.BpeTrainer` vocab_size :obj:`vocab_size` min_frequency :obj:`min_frequency` special_tokens :obj:`special_tokens` unk_token :obj:`unk_token` pad_token :obj:`pad_token` .. entities:: rust BpeTrainer :rust_struct:`~tokenizers::models::bpe::BpeTrainer` vocab_size :obj:`vocab_size` min_frequency :obj:`min_frequency` special_tokens :obj:`special_tokens` unk_token :obj:`unk_token` pad_token :obj:`pad_token` .. entities:: node BpeTrainer BpeTrainer vocab_size :obj:`vocabSize` min_frequency :obj:`minFrequency` special_tokens :obj:`specialTokens` unk_token :obj:`unkToken` pad_token :obj:`padToken` In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information about the different type of tokenizers, check out this `guide <https://huggingface.co/docs/transformers/main/en/tokenizer_summary#summary-of-the-tokenizers>`__ in the 🤗 Transformers documentation. Here, training the tokenizer means it will learn merge rules by: - Start with all the characters present in the training corpus as tokens. - Identify the most common pair of tokens and merge it into one token. - Repeat until the vocabulary (e.g., the number of tokens) has reached the size we want. The main API of the library is the :entity:`class` :entity:`Tokenizer`, here is how we instantiate one with a BPE model: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_tokenizer :end-before: END init_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_tokenizer :end-before: END quicktour_init_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_tokenizer :end-before: END init_tokenizer :dedent: 4 To train our tokenizer on the wikitext files, we will need to instantiate a `trainer`, in this case a :entity:`BpeTrainer` .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_trainer :end-before: END init_trainer :dedent: 8 .. only:: rust .. 
literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_trainer :end-before: END quicktour_init_trainer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_trainer :end-before: END init_trainer :dedent: 4 We can set the training arguments like :entity:`vocab_size` or :entity:`min_frequency` (here left at their default values of 30,000 and 0) but the most important part is to give the :entity:`special_tokens` we plan to use later on (they are not used at all during training) so that they get inserted in the vocabulary. .. note:: The order in which you write the special tokens list matters: here :obj:`"[UNK]"` will get the ID 0, :obj:`"[CLS]"` will get the ID 1 and so forth. We could train our tokenizer right now, but it wouldn't be optimal. Without a pre-tokenizer that will split our inputs into words, we might get tokens that overlap several words: for instance we could get an :obj:`"it is"` token since those two words often appear next to each other. Using a pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting on whitespace. .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_pretok :end-before: END init_pretok :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_pretok :end-before: END quicktour_init_pretok :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_pretok :end-before: END init_pretok :dedent: 4 Now, we can just call the :entity:`Tokenizer.train` method with any list of files we want to use: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START train :end-before: END train :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_train :end-before: END quicktour_train :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START train :end-before: END train :dedent: 4 This should only take a few seconds to train our tokenizer on the full wikitext dataset! To save the tokenizer in one file that contains all its configuration and vocabulary, just use the :entity:`Tokenizer.save` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START save :end-before: END save :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_save :end-before: END quicktour_save :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START save :end-before: END save :dedent: 4 and you can reload your tokenizer from that file with the :entity:`Tokenizer.from_file` :entity:`classmethod`: .. only:: python .. 
literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 12 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_reload_tokenizer :end-before: END quicktour_reload_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 4 Using the tokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now that we have trained a tokenizer, we can use it on any text we want with the :entity:`Tokenizer.encode` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START encode :end-before: END encode :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_encode :end-before: END quicktour_encode :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START encode :end-before: END encode :dedent: 4 This applied the full pipeline of the tokenizer on the text, returning an :entity:`Encoding` object. To learn more about this pipeline, and how to apply (or customize) parts of it, check out :doc:`this page <pipeline>`. This :entity:`Encoding` object then has all the attributes you need for your deep learning model (or other). The :obj:`tokens` attribute contains the segmentation of your text in tokens: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_tokens :end-before: END print_tokens :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_tokens :end-before: END quicktour_print_tokens :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_tokens :end-before: END print_tokens :dedent: 4 Similarly, the :obj:`ids` attribute will contain the index of each of those tokens in the tokenizer's vocabulary: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_ids :end-before: END print_ids :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_ids :end-before: END quicktour_print_ids :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_ids :end-before: END print_ids :dedent: 4 An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking, meaning you can always get the part of your original sentence that corresponds to a given token. Those are stored in the :obj:`offsets` attribute of our :entity:`Encoding` object. For instance, let's assume we would want to find back what caused the :obj:`"[UNK]"` token to appear, which is the token at index 9 in the list, we can just ask for the offset at the index: .. only:: python .. 
literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_offsets :end-before: END print_offsets :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_offsets :end-before: END quicktour_print_offsets :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_offsets :end-before: END print_offsets :dedent: 4 and those are the indices that correspond to the emoji in the original sentence: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START use_offsets :end-before: END use_offsets :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_use_offsets :end-before: END quicktour_use_offsets :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START use_offsets :end-before: END use_offsets :dedent: 4 Post-processing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We might want our tokenizer to automatically add special tokens, like :obj:`"[CLS]"` or :obj:`"[SEP]"`. To do this, we use a post-processor. :entity:`TemplateProcessing` is the most commonly used, you just have to specify a template for the processing of single sentences and pairs of sentences, along with the special tokens and their IDs. When we built our tokenizer, we set :obj:`"[CLS]"` and :obj:`"[SEP]"` in positions 1 and 2 of our list of special tokens, so this should be their IDs. To double-check, we can use the :entity:`Tokenizer.token_to_id` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START check_sep :end-before: END check_sep :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_check_sep :end-before: END quicktour_check_sep :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START check_sep :end-before: END check_sep :dedent: 4 Here is how we can set the post-processing to give us the traditional BERT inputs: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_template_processing :end-before: END init_template_processing :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_template_processing :end-before: END quicktour_init_template_processing :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_template_processing :end-before: END init_template_processing :dedent: 4 Let's go over this snippet of code in more details. First we specify the template for single sentences: those should have the form :obj:`"[CLS] $A [SEP]"` where :obj:`$A` represents our sentence. Then, we specify the template for sentence pairs, which should have the form :obj:`"[CLS] $A [SEP] $B [SEP]"` where :obj:`$A` represents the first sentence and :obj:`$B` the second one. 
The :obj:`:1` added in the template represent the `type IDs` we want for each part of our input: it defaults to 0 for everything (which is why we don't have :obj:`$A:0`) and here we set it to 1 for the tokens of the second sentence and the last :obj:`"[SEP]"` token. Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary. To check out this worked properly, let's try to encode the same sentence as before: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_special_tokens :end-before: END print_special_tokens :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_special_tokens :end-before: END quicktour_print_special_tokens :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_special_tokens :end-before: END print_special_tokens :dedent: 4 To check the results on a pair of sentences, we just pass the two sentences to :entity:`Tokenizer.encode`: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_special_tokens_pair :end-before: END print_special_tokens_pair :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_special_tokens_pair :end-before: END quicktour_print_special_tokens_pair :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_special_tokens_pair :end-before: END print_special_tokens_pair :dedent: 4 You can then check the type IDs attributed to each token is correct with .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_type_ids :end-before: END print_type_ids :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_type_ids :end-before: END quicktour_print_type_ids :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_type_ids :end-before: END print_type_ids :dedent: 4 If you save your tokenizer with :entity:`Tokenizer.save`, the post-processor will be saved along. Encoding multiple sentences in a batch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To get the full speed of the 🤗 Tokenizers library, it's best to process your texts by batches by using the :entity:`Tokenizer.encode_batch` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START encode_batch :end-before: END encode_batch :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_encode_batch :end-before: END quicktour_encode_batch :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START encode_batch :end-before: END encode_batch :dedent: 4 The output is then a list of :entity:`Encoding` objects like the ones we saw before. 
You can process together as many texts as you like, as long as it fits in memory. To process a batch of sentences pairs, pass two lists to the :entity:`Tokenizer.encode_batch` method: the list of sentences A and the list of sentences B: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START encode_batch_pair :end-before: END encode_batch_pair :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_encode_batch_pair :end-before: END quicktour_encode_batch_pair :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START encode_batch_pair :end-before: END encode_batch_pair :dedent: 4 When encoding multiple sentences, you can automatically pad the outputs to the longest sentence present by using :entity:`Tokenizer.enable_padding`, with the :entity:`pad_token` and its ID (which we can double-check the id for the padding token with :entity:`Tokenizer.token_to_id` like before): .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START enable_padding :end-before: END enable_padding :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_enable_padding :end-before: END quicktour_enable_padding :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START enable_padding :end-before: END enable_padding :dedent: 4 We can set the :obj:`direction` of the padding (defaults to the right) or a given :obj:`length` if we want to pad every sample to that specific number (here we leave it unset to pad to the size of the longest text). .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_batch_tokens :end-before: END print_batch_tokens :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_batch_tokens :end-before: END quicktour_print_batch_tokens :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_batch_tokens :end-before: END print_batch_tokens :dedent: 4 In this case, the `attention mask` generated by the tokenizer takes the padding into account: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_attention_mask :end-before: END print_attention_mask :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_attention_mask :end-before: END quicktour_print_attention_mask :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_attention_mask :end-before: END print_attention_mask :dedent: 4 .. _pretrained: .. only:: python Using a pretrained tokenizer ------------------------------------------------------------------------------------------------ You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is available in the repository. .. 
code-block:: python from tokenizers import Tokenizer tokenizer = Tokenizer.from_pretrained("bert-base-uncased") Importing a pretrained tokenizer from legacy vocabulary files ------------------------------------------------------------------------------------------------ You can also import a pretrained tokenizer directly, as long as you have its vocabulary file. For instance, here is how to import the classic pretrained BERT tokenizer: .. code-block:: python from tokenizers import BertWordPieceTokenizer tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True) as long as you have downloaded the file `bert-base-uncased-vocab.txt` with .. code-block:: bash wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
tokenizers/docs/source/quicktour.rst/0
{ "file_path": "tokenizers/docs/source/quicktour.rst", "repo_id": "tokenizers", "token_count": 8904 }
228
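For reference, the Python path described in the quicktour condenses to roughly the following sketch (the file names assume the wikitext-103 archive from the download step was unzipped into the working directory, and the output file name is arbitrary):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer
from tokenizers.pre_tokenizers import Whitespace

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ("test", "train", "valid")]
tokenizer.train(files, trainer)
tokenizer.save("tokenizer-wiki.json")

output = tokenizer.encode("Hello, y'all! How are you?")
print(output.tokens)
print(output.ids)
```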
<div align="center"> <h1><code>wasm-pack-template</code></h1> <strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong> <p> <a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/travis/rustwasm/wasm-pack-template.svg?style=flat-square" alt="Build Status" /></a> </p> <h3> <a href="https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html">Tutorial</a> <span> | </span> <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a> </h3> <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub> </div> ## About This is an example project showing off a very basic use case for `wasm` tokenizers usage. [**📚 Read this template tutorial! 📚**][template-docs] This template is designed for compiling Rust libraries into WebAssembly and publishing the resulting package to NPM. Be sure to check out [other `wasm-pack` tutorials online][tutorials] for other templates and usages of `wasm-pack`. [tutorials]: https://rustwasm.github.io/docs/wasm-pack/tutorials/index.html [template-docs]: https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html ## 🚴 Usage ### 🐑 Use `cargo generate` to Clone this Template [Learn more about `cargo generate` here.](https://github.com/ashleygwilliams/cargo-generate) ``` cargo generate --git https://github.com/rustwasm/wasm-pack-template.git --name my-project cd my-project ``` ### 🛠️ Build with `wasm-pack build` ``` wasm-pack build ``` ### 🔬 Test in Headless Browsers with `wasm-pack test` ``` wasm-pack test --headless --firefox ``` ### 🎁 Publish to NPM with `wasm-pack publish` ``` wasm-pack publish ``` ## 🔋 Batteries Included * [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) for communicating between WebAssembly and JavaScript. * [`console_error_panic_hook`](https://github.com/rustwasm/console_error_panic_hook) for logging panic messages to the developer console. * [`wee_alloc`](https://github.com/rustwasm/wee_alloc), an allocator optimized for small code size.
tokenizers/tokenizers/examples/unstable_wasm/README.md/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/README.md", "repo_id": "tokenizers", "token_count": 811 }
229
stable
tokenizers/tokenizers/rust-toolchain/0
{ "file_path": "tokenizers/tokenizers/rust-toolchain", "repo_id": "tokenizers", "token_count": 2 }
230
use rand::distributions::WeightedIndex; use rand::prelude::*; use std::cell::RefCell; use std::cmp::{min, Ordering}; use std::collections::BinaryHeap; use std::rc::Rc; type NodeRef = Rc<RefCell<Node>>; type HypothesisRef = Rc<RefCell<Hypothesis>>; type Agenda = BinaryHeap<Hypothesis>; struct Hypothesis { node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64, } impl Hypothesis { pub fn new(node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64) -> Self { Self { node_ref, next, fx, gx, } } } impl PartialEq for Hypothesis { fn eq(&self, other: &Self) -> bool { self.fx == other.fx } } impl Eq for Hypothesis {} impl PartialOrd for Hypothesis { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } // TODO Maybe use Ordered Floats (https://docs.rs/ordered-float/1.0.2/ordered_float/) impl Ord for Hypothesis { fn cmp(&self, other: &Self) -> Ordering { if self.fx < other.fx { Ordering::Less } else { Ordering::Greater } } } /// Structure to implement Viterbi algorithm to find the best encoding, or sample /// from all possible encodings of a given sentence. #[derive(Debug)] pub struct Lattice<'a> { pub(super) sentence: &'a str, len: usize, nodes: Vec<NodeRef>, pub(super) begin_nodes: Vec<Vec<NodeRef>>, pub(super) end_nodes: Vec<Vec<NodeRef>>, _bos_id: usize, _eos_id: usize, } impl std::fmt::Display for Lattice<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let display_pieces = |nodes: &Vec<Vec<NodeRef>>| { nodes .iter() .map(|l| { l.iter() .map(|n| self.piece(&n.borrow())) .collect::<Vec<_>>() }) .collect::<Vec<_>>() }; f.debug_struct("Lattice") .field("sentence", &self.sentence) .field("begin_nodes", &display_pieces(&self.begin_nodes)) .field("end_nodes", &display_pieces(&self.end_nodes)) .finish() } } /// A node from the lattice, that helps reconstruct the underlying `String` #[derive(Debug, Clone)] pub struct Node { // Vocabulary id pub(super) id: usize, // Local lattice identifier pub(super) node_id: usize, pos: usize, length: usize, prev: Option<NodeRef>, backtrace_score: f64, score: f64, } impl PartialEq for Node { fn eq(&self, other: &Node) -> bool { self.id == other.id } } impl Node { pub fn new(id: usize, node_id: usize, pos: usize, length: usize, score: f64) -> Self { Self { id, node_id, pos, length, prev: None, score, backtrace_score: 0.0, } } } /// Returns log(exp(x) + exp(y)). /// if init_mode is true, returns log(exp(y)) == y. 
/// log(\sum_i exp(a[i])) can be computed as /// for (int i = 0; i < a.size(); ++i) /// x = LogSumExp(x, a[i], i == 0); fn log_sum_exp(x: f64, y: f64, init_mode: bool) -> f64 { if init_mode { y } else { let (vmin, vmax) = if x > y { (y, x) } else { (x, y) }; let k_minus_log_epsilon = 50.0; if vmax > vmin + k_minus_log_epsilon { vmax } else { vmax + ((vmin - vmax).exp() + 1.0).ln() } } } impl<'a> Lattice<'a> { pub fn from(sentence: &'a str, bos_id: usize, eos_id: usize) -> Self { let len = sentence.len(); let k_reserved_node_size = 16; // We are adding 2 tokens, bos and eos let mut nodes: Vec<NodeRef> = Vec::with_capacity(k_reserved_node_size); let mut begin_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let mut end_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let bos = Rc::new(RefCell::new(Node::new(bos_id, 0, 0, 0, 0.0))); let eos = Rc::new(RefCell::new(Node::new(eos_id, 1, len, 0, 0.0))); begin_nodes[len].push(Rc::clone(&eos)); end_nodes[0].push(Rc::clone(&bos)); nodes.push(bos); nodes.push(eos); Self { sentence, len, nodes, begin_nodes, end_nodes, _bos_id: bos_id, _eos_id: eos_id, } } pub fn insert(&mut self, pos: usize, length: usize, score: f64, id: usize) { let node_id = self.nodes.len(); let node = Rc::new(RefCell::new(Node::new(id, node_id, pos, length, score))); self.begin_nodes[pos].push(Rc::clone(&node)); self.end_nodes[pos + length].push(Rc::clone(&node)); self.nodes.push(node); } pub fn viterbi(&mut self) -> Vec<NodeRef> { let len = self.len; let mut pos = 0; while pos <= len { if self.begin_nodes[pos].is_empty() { return vec![]; } for rnode in &self.begin_nodes[pos] { rnode.borrow_mut().prev = None; let mut best_score = 0.0; let mut best_node: Option<NodeRef> = None; for lnode in &self.end_nodes[pos] { let score = lnode.borrow().backtrace_score + rnode.borrow().score; if best_node.is_none() || score > best_score { // TODO can we remove this clone ? 
best_node = Some(lnode.clone()); best_score = score } } match best_node { Some(bnode) => { rnode.borrow_mut().prev = Some(Rc::clone(&bnode)); rnode.borrow_mut().backtrace_score = best_score; } None => return vec![], } } if let Some(c) = self.sentence[pos..].chars().next() { pos += c.len_utf8(); } else { break; } } let mut results: Vec<NodeRef> = vec![]; let root = self.begin_nodes[len][0].borrow(); let prev = root.prev.as_ref(); if prev.is_none() { return vec![]; } let mut node: NodeRef = prev.unwrap().clone(); while node.borrow().prev.is_some() { results.push(node.clone()); let n = node.borrow().clone(); node = n.prev.as_ref().unwrap().clone(); } results.reverse(); results } pub fn piece(&self, node: &Node) -> String { self.sentence[node.pos..node.pos + node.length].to_owned() } pub fn tokens(&mut self) -> Vec<String> { self.viterbi() .iter() .map(|node| self.piece(&node.borrow())) .collect() } pub fn nbest(&mut self, n: usize) -> Vec<Vec<NodeRef>> { match n { 0 => vec![], 1 => vec![self.viterbi()], _ => { // let k_reserved_hypothesis_size = 512; let mut agenda: Agenda = BinaryHeap::new(); let mut hypotheses: Vec<Vec<NodeRef>> = vec![]; let eos = self.eos_node(); let score = eos.borrow().score; let hypo = Hypothesis::new(eos, None, score, score); agenda.push(hypo); // Fill backtrace scores self.viterbi(); while !agenda.is_empty() { let top = Rc::new(RefCell::new(agenda.pop().unwrap())); let node = Rc::clone(&top.borrow().node_ref); if node.borrow().id == self.bos_node().borrow().id { let mut hypothesis = vec![]; let mut next: HypothesisRef = Rc::clone(top.borrow().next.as_ref().unwrap()); while next.borrow().next.is_some() { hypothesis.push(next.borrow().node_ref.clone()); let c: HypothesisRef = next.clone(); // let c: Ref<Hypothesis> = next.clone().borrow(); next = Rc::clone(c.borrow().next.as_ref().unwrap()); } hypotheses.push(hypothesis); if hypotheses.len() == n { return hypotheses; } } else { for lnode in &self.end_nodes[node.borrow().pos] { let top_gx = top.borrow().gx; let fx = lnode.borrow().backtrace_score + top_gx; let gx = lnode.borrow().score + top_gx; let hyp = Hypothesis::new(Rc::clone(lnode), Some(Rc::clone(&top)), fx, gx); agenda.push(hyp); } // When the input is too long or contains duplicated phrases, // `agenda` will get extremely big. Here we avoid this case by // dynamically shrinking the agenda. 
let k_max_agenda_size = 100_000; let k_min_agenda_size = 512; if agenda.len() > k_max_agenda_size { let mut new_agenda = BinaryHeap::new(); let len = min(k_min_agenda_size, n * 10); for _i in 0..len { new_agenda.push(agenda.pop().unwrap()); } agenda = new_agenda; } } } hypotheses } } } pub fn nbest_tokens(&mut self, n: usize) -> Vec<Vec<String>> { self.nbest(n) .iter() .map(|v| v.iter().map(|node| self.piece(&node.borrow())).collect()) .collect() } pub fn len(&self) -> usize { self.len } pub fn is_empty(&self) -> bool { self.len == 0 } pub fn bos_node(&self) -> NodeRef { Rc::clone(&self.end_nodes[0][0]) } pub fn eos_node(&self) -> NodeRef { Rc::clone(&self.begin_nodes[self.len][0]) } pub fn surface(&self, n: usize) -> &str { match self.sentence.char_indices().nth(n) { Some((pos, _)) => &self.sentence[pos..], None => "", } } pub fn sentence(&self) -> &str { self.sentence } pub fn populate_marginal(&self, freq: f64, expected: &mut [f64]) -> f64 { let len = self.len(); let n_nodes = self.nodes.len(); let mut alpha = vec![0.0; n_nodes]; let mut beta = vec![0.0; n_nodes]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], lnode.borrow().score + alpha[lid], *lnode == self.end_nodes[pos][0], ); } } } for pos in (0..=len).rev() { // let rpos = len - pos; for lnode in &self.end_nodes[pos] { for rnode in &self.begin_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; beta[lid] = log_sum_exp( beta[lid], rnode.borrow().score + beta[rid], *rnode == self.begin_nodes[pos][0], ); } } } let eos_id = self.begin_nodes[len][0].borrow().node_id; let z = alpha[eos_id]; for pos in 0..len { for node in &self.begin_nodes[pos] { let node_id = node.borrow().node_id; let id = node.borrow().id; let a = alpha[node_id]; let b = beta[node_id]; let total = a + node.borrow().score + b - z; let update = freq * total.exp(); expected[id] += update; } } freq * z } pub fn sample(&self, theta: f64) -> Vec<NodeRef> { let len = self.len(); if len == 0 { return vec![]; } let mut alpha = vec![0.0; self.nodes.len()]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], theta * (lnode.borrow().score + alpha[lid]), *lnode == self.end_nodes[pos][0], ); } } } let mut rng = thread_rng(); let mut results: Vec<NodeRef> = vec![]; let mut probs: Vec<f64> = vec![]; let mut z = alpha[self.eos_node().borrow().node_id]; let mut node = self.eos_node(); loop { probs.clear(); let pos = node.borrow().pos; for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; probs.push((alpha[lid] + theta * lnode.borrow().score - z).exp()) } let dist = WeightedIndex::new(&probs).unwrap(); let index = dist.sample(&mut rng); node = Rc::clone(&self.end_nodes[pos][index]); if node == self.bos_node() { break; } z = alpha[node.borrow().node_id]; results.push(Rc::clone(&node)); } results.reverse(); results } pub fn sample_token(&self, theta: f64) -> Vec<String> { self.sample(theta) .iter() .map(|node| self.piece(&node.borrow())) .collect() } } #[cfg(test)] mod tests { use super::*; use assert_approx_eq::assert_approx_eq; #[test] fn set_sentence() { let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); assert_eq!(lattice.sentence(), ""); 
assert_eq!(lattice.surface(0), ""); let lattice = Lattice::from("test", 1, 2); assert_eq!(lattice.len(), 4); assert_eq!(lattice.sentence(), "test"); assert_eq!(lattice.surface(0), "test"); assert_eq!(lattice.surface(1), "est"); assert_eq!(lattice.surface(2), "st"); assert_eq!(lattice.surface(3), "t"); let bos = lattice.bos_node(); let eos = lattice.eos_node(); assert_eq!(bos.borrow().id, 1); assert_eq!(eos.borrow().id, 2); assert_eq!( lattice.end_nodes[0].first().unwrap().borrow().id, bos.borrow().id ); assert_eq!( lattice.begin_nodes[4].first().unwrap().borrow().id, eos.borrow().id ); let lattice = Lattice::from("テストab", 1, 2); assert_eq!(lattice.len(), 11); assert_eq!(lattice.sentence(), "テストab"); assert_eq!(lattice.surface(0), "テストab"); assert_eq!(lattice.surface(1), "ストab"); assert_eq!(lattice.surface(2), "トab"); assert_eq!(lattice.surface(3), "ab"); assert_eq!(lattice.surface(4), "b"); } #[test] fn insert_test() { let mut lattice = Lattice::from("ABあい", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 3, 0.0, 5); lattice.insert(5, 3, 0.0, 6); lattice.insert(0, 2, 0.0, 7); lattice.insert(1, 4, 0.0, 8); lattice.insert(2, 6, 0.0, 9); // 0 & 1 are bos and eos let node0 = lattice.nodes[2].borrow(); let node1 = lattice.nodes[3].borrow(); let node2 = lattice.nodes[4].borrow(); let node3 = lattice.nodes[5].borrow(); let node4 = lattice.nodes[6].borrow(); let node5 = lattice.nodes[7].borrow(); let node6 = lattice.nodes[8].borrow(); assert_eq!(lattice.piece(&node0), "A"); assert_eq!(lattice.piece(&node1), "B"); assert_eq!(lattice.piece(&node2), "あ"); assert_eq!(lattice.piece(&node3), "い"); assert_eq!(lattice.piece(&node4), "AB"); assert_eq!(lattice.piece(&node5), "Bあ"); assert_eq!(lattice.piece(&node6), "あい"); assert_eq!(node0.pos, 0); assert_eq!(node1.pos, 1); assert_eq!(node2.pos, 2); assert_eq!(node3.pos, 5); assert_eq!(node4.pos, 0); assert_eq!(node5.pos, 1); assert_eq!(node6.pos, 2); assert_eq!(node0.length, 1); assert_eq!(node1.length, 1); assert_eq!(node2.length, 3); assert_eq!(node3.length, 3); assert_eq!(node4.length, 2); assert_eq!(node5.length, 4); assert_eq!(node6.length, 6); assert_eq!(lattice.bos_node().borrow().id, 1); assert_eq!(lattice.eos_node().borrow().id, 2); assert_eq!(node0.id, 3); assert_eq!(node1.id, 4); assert_eq!(node2.id, 5); assert_eq!(node3.id, 6); assert_eq!(node4.id, 7); assert_eq!(node5.id, 8); assert_eq!(node6.id, 9); assert_eq!(lattice.begin_nodes[0].len(), 2); assert_eq!(lattice.begin_nodes[1].len(), 2); assert_eq!(lattice.begin_nodes[2].len(), 2); assert_eq!(lattice.begin_nodes[5].len(), 1); assert_eq!(lattice.begin_nodes[8].len(), 1); assert_eq!(lattice.end_nodes[0].len(), 1); assert_eq!(lattice.end_nodes[1].len(), 1); assert_eq!(lattice.end_nodes[2].len(), 2); assert_eq!(lattice.end_nodes[5].len(), 2); assert_eq!(lattice.end_nodes[8].len(), 2); assert_eq!(lattice.begin_nodes[0][0].borrow().id, node0.id); assert_eq!(lattice.begin_nodes[0][1].borrow().id, node4.id); assert_eq!(lattice.begin_nodes[1][0].borrow().id, node1.id); assert_eq!(lattice.begin_nodes[1][1].borrow().id, node5.id); assert_eq!(lattice.begin_nodes[2][0].borrow().id, node2.id); assert_eq!(lattice.begin_nodes[2][1].borrow().id, node6.id); assert_eq!(lattice.begin_nodes[5][0].borrow().id, node3.id); assert_eq!( lattice.eos_node().borrow().id, lattice.begin_nodes[8][0].borrow().id ); assert_eq!( lattice.bos_node().borrow().id, lattice.end_nodes[0][0].borrow().id ); assert_eq!(node0.id, lattice.end_nodes[1][0].borrow().id); assert_eq!(node1.id, 
lattice.end_nodes[2][0].borrow().id); assert_eq!(node4.id, lattice.end_nodes[2][1].borrow().id); assert_eq!(node2.id, lattice.end_nodes[5][0].borrow().id); assert_eq!(node5.id, lattice.end_nodes[5][1].borrow().id); assert_eq!(node3.id, lattice.end_nodes[8][0].borrow().id); assert_eq!(node6.id, lattice.end_nodes[8][1].borrow().id); } #[test] fn test_viterbi() { let mut lattice = Lattice::from("ABC", 1, 2); assert_eq!(lattice.viterbi(), vec![]); // Still incomplete lattice.insert(0, 1, 0.0, 3); assert_eq!(lattice.viterbi(), vec![]); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); // XXX: In sentence piece this is not tested, still incomplete ? assert_eq!(lattice.viterbi().len(), 3); } #[test] fn test_viterbi2() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); assert_eq!(lattice.tokens(), ["A", "B", "C"]); lattice.insert(0, 2, 2.0, 6); assert_eq!(lattice.tokens(), ["AB", "C"]); lattice.insert(1, 2, 5.0, 7); assert_eq!(lattice.tokens(), ["A", "BC"]); lattice.insert(0, 3, 10.0, 8); assert_eq!(lattice.tokens(), ["ABC"]); } #[test] fn test_nbest() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); lattice.insert(0, 2, 2.0, 6); lattice.insert(1, 2, 5.0, 7); lattice.insert(0, 3, 10.0, 8); let nbests = lattice.nbest_tokens(10); assert_eq!( nbests, vec![ vec!["ABC"], vec!["A", "BC"], vec!["AB", "C"], vec!["A", "B", "C"] ] ); assert!(lattice.nbest_tokens(0).is_empty()); assert_eq!(lattice.nbest_tokens(1), vec![vec!["ABC"]]); } #[test] fn test_log_sum_exp() { let mut x = 0.0; let v: Vec<f64> = vec![1.0, 2.0, 3.0]; for (i, y) in v.iter().enumerate() { x = log_sum_exp(x, *y, i == 0); } assert_approx_eq!(x, v.iter().map(|n| n.exp()).sum::<f64>().ln(), 0.001); } #[test] fn test_populate() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 1.0, 3); // A lattice.insert(1, 1, 1.2, 4); // B lattice.insert(2, 1, 2.5, 5); // C lattice.insert(0, 2, 3.0, 6); // AB lattice.insert(1, 2, 4.0, 7); // BC lattice.insert(0, 3, 2.0, 8); // ABC let mut probs = vec![0.0; 9]; let p1 = (1.0_f64 + 1.2 + 2.5).exp(); let p2 = (3.0_f64 + 2.5).exp(); let p3 = (1.0_f64 + 4.0).exp(); let p4 = 2.0_f64.exp(); let z = p1 + p2 + p3 + p4; let log_z = lattice.populate_marginal(1.0, &mut probs); assert_approx_eq!(log_z, z.ln(), 0.001); assert_approx_eq!(probs[0], 0.0, 0.001); assert_approx_eq!(probs[1], 0.0, 0.001); assert_approx_eq!(probs[2], 0.0, 0.001); assert_approx_eq!(probs[3], (p1 + p3) / z, 0.001); assert_approx_eq!(probs[4], (p1) / z, 0.001); assert_approx_eq!(probs[5], (p1 + p2) / z, 0.001); assert_approx_eq!(probs[6], (p2) / z, 0.001); assert_approx_eq!(probs[7], (p3) / z, 0.001); assert_approx_eq!(probs[8], (p4) / z, 0.001); } }
tokenizers/tokenizers/src/models/unigram/lattice.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/lattice.rs", "repo_id": "tokenizers", "token_count": 12682 }
231
use crate::tokenizer::pattern::Pattern; use crate::tokenizer::Decoder; use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::SysRegex; use serde::{Deserialize, Serialize}; /// Represents the different patterns that `Replace` can use #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub enum ReplacePattern { String(String), Regex(String), } impl From<String> for ReplacePattern { fn from(v: String) -> Self { Self::String(v) } } impl From<&str> for ReplacePattern { fn from(v: &str) -> Self { Self::String(v.to_owned()) } } /// We use this custom deserializer to provide the value for `regex` for `Replace` #[doc(hidden)] #[derive(Deserialize)] #[serde(tag = "type")] struct ReplaceDeserializer { pattern: ReplacePattern, content: String, } impl std::convert::TryFrom<ReplaceDeserializer> for Replace { type Error = Box<dyn std::error::Error + Send + Sync>; fn try_from(v: ReplaceDeserializer) -> Result<Self> { Self::new(v.pattern, v.content) } } /// This normalizer will take a `pattern` (for now only a String) /// and replace every occurrence with `content`. #[derive(Debug, Serialize, Deserialize)] #[serde(tag = "type", try_from = "ReplaceDeserializer")] pub struct Replace { pattern: ReplacePattern, content: String, #[serde(skip)] regex: SysRegex, } impl Clone for Replace { fn clone(&self) -> Self { Self::new(self.pattern.clone(), &self.content).unwrap() } } impl PartialEq for Replace { fn eq(&self, other: &Self) -> bool { self.pattern == other.pattern && self.content == other.content } } impl Replace { pub fn new<I: Into<ReplacePattern>, C: Into<String>>(pattern: I, content: C) -> Result<Self> { let pattern: ReplacePattern = pattern.into(); let regex = match &pattern { ReplacePattern::String(s) => SysRegex::new(&regex::escape(s))?, ReplacePattern::Regex(r) => SysRegex::new(r)?, }; Ok(Self { pattern, content: content.into(), regex, }) } } impl Normalizer for Replace { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.replace(&self.regex, &self.content) } } impl Decoder for Replace { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { tokens .into_iter() .map(|token| -> Result<String> { let mut new_token = "".to_string(); for ((start, stop), is_match) in (&self.regex).find_matches(&token)? 
{ if is_match { new_token.push_str(&self.content); } else { new_token.push_str(&token[start..stop]); } } Ok(new_token) }) .collect() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_replace() { let original = "This is a ''test''"; let normalized = "This is a \"test\""; let mut n = NormalizedString::from(original); Replace::new("''", "\"").unwrap().normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_replace_regex() { let original = "This is a test"; let normalized = "This is a test"; let mut n = NormalizedString::from(original); Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ') .unwrap() .normalize(&mut n) .unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn serialization() { let replace = Replace::new("Hello", "Hey").unwrap(); let replace_s = r#"{"type":"Replace","pattern":{"String":"Hello"},"content":"Hey"}"#; assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s); assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace); let replace = Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ').unwrap(); let replace_s = r#"{"type":"Replace","pattern":{"Regex":"\\s+"},"content":" "}"#; assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s); assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace); } #[test] fn test_replace_decode() { let original = vec!["hello".to_string(), "_hello".to_string()]; let replace = Replace::new("_", " ").unwrap(); assert_eq!( replace.decode_chain(original).unwrap(), vec!["hello", " hello"] ); } }
tokenizers/tokenizers/src/normalizers/replace.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/replace.rs", "repo_id": "tokenizers", "token_count": 2048 }
232
use regex::Regex; use crate::tokenizer::{ pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Whitespace; impl Default for Whitespace { fn default() -> Self { Self } } impl PreTokenizer for Whitespace { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { lazy_static! { static ref RE: Regex = Regex::new(r"\w+|[^\w\s]+").unwrap(); } let re_ref: &Regex = &RE; pretokenized.split(|_, normalized| { normalized.split(Invert(re_ref), SplitDelimiterBehavior::Removed) }) } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct WhitespaceSplit; impl PreTokenizer for WhitespaceSplit { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, normalized| { normalized.split(char::is_whitespace, SplitDelimiterBehavior::Removed) }) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType, PreTokenizer}; #[test] fn basic() { let tests = vec![ ( "Hey man!", vec![("Hey", (0, 3)), ("man", (4, 7)), ("!", (7, 8))], ), ( "How are you doing?", vec![ ("How", (0, 3)), ("are", (4, 7)), ("you", (8, 11)), ("doing", (12, 17)), ("?", (17, 18)), ], ), ("\n", vec![]), ]; let pretok = Whitespace {}; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } #[test] fn whitespace_split() { let tests = vec![ ("Hey man!", vec![("Hey", (0, 3)), ("man!", (4, 8))]), ( "Hey, man, Good?", vec![("Hey,", (0, 4)), ("man,", (5, 9)), ("Good?", (10, 15))], ), ]; let pretok = WhitespaceSplit; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } }
tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs", "repo_id": "tokenizers", "token_count": 1660 }
233
//! This comes from the Rust libcore and is duplicated here because it is not exported //! (cf <https://github.com/rust-lang/rust/blob/25091ed9b7739e12466fb2490baa1e8a2815121c/src/libcore/iter/adapters/mod.rs#L2664>) //! We are now using the version from <https://stackoverflow.com/questions/44544323/how-to-unzip-a-sequence-of-resulta-b-e-to-a-veca-vecb-and-stop-on-f> //! because the one from the libcore seems to cause overflowing stacks in some cases //! It also contains a lines_with_ending that copies std::io::BufRead but keeps line endings. use std::io::BufRead; pub struct ResultShunt<I, E> { iter: I, error: Option<E>, } impl<I, T, E> ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { /// Process the given iterator as if it yielded a `T` instead of a /// `Result<T, _>`. Any errors will stop the inner iterator and /// the overall result will be an error. pub fn process<F, U>(iter: I, mut f: F) -> Result<U, E> where F: FnMut(&mut Self) -> U, { let mut shunt = ResultShunt::new(iter); let value = f(shunt.by_ref()); shunt.reconstruct(value) } fn new(iter: I) -> Self { ResultShunt { iter, error: None } } /// Consume the adapter and rebuild a `Result` value. This should /// *always* be called, otherwise any potential error would be /// lost. fn reconstruct<U>(self, val: U) -> Result<U, E> { match self.error { None => Ok(val), Some(e) => Err(e), } } } impl<I, T, E> Iterator for ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { type Item = T; fn next(&mut self) -> Option<Self::Item> { match self.iter.next() { Some(Ok(v)) => Some(v), Some(Err(e)) => { self.error = Some(e); None } None => None, } } } /// Copied from std::io::BufRead but keep newline characters. #[derive(Debug)] pub struct Lines<B> { buf: B, } pub trait LinesWithEnding<B> { fn lines_with_ending(self) -> Lines<B>; } impl<B> LinesWithEnding<B> for B where B: BufRead, { fn lines_with_ending(self) -> Lines<B> { Lines::<B> { buf: self } } } impl<B: BufRead> Iterator for Lines<B> { type Item = std::io::Result<String>; fn next(&mut self) -> Option<Self::Item> { let mut buf = String::new(); match self.buf.read_line(&mut buf) { Ok(0) => None, Ok(_n) => { // if buf.ends_with('\n') { // buf.pop(); // if buf.ends_with('\r') { // buf.pop(); // } // } Some(Ok(buf)) } Err(e) => Some(Err(e)), } } }
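// ---------------------------------------------------------------------------
// Illustrative tests added for clarity; they are not part of the original file.
// They sketch the intended usage of `lines_with_ending` and `ResultShunt::process`
// under the assumption that the behaviour documented above holds.
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;

    #[test]
    fn lines_keep_their_endings() {
        let reader = Cursor::new("first\nsecond\r\nlast");
        let lines: Vec<String> = reader.lines_with_ending().map(|l| l.unwrap()).collect();
        // Unlike `std::io::BufRead::lines`, the line endings are preserved.
        assert_eq!(lines, vec!["first\n", "second\r\n", "last"]);
    }

    #[test]
    fn shunt_stops_on_first_error() {
        let items: Vec<Result<u32, String>> = vec![Ok(1), Ok(2), Err("boom".to_string()), Ok(3)];
        // The closure consumes plain `u32` values; the first error aborts iteration
        // and is returned in place of the computed value.
        let summed = ResultShunt::process(items.into_iter(), |iter| iter.sum::<u32>());
        assert_eq!(summed, Err("boom".to_string()));
    }
}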
tokenizers/tokenizers/src/utils/iter.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/iter.rs", "repo_id": "tokenizers", "token_count": 1339 }
234
version: 2.1 setup: true orbs: continuation: circleci/[email protected] parameters: nightly: type: boolean default: false jobs: # Ensure running with CircleCI/huggingface check_circleci_user: docker: - image: cimg/python:3.8.12 parallelism: 1 steps: - run: echo $CIRCLE_PROJECT_USERNAME - run: | if [ "$CIRCLE_PROJECT_USERNAME" = "huggingface" ]; then exit 0 else echo "The CI is running under $CIRCLE_PROJECT_USERNAME personal account. Please follow https://support.circleci.com/hc/en-us/articles/360008097173-Troubleshooting-why-pull-requests-are-not-triggering-jobs-on-my-organization- to fix it."; exit -1 fi # Fetch the tests to run fetch_tests: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 parallelism: 1 steps: - checkout - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager GitPython - run: pip install -U --upgrade-strategy eager . - run: mkdir -p test_preparation - run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt - store_artifacts: path: ~/transformers/tests_fetched_summary.txt - run: | if [ -f test_list.txt ]; then cp test_list.txt test_preparation/test_list.txt else touch test_preparation/test_list.txt fi - run: | if [ -f examples_test_list.txt ]; then mv examples_test_list.txt test_preparation/examples_test_list.txt else touch test_preparation/examples_test_list.txt fi - run: | if [ -f filtered_test_list_cross_tests.txt ]; then mv filtered_test_list_cross_tests.txt test_preparation/filtered_test_list_cross_tests.txt else touch test_preparation/filtered_test_list_cross_tests.txt fi - run: | if [ -f doctest_list.txt ]; then cp doctest_list.txt test_preparation/doctest_list.txt else touch test_preparation/doctest_list.txt fi - run: | if [ -f test_repo_utils.txt ]; then mv test_repo_utils.txt test_preparation/test_repo_utils.txt else touch test_preparation/test_repo_utils.txt fi - run: python utils/tests_fetcher.py --filter_tests - run: | if [ -f test_list.txt ]; then mv test_list.txt test_preparation/filtered_test_list.txt else touch test_preparation/filtered_test_list.txt fi - store_artifacts: path: test_preparation/test_list.txt - store_artifacts: path: test_preparation/doctest_list.txt - store_artifacts: path: ~/transformers/test_preparation/filtered_test_list.txt - store_artifacts: path: test_preparation/examples_test_list.txt - run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation - run: | if [ ! -s test_preparation/generated_config.yml ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi - run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt - store_artifacts: path: test_preparation/generated_config.txt - store_artifacts: path: test_preparation/filtered_test_list_cross_tests.txt - continuation/continue: configuration_path: test_preparation/generated_config.yml # To run all tests for the nightly build fetch_all_tests: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 parallelism: 1 steps: - checkout - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager GitPython - run: pip install -U --upgrade-strategy eager . 
- run: | mkdir test_preparation echo -n "tests" > test_preparation/test_list.txt echo -n "all" > test_preparation/examples_test_list.txt echo -n "tests/repo_utils" > test_preparation/test_repo_utils.txt - run: | echo -n "tests" > test_list.txt python utils/tests_fetcher.py --filter_tests mv test_list.txt test_preparation/filtered_test_list.txt - run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation - run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt - store_artifacts: path: test_preparation/generated_config.txt - continuation/continue: configuration_path: test_preparation/generated_config.yml check_code_quality: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 resource_class: large environment: TRANSFORMERS_IS_CI: yes PYTEST_TIMEOUT: 120 parallelism: 1 steps: - checkout - restore_cache: keys: - v0.7-code_quality-pip-{{ checksum "setup.py" }} - v0.7-code-quality-pip - restore_cache: keys: - v0.7-code_quality-site-packages-{{ checksum "setup.py" }} - v0.7-code-quality-site-packages - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager .[all,quality] - save_cache: key: v0.7-code_quality-pip-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - save_cache: key: v0.7-code_quality-site-packages-{{ checksum "setup.py" }} paths: - '~/.pyenv/versions/' - run: name: Show installed libraries and their versions command: pip freeze | tee installed.txt - store_artifacts: path: ~/transformers/installed.txt - run: ruff check examples tests src utils - run: ruff format tests src utils --check - run: python utils/custom_init_isort.py --check_only - run: python utils/sort_auto_mappings.py --check_only - run: python utils/check_doc_toc.py check_repository_consistency: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 resource_class: large environment: TRANSFORMERS_IS_CI: yes PYTEST_TIMEOUT: 120 parallelism: 1 steps: - checkout - restore_cache: keys: - v0.7-repository_consistency-pip-{{ checksum "setup.py" }} - v0.7-repository_consistency-pip - restore_cache: keys: - v0.7-repository_consistency-site-packages-{{ checksum "setup.py" }} - v0.7-repository_consistency-site-packages - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager .[all,quality] - save_cache: key: v0.7-repository_consistency-pip-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - save_cache: key: v0.7-repository_consistency-site-packages-{{ checksum "setup.py" }} paths: - '~/.pyenv/versions/' - run: name: Show installed libraries and their versions command: pip freeze | tee installed.txt - store_artifacts: path: ~/transformers/installed.txt - run: python utils/check_copies.py - run: python utils/check_table.py - run: python utils/check_dummies.py - run: python utils/check_repo.py - run: python utils/check_inits.py - run: python utils/check_config_docstrings.py - run: python utils/check_config_attributes.py - run: python utils/check_doctest_list.py - run: make deps_table_check_updated - run: python utils/update_metadata.py --check-only - run: python utils/check_task_guides.py - run: python utils/check_docstrings.py - run: python utils/check_support_list.py workflows: version: 2 setup_and_quality: when: not: <<pipeline.parameters.nightly>> jobs: - check_circleci_user - check_code_quality - check_repository_consistency - fetch_tests nightly: when: <<pipeline.parameters.nightly>> jobs: - check_circleci_user - check_code_quality - 
check_repository_consistency - fetch_all_tests
transformers/.circleci/config.yml/0
{ "file_path": "transformers/.circleci/config.yml", "repo_id": "transformers", "token_count": 5200 }
235
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11 FROM nvcr.io/nvidia/pytorch:23.04-py3 LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive ARG PYTORCH='2.2.0' # Example: `cu102`, `cu113`, etc. ARG CUDA='cu121' RUN apt -y update RUN apt install -y libaio-dev RUN python3 -m pip install --no-cache-dir --upgrade pip ARG REF=main RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing] # Install latest release PyTorch # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.) # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops) RUN python3 -m pip uninstall -y torch torchvision torchaudio && python3 -m pip install --no-cache-dir -U torch==$PYTORCH torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate # Uninstall `transformer-engine` shipped with the base image RUN python3 -m pip uninstall -y transformer-engine # Uninstall `torch-tensorrt` shipped with the base image RUN python3 -m pip uninstall -y torch-tensorrt # recompile apex RUN python3 -m pip uninstall -y apex # RUN git clone https://github.com/NVIDIA/apex # `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners # TODO: check if there is alternative way to install latest apex # RUN cd apex && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . # Pre-build **latest** DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout) RUN python3 -m pip uninstall -y deepspeed # This has to be run (again) inside the GPU VMs running the tests. # The installation works here, but some tests fail, if we don't pre-build deepspeed again in the VMs running the tests. # TODO: Find out why test fail. RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop # The base image ships with `pydantic==1.8.2` which is not working - i.e. the next command fails RUN python3 -m pip install -U --no-cache-dir "pydantic<2" RUN python3 -c "from deepspeed.launcher.runner import main"
transformers/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile", "repo_id": "transformers", "token_count": 890 }
236
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Wie erstellt man eine benutzerdefinierte Pipeline?

In dieser Anleitung sehen wir uns an, wie Sie eine benutzerdefinierte Pipeline erstellen und sie auf dem [Hub](https://hf.co/models) freigeben oder sie der
🤗 Transformers-Bibliothek hinzufügen.

Zuallererst müssen Sie entscheiden, welche Roheingaben die Pipeline verarbeiten kann. Es kann sich um Strings, rohe Bytes,
Dictionaries oder was auch immer die wahrscheinlichste gewünschte Eingabe ist handeln. Versuchen Sie, diese Eingaben so weit wie möglich in reinem Python zu halten,
denn das macht die Kompatibilität einfacher (auch mit anderen Sprachen über JSON). Dies werden die Eingaben der
Pipeline (`preprocess`).

Definieren Sie dann die `outputs`. Hier gilt dieselbe Richtlinie wie für die Eingaben: je einfacher, desto besser. Dies werden die Ausgaben der
Methode `postprocess`.

Beginnen Sie damit, von der Basisklasse `Pipeline` zu erben und die 4 Methoden zu implementieren, die dafür benötigt werden:
`preprocess`, `_forward`, `postprocess` und `_sanitize_parameters`.

```python
from transformers import Pipeline


class MyPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "maybe_arg" in kwargs:
            preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, inputs, maybe_arg=2):
        model_input = Tensor(inputs["input_ids"])
        return {"model_input": model_input}

    def _forward(self, model_inputs):
        # model_inputs == {"model_input": model_input}
        outputs = self.model(**model_inputs)
        # Maybe {"logits": Tensor(...)}
        return outputs

    def postprocess(self, model_outputs):
        best_class = model_outputs["logits"].softmax(-1)
        return best_class
```

Die Struktur dieser Aufteilung soll eine relativ nahtlose Unterstützung für CPU/GPU ermöglichen und es gleichzeitig erlauben,
die Vor-/Nachbearbeitung auf der CPU in anderen Threads durchzuführen.

`preprocess` nimmt die ursprünglich definierten Eingaben und wandelt sie in etwas um, das in das Modell eingespeist werden kann. Es kann
mehr Informationen enthalten und ist normalerweise ein `Dict`.

`_forward` ist das Implementierungsdetail und ist nicht dafür gedacht, direkt aufgerufen zu werden. `forward` ist die bevorzugte
Methode für den Aufruf, da sie Sicherheitsvorkehrungen enthält, die sicherstellen, dass alles auf dem erwarteten Gerät funktioniert. Wenn etwas
mit einem realen Modell verknüpft ist, gehört es in die Methode `_forward`, alles andere gehört in die Methoden preprocess/postprocess.

Die Methode `postprocess` nimmt die Ausgabe von `_forward` und verwandelt sie in die endgültige Ausgabe, die zuvor
festgelegt wurde.

Die Methode `_sanitize_parameters` ermöglicht es dem Benutzer, beliebige Parameter zu übergeben, wann immer er möchte, sei es bei der
Initialisierung `pipeline(...., maybe_arg=4)` oder zur Aufrufzeit `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.

Die Rückgabe von `_sanitize_parameters` sind die 3 Dicts von kwargs, die direkt an `preprocess` übergeben werden, `_forward` und `postprocess` übergeben werden. Füllen Sie nichts aus, wenn der Aufrufer keinen zusätzlichen Parameter angegeben hat. Das erlaubt es, die Standardargumente in der Funktionsdefinition beizubehalten, was immer "natürlicher" ist. Ein klassisches Beispiel wäre das Argument `top_k` in der Nachbearbeitung bei Klassifizierungsaufgaben. ```python >>> pipe = pipeline("my-new-task") >>> pipe("This is a test") [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05} {"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}] >>> pipe("This is a test", top_k=2) [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}] ``` In order to achieve that, we'll update our `postprocess` method with a default parameter to `5`. and edit `_sanitize_parameters` to allow this new parameter. ```python def postprocess(self, model_outputs, top_k=5): best_class = model_outputs["logits"].softmax(-1) # Add logic to handle top_k return best_class def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] postprocess_kwargs = {} if "top_k" in kwargs: postprocess_kwargs["top_k"] = kwargs["top_k"] return preprocess_kwargs, {}, postprocess_kwargs ``` Versuchen Sie, die Eingaben/Ausgaben sehr einfach und idealerweise JSON-serialisierbar zu halten, da dies die Verwendung der Pipeline sehr einfach macht ohne dass die Benutzer neue Arten von Objekten verstehen müssen. Es ist auch relativ üblich, viele verschiedene Arten von Argumenten zu unterstützen von Argumenten zu unterstützen (Audiodateien, die Dateinamen, URLs oder reine Bytes sein können). ## Hinzufügen zur Liste der unterstützten Aufgaben Um Ihre `neue Aufgabe` in die Liste der unterstützten Aufgaben aufzunehmen, müssen Sie sie zur `PIPELINE_REGISTRY` hinzufügen: ```python from transformers.pipelines import PIPELINE_REGISTRY PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, ) ``` Wenn Sie möchten, können Sie ein Standardmodell angeben. In diesem Fall sollte es mit einer bestimmten Revision (die der Name einer Verzweigung oder ein Commit-Hash sein kann, hier haben wir `"abcdef"` genommen) sowie mit dem Typ versehen sein: ```python PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, default={"pt": ("user/awesome_model", "abcdef")}, type="text", # current support type: text, audio, image, multimodal ) ``` ## Teilen Sie Ihre Pipeline auf dem Hub Um Ihre benutzerdefinierte Pipeline auf dem Hub freizugeben, müssen Sie lediglich den benutzerdefinierten Code Ihrer `Pipeline`-Unterklasse in einer Python-Datei speichern. 
Nehmen wir zum Beispiel an, Sie möchten eine benutzerdefinierte Pipeline für die Klassifizierung von Satzpaaren wie folgt verwenden: ```py import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits} ``` Die Implementierung ist Framework-unabhängig und funktioniert für PyTorch- und TensorFlow-Modelle. Wenn wir dies in einer Datei einer Datei namens `pair_classification.py` gespeichert haben, können wir sie importieren und wie folgt registrieren: ```py from pair_classification import PairClassificationPipeline from transformers.pipelines import PIPELINE_REGISTRY from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, tf_model=TFAutoModelForSequenceClassification, ) ``` Sobald dies geschehen ist, können wir es mit einem vortrainierten Modell verwenden. Zum Beispiel wurde `sgugger/finetuned-bert-mrpc` auf den auf den MRPC-Datensatz abgestimmt, der Satzpaare als Paraphrasen oder nicht klassifiziert. ```py from transformers import pipeline classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc") ``` Dann können wir sie auf dem Hub mit der Methode `save_pretrained` in einem `Repository` freigeben: ```py from huggingface_hub import Repository repo = Repository("test-dynamic-pipeline", clone_from="{your_username}/test-dynamic-pipeline") classifier.save_pretrained("test-dynamic-pipeline") repo.push_to_hub() ``` Dadurch wird die Datei, in der Sie `PairClassificationPipeline` definiert haben, in den Ordner `"test-dynamic-pipeline"` kopiert, und speichert das Modell und den Tokenizer der Pipeline, bevor Sie alles in das Repository verschieben `{Ihr_Benutzername}/test-dynamic-pipeline`. Danach kann jeder die Pipeline verwenden, solange er die Option `trust_remote_code=True` angeben: ```py from transformers import pipeline classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True) ``` ## Hinzufügen der Pipeline zu 🤗 Transformers Wenn Sie Ihre Pipeline zu 🤗 Transformers beitragen möchten, müssen Sie ein neues Modul im Untermodul `pipelines` hinzufügen mit dem Code Ihrer Pipeline hinzufügen. Fügen Sie es dann der Liste der in `pipelines/__init__.py` definierten Aufgaben hinzu. Dann müssen Sie noch Tests hinzufügen. Erstellen Sie eine neue Datei `tests/test_pipelines_MY_PIPELINE.py` mit Beispielen für die anderen Tests. 
Die Funktion `run_pipeline_test` ist sehr allgemein gehalten und läuft auf kleinen Zufallsmodellen auf jeder möglichen Architektur, wie durch `model_mapping` und `tf_model_mapping` definiert. Dies ist sehr wichtig, um die zukünftige Kompatibilität zu testen, d.h. wenn jemand ein neues Modell für `XXXForQuestionAnswering` hinzufügt, wird der Pipeline-Test versuchen, mit diesem Modell zu arbeiten. Da die Modelle zufällig sind, ist es ist es unmöglich, die tatsächlichen Werte zu überprüfen. Deshalb gibt es eine Hilfsfunktion `ANY`, die einfach versucht, die Ausgabe der Pipeline TYPE. Außerdem *müssen* Sie 2 (idealerweise 4) Tests implementieren. - `test_small_model_pt` : Definieren Sie 1 kleines Modell für diese Pipeline (es spielt keine Rolle, ob die Ergebnisse keinen Sinn ergeben) und testen Sie die Ausgaben der Pipeline. Die Ergebnisse sollten die gleichen sein wie bei `test_small_model_tf`. - `test_small_model_tf` : Definieren Sie 1 kleines Modell für diese Pipeline (es spielt keine Rolle, ob die Ergebnisse keinen Sinn ergeben) und testen Sie die Ausgaben der Pipeline. Die Ergebnisse sollten die gleichen sein wie bei `test_small_model_pt`. - `test_large_model_pt` (`optional`): Testet die Pipeline an einer echten Pipeline, bei der die Ergebnisse Sinn machen. Diese Tests sind langsam und sollten als solche gekennzeichnet werden. Hier geht es darum, die Pipeline zu präsentieren und sicherzustellen sicherzustellen, dass es in zukünftigen Versionen keine Abweichungen gibt. - `test_large_model_tf` (`optional`): Testet die Pipeline an einer echten Pipeline, bei der die Ergebnisse Sinn machen. Diese Tests sind langsam und sollten als solche gekennzeichnet werden. Hier geht es darum, die Pipeline zu präsentieren und sicherzustellen sicherzustellen, dass es in zukünftigen Versionen keine Abweichungen gibt.
transformers/docs/source/de/add_new_pipeline.md/0
{ "file_path": "transformers/docs/source/de/add_new_pipeline.md", "repo_id": "transformers", "token_count": 4595 }
237
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Transformers Agents <Tip warning={true}> Transformers Agents ist eine experimentelle API, die jederzeit geändert werden kann. Die von den Agenten zurückgegebenen Ergebnisse zurückgegeben werden, können variieren, da sich die APIs oder die zugrunde liegenden Modelle ändern können. </Tip> Transformers Version v4.29.0, die auf dem Konzept von *Tools* und *Agenten* aufbaut. Sie können damit spielen in [dieses Colab](https://colab.research.google.com/drive/1c7MHD-T1forUPGcC_jlwsIptOzpG3hSj). Kurz gesagt, es bietet eine API für natürliche Sprache auf der Grundlage von Transformers: Wir definieren eine Reihe von kuratierten Tools und entwerfen einen Agenten, um natürliche Sprache zu interpretieren und diese Werkzeuge zu verwenden. Es ist von vornherein erweiterbar; wir haben einige relevante Tools kuratiert, aber wir werden Ihnen zeigen, wie das System einfach erweitert werden kann, um jedes von der Community entwickelte Tool zu verwenden. Beginnen wir mit einigen Beispielen dafür, was mit dieser neuen API erreicht werden kann. Sie ist besonders leistungsfähig, wenn es um Sie ist besonders leistungsstark, wenn es um multimodale Aufgaben geht. Lassen Sie uns also eine Runde drehen, um Bilder zu erzeugen und Text vorzulesen. ```py agent.run("Caption the following image", image=image) ``` | **Input** | **Output** | |-----------------------------------------------------------------------------------------------------------------------------|-----------------------------------| | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/beaver.png" width=200> | A beaver is swimming in the water | --- ```py agent.run("Read the following text out loud", text=text) ``` | **Input** | **Output** | |-------------------------------------------------------------------------------------------------------------------------|----------------------------------------------| | A beaver is swimming in the water | <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tts_example.wav" type="audio/wav"> your browser does not support the audio element. 
</audio> --- ```py agent.run( "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?", document=document, ) ``` | **Input** | **Output** | |-----------------------------------------------------------------------------------------------------------------------------|----------------| | <img src="https://datasets-server.huggingface.co/assets/hf-internal-testing/example-documents/--/hf-internal-testing--example-documents/test/0/image/image.jpg" width=200> | ballroom foyer | ## Schnellstart Bevor Sie `agent.run` verwenden können, müssen Sie einen Agenten instanziieren, der ein großes Sprachmodell (LLM) ist. Wir bieten Unterstützung für openAI-Modelle sowie für OpenSource-Alternativen von BigCode und OpenAssistant. Die openAI Modelle sind leistungsfähiger (erfordern aber einen openAI-API-Schlüssel, können also nicht kostenlos verwendet werden); Hugging Face bietet kostenlosen Zugang zu Endpunkten für BigCode- und OpenAssistant-Modelle. To start with, please install the `agents` extras in order to install all default dependencies. ```bash pip install transformers[agents] ``` Um openAI-Modelle zu verwenden, instanziieren Sie einen [`OpenAiAgent`], nachdem Sie die `openai`-Abhängigkeit installiert haben: ```bash pip install openai ``` ```py from transformers import OpenAiAgent agent = OpenAiAgent(model="text-davinci-003", api_key="<your_api_key>") ``` Um BigCode oder OpenAssistant zu verwenden, melden Sie sich zunächst an, um Zugriff auf die Inference API zu erhalten: ```py from huggingface_hub import login login("<YOUR_TOKEN>") ``` Dann instanziieren Sie den Agenten ```py from transformers import HfAgent # Starcoder agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") # StarcoderBase # agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoderbase") # OpenAssistant # agent = HfAgent(url_endpoint="https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5") ``` Dies geschieht mit der Inferenz-API, die Hugging Face derzeit kostenlos zur Verfügung stellt. Wenn Sie Ihren eigenen Inferenz Endpunkt für dieses Modell (oder einen anderen) haben, können Sie die obige URL durch Ihren URL-Endpunkt ersetzen. <Tip> StarCoder und OpenAssistant sind kostenlos und leisten bei einfachen Aufgaben bewundernswert gute Arbeit. Allerdings halten die Kontrollpunkte nicht, wenn es um komplexere Aufforderungen geht. Wenn Sie mit einem solchen Problem konfrontiert sind, empfehlen wir Ihnen, das OpenAI Modell auszuprobieren, das zwar leider nicht quelloffen ist, aber zur Zeit eine bessere Leistung erbringt. </Tip> Sie sind jetzt startklar! Lassen Sie uns in die beiden APIs eintauchen, die Ihnen jetzt zur Verfügung stehen. ### Einzelne Ausführung (run) Die Methode der einmaligen Ausführung ist die Verwendung der [`~Agent.run`] Methode des Agenten: ```py agent.run("Draw me a picture of rivers and lakes.") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200> Es wählt automatisch das (oder die) Werkzeug(e) aus, das (die) für die von Ihnen gewünschte Aufgabe geeignet ist (sind) und führt es (sie) entsprechend aus. Es kann eine oder mehrere Aufgaben in der gleichen Anweisung ausführen (je komplexer Ihre Anweisung ist, desto wahrscheinlicher ist ein der Agent scheitern). 
```py
agent.run("Draw me a picture of the sea then transform the picture to add an island")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sea_and_island.png" width=200>

<br/>

Jede [`~Agent.run`] Operation ist unabhängig, so dass Sie sie mehrmals hintereinander mit unterschiedlichen Aufgaben ausführen können.

Beachten Sie, dass Ihr `Agent` nur ein großes Sprachmodell ist, so dass kleine Variationen in Ihrer Eingabeaufforderung völlig
unterschiedliche Ergebnisse liefern können. Es ist wichtig, dass Sie die Aufgabe, die Sie ausführen möchten, so genau wie möglich erklären.
Wie man gute Prompts schreibt, erläutern wir ausführlicher [hier](custom_tools#writing-good-user-inputs).

Wenn Sie einen Status über mehrere Ausführungen hinweg beibehalten oder dem Agenten Nicht-Text-Objekte übergeben möchten, können Sie dies tun,
indem Sie Variablen übergeben, die der Agent verwenden soll. Sie könnten zum Beispiel das erste Bild von Flüssen und Seen erzeugen
und das Modell bitten, dieses Bild zu aktualisieren und eine Insel hinzuzufügen, indem Sie Folgendes tun:

```python
picture = agent.run("Generate a picture of rivers and lakes.")
updated_picture = agent.run("Transform the image in `picture` to add an island to it.", picture=picture)
```

<Tip>

Dies kann hilfreich sein, wenn das Modell Ihre Anfrage nicht verstehen kann und die Werkzeuge verwechselt. Ein Beispiel wäre:

```py
agent.run("Draw me the picture of a capybara swimming in the sea")
```

Hier könnte das Modell die Anweisung auf zwei Arten interpretieren:
- Die Funktion `Text-zu-Bild` erzeugt ein Wasserschwein, das im Meer schwimmt.
- Oder Sie lassen das `Text-zu-Bild` ein Wasserschwein erzeugen und verwenden dann das Werkzeug `Bildtransformation`, um es im Meer schwimmen zu lassen.

Falls Sie das erste Szenario erzwingen möchten, können Sie dies tun, indem Sie die Eingabeaufforderung als Argument übergeben:

```py
agent.run("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea")
```

</Tip>

### Chat-basierte Ausführung (Chat)

Der Agent verfügt auch über einen Chat-basierten Ansatz, der die Methode [`~Agent.chat`] verwendet:

```py
agent.chat("Generate a picture of rivers and lakes")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200>

```py
agent.chat("Transform the picture so that there is a rock in there")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_and_beaver.png" width=200>

<br/>

Dies ist ein interessanter Ansatz, wenn Sie den Zustand über Anweisungen hinweg beibehalten möchten. Er eignet sich besser zum
Experimentieren, funktioniert aber tendenziell besser bei einzelnen Anweisungen als bei komplexen Anweisungen (die die
[`~Agent.run`] Methode besser verarbeiten kann).

Diese Methode kann auch Argumente entgegennehmen, wenn Sie Nicht-Text-Typen oder bestimmte Aufforderungen übergeben möchten.

### ⚠️ Fernausführung

Zu Demonstrationszwecken und damit es mit allen Setups verwendet werden kann, haben wir Remote-Executors für mehrere
der Standard-Tools erstellt, auf die der Agent in dieser Version Zugriff hat. Diese werden erstellt mit
[inference endpoints](https://huggingface.co/inference-endpoints).

Wir haben diese vorerst deaktiviert, aber um zu sehen, wie Sie selbst Remote-Executor-Tools einrichten können,
empfehlen wir die Lektüre des [custom tool guide](./custom_tools).

### Was passiert hier? Was sind Tools und was sind Agenten?

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/diagram.png">

#### Agenten

Der "Agent" ist hier ein großes Sprachmodell, das wir mit einem Prompt so instruieren, dass es Zugang zu einem bestimmten Satz von Tools hat.

LLMs sind ziemlich gut darin, kleine Codeproben zu erzeugen. Diese API macht sich das zunutze, indem sie das LLM auffordert, ein
kleines Codebeispiel zu liefern, das eine Aufgabe mit einer Reihe von Werkzeugen ausführt. Diese Aufforderung wird dann ergänzt durch die
Aufgabe, die Sie Ihrem Agenten geben, und die Beschreibung der Werkzeuge, die Sie ihm geben. Auf diese Weise erhält er Zugriff auf die
Dokumentation der Tools, insbesondere die erwarteten Eingaben und Ausgaben, und kann den entsprechenden Code generieren.

#### Tools

Tools sind sehr einfach: Sie bestehen aus einer einzigen Funktion mit einem Namen und einer Beschreibung. Wir verwenden dann die Beschreibungen
dieser Tools, um den Agenten aufzufordern. Anhand der Eingabeaufforderung zeigen wir dem Agenten, wie er die Tools nutzen kann, um das zu tun,
was in der Abfrage angefordert wurde.

Dies geschieht mit brandneuen Tools und nicht mit Pipelines, denn der Agent schreibt besseren Code mit sehr atomaren Tools.
Pipelines sind stärker refaktorisiert und fassen oft mehrere Aufgaben in einer einzigen zusammen. Tools sind dafür gedacht, sich auf
eine einzige, sehr einfache Aufgabe zu konzentrieren.

#### Code-Ausführung?!

Dieser Code wird dann mit unserem kleinen Python-Interpreter auf den mit Ihren Tools übergebenen Eingaben ausgeführt.
Wir hören Sie schon schreien "Willkürliche Codeausführung!", aber lassen Sie uns erklären, warum das nicht der Fall ist.

Die einzigen Funktionen, die aufgerufen werden können, sind die von Ihnen zur Verfügung gestellten Tools und die Druckfunktion, so dass bereits
eingeschränkt ist, was ausgeführt werden kann. Sie sollten auf der sicheren Seite sein, wenn es sich auf Hugging-Face-Tools beschränkt.

Dann lassen wir keine Attributsuche oder Importe zu (die ohnehin nicht benötigt werden, um die Inputs/Outputs an eine kleine
Gruppe von Funktionen weiterzureichen), so dass alle offensichtlichen Angriffe (und Sie müssten den LLM dazu auffordern, sie auszugeben)
kein Problem darstellen sollten. Wenn Sie auf Nummer sicher gehen wollen, können Sie die run()-Methode mit dem zusätzlichen
Argument return_code=True ausführen. In diesem Fall gibt der Agent nur den auszuführenden Code zurück und Sie können entscheiden,
ob Sie ihn ausführen möchten oder nicht.

Die Ausführung bricht bei jeder Zeile ab, in der versucht wird, eine illegale Operation auszuführen, oder wenn ein regulärer Python-Fehler
im vom Agenten generierten Code auftritt.

### Ein kuratierter Satz von Tools

Wir haben eine Reihe von Tools identifiziert, die solche Agenten unterstützen können. Hier ist eine aktualisierte Liste der Tools, die wir
in `transformers` integriert haben:

- **Beantwortung von Fragen zu Dokumenten**: Beantworten Sie anhand eines Dokuments (z.B. PDF) im Bildformat eine Frage zu diesem Dokument ([Donut](./model_doc/donut))
- **Beantworten von Textfragen**: Geben Sie einen langen Text und eine Frage an, beantworten Sie die Frage im Text ([Flan-T5](./model_doc/flan-t5))
- **Unbedingte Bildunterschriften**: Beschriften Sie das Bild! ([BLIP](./model_doc/blip))
- **Bildfragebeantwortung**: Beantworten Sie bei einem Bild eine Frage zu diesem Bild ([VILT](./model_doc/vilt))
- **Bildsegmentierung**: Geben Sie ein Bild und einen Prompt an und geben Sie die Segmentierungsmaske dieses Prompts aus ([CLIPSeg](./model_doc/clipseg))
- **Sprache in Text**: Geben Sie eine Audioaufnahme einer sprechenden Person an und transkribieren Sie die Sprache in Text ([Whisper](./model_doc/whisper))
- **Text in Sprache**: wandelt Text in Sprache um ([SpeechT5](./model_doc/speecht5))
- **Zero-Shot-Textklassifizierung**: Ermitteln Sie anhand eines Textes und einer Liste von Bezeichnungen, welcher Bezeichnung der Text am ehesten entspricht ([BART](./model_doc/bart))
- **Textzusammenfassung**: fassen Sie einen langen Text in einem oder wenigen Sätzen zusammen ([BART](./model_doc/bart))
- **Übersetzung**: Übersetzen des Textes in eine bestimmte Sprache ([NLLB](./model_doc/nllb))

Diese Tools sind in Transformers integriert und können auch manuell verwendet werden, zum Beispiel:

```py
from transformers import load_tool

tool = load_tool("text-to-speech")
audio = tool("This is a text to speech tool")
```

### Benutzerdefinierte Tools

Wir haben zwar eine Reihe von Tools identifiziert, sind aber der festen Überzeugung, dass der Hauptwert dieser Implementierung
in der Möglichkeit besteht, benutzerdefinierte Tools schnell zu erstellen und weiterzugeben.

Indem Sie den Code eines Tools in einen Hugging Face Space oder ein Modell-Repository stellen, können Sie das Tool
direkt mit dem Agenten nutzen. Wir haben ein paar neue, **transformers-agnostic** Tools zur
[`huggingface-tools` Organisation](https://huggingface.co/huggingface-tools) hinzugefügt:

- **Text-Downloader**: zum Herunterladen eines Textes von einer Web-URL
- **Text zu Bild**: erzeugt ein Bild nach einer Eingabeaufforderung und nutzt dabei stabile Diffusion
- **Bildtransformation**: verändert ein Bild anhand eines Ausgangsbildes und einer Eingabeaufforderung, unter Ausnutzung der stabilen pix2pix-Diffusion
- **Text zu Video**: Erzeugen eines kleinen Videos nach einer Eingabeaufforderung, unter Verwendung von damo-vilab

Das Text-zu-Bild-Tool, das wir von Anfang an verwendet haben, ist ein Remote-Tool, das sich in
[*huggingface-tools/text-to-image*](https://huggingface.co/spaces/huggingface-tools/text-to-image) befindet! Wir werden
weiterhin solche Tools für diese und andere Organisationen veröffentlichen, um diese Implementierung weiter zu verbessern.

Die Agenten haben standardmäßig Zugriff auf die Tools, die sich auf [*huggingface-tools*](https://huggingface.co/huggingface-tools) befinden.
Wie Sie Ihre eigenen Tools schreiben und freigeben können und wie Sie jedes benutzerdefinierte Tool, das sich auf dem Hub befindet, nutzen können, erklären wir in [folgender Anleitung](custom_tools).

### Code-Erzeugung

Bisher haben wir gezeigt, wie Sie die Agenten nutzen können, um Aktionen für Sie durchzuführen. Der Agent generiert jedoch nur Code,
den wir dann mit einem sehr eingeschränkten Python-Interpreter ausführen. Falls Sie den generierten Code in einer anderen Umgebung
verwenden möchten, können Sie den Agenten auffordern, den Code zusammen mit einer Tooldefinition und genauen Importen zurückzugeben.

Zum Beispiel die folgende Anweisung ```python agent.run("Draw me a picture of rivers and lakes", return_code=True) ``` gibt den folgenden Code zurück ```python from transformers import load_tool image_generator = load_tool("huggingface-tools/text-to-image") image = image_generator(prompt="rivers and lakes") ``` die Sie dann selbst ändern und ausführen können.
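Zur Veranschaulichung des oben beschriebenen Tool-Konzepts folgt eine minimale, hypothetische Skizze eines eigenen Tools.
Der Klassenname, der Tool-Name und die Beschreibung sind frei erfunden; wie Sie eigene Tools tatsächlich erstellen und auf dem
Hub teilen, beschreibt die [Anleitung zu benutzerdefinierten Tools](custom_tools):

```python
from transformers import Tool


class TextReverserTool(Tool):
    # Name und Beschreibung werden dem Agenten im Prompt zur Verfügung gestellt.
    name = "text_reverser"
    description = "This tool reverses the text given to it and returns the reversed text."
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, text: str) -> str:
        return text[::-1]


# Das Tool kann dem Agenten z.B. als zusätzliches Werkzeug übergeben werden (Annahme: HfAgent wie oben instanziiert):
# agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[TextReverserTool()])
```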
transformers/docs/source/de/transformers_agents.md/0
{ "file_path": "transformers/docs/source/de/transformers_agents.md", "repo_id": "transformers", "token_count": 6629 }
238
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Create a custom architecture An [`AutoClass`](model_doc/auto) automatically infers the model architecture and downloads pretrained configuration and weights. Generally, we recommend using an `AutoClass` to produce checkpoint-agnostic code. But users who want more control over specific model parameters can create a custom 🤗 Transformers model from just a few base classes. This could be particularly useful for anyone who is interested in studying, training or experimenting with a 🤗 Transformers model. In this guide, dive deeper into creating a custom model without an `AutoClass`. Learn how to: - Load and customize a model configuration. - Create a model architecture. - Create a slow and fast tokenizer for text. - Create an image processor for vision tasks. - Create a feature extractor for audio tasks. - Create a processor for multimodal tasks. ## Configuration A [configuration](main_classes/configuration) refers to a model's specific attributes. Each model configuration has different attributes; for instance, all NLP models have the `hidden_size`, `num_attention_heads`, `num_hidden_layers` and `vocab_size` attributes in common. These attributes specify the number of attention heads or hidden layers to construct a model with. Get a closer look at [DistilBERT](model_doc/distilbert) by accessing [`DistilBertConfig`] to inspect it's attributes: ```py >>> from transformers import DistilBertConfig >>> config = DistilBertConfig() >>> print(config) DistilBertConfig { "activation": "gelu", "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` [`DistilBertConfig`] displays all the default attributes used to build a base [`DistilBertModel`]. All attributes are customizable, creating space for experimentation. For example, you can customize a default model to: - Try a different activation function with the `activation` parameter. - Use a higher dropout ratio for the attention probabilities with the `attention_dropout` parameter. 
```py >>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) >>> print(my_config) DistilBertConfig { "activation": "relu", "attention_dropout": 0.4, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` Pretrained model attributes can be modified in the [`~PretrainedConfig.from_pretrained`] function: ```py >>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4) ``` Once you are satisfied with your model configuration, you can save it with [`~PretrainedConfig.save_pretrained`]. Your configuration file is stored as a JSON file in the specified save directory: ```py >>> my_config.save_pretrained(save_directory="./your_model_save_path") ``` To reuse the configuration file, load it with [`~PretrainedConfig.from_pretrained`]: ```py >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") ``` <Tip> You can also save your configuration file as a dictionary or even just the difference between your custom configuration attributes and the default configuration attributes! See the [configuration](main_classes/configuration) documentation for more details. </Tip> ## Model The next step is to create a [model](main_classes/models). The model - also loosely referred to as the architecture - defines what each layer is doing and what operations are happening. Attributes like `num_hidden_layers` from the configuration are used to define the architecture. Every model shares the base class [`PreTrainedModel`] and a few common methods like resizing input embeddings and pruning self-attention heads. In addition, all models are also either a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) or [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. This means models are compatible with each of their respective framework's usage. <frameworkcontent> <pt> Load your custom configuration attributes into the model: ```py >>> from transformers import DistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") >>> model = DistilBertModel(my_config) ``` This creates a model with random values instead of pretrained weights. You won't be able to use this model for anything useful yet until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training. Create a pretrained model with [`~PreTrainedModel.from_pretrained`]: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by 🤗 Transformers. 
However, you can still replace - some or all of - the default model configuration attributes with your own if you'd like: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </pt> <tf> Load your custom configuration attributes into the model: ```py >>> from transformers import TFDistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> tf_model = TFDistilBertModel(my_config) ``` This creates a model with random values instead of pretrained weights. You won't be able to use this model for anything useful yet until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training. Create a pretrained model with [`~TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by 🤗 Transformers. However, you can still replace - some or all of - the default model configuration attributes with your own if you'd like: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </tf> </frameworkcontent> ### Model heads At this point, you have a base DistilBERT model which outputs the *hidden states*. The hidden states are passed as inputs to a model head to produce the final output. 🤗 Transformers provides a different model head for each task as long as a model supports the task (i.e., you can't use DistilBERT for a sequence-to-sequence task like translation). <frameworkcontent> <pt> For example, [`DistilBertForSequenceClassification`] is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs. ```py >>> from transformers import DistilBertForSequenceClassification >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the [`DistilBertForQuestionAnswering`] model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output. ```py >>> from transformers import DistilBertForQuestionAnswering >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </pt> <tf> For example, [`TFDistilBertForSequenceClassification`] is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs. ```py >>> from transformers import TFDistilBertForSequenceClassification >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the [`TFDistilBertForQuestionAnswering`] model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output. 
```py >>> from transformers import TFDistilBertForQuestionAnswering >>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </tf> </frameworkcontent> ## Tokenizer The last base class you need before using a model for textual data is a [tokenizer](main_classes/tokenizer) to convert raw text to tensors. There are two types of tokenizers you can use with 🤗 Transformers: - [`PreTrainedTokenizer`]: a Python implementation of a tokenizer. - [`PreTrainedTokenizerFast`]: a tokenizer from our Rust-based [🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/) library. This tokenizer type is significantly faster - especially during batch tokenization - due to its Rust implementation. The fast tokenizer also offers additional methods like *offset mapping* which maps tokens to their original words or characters. Both tokenizers support common methods such as encoding and decoding, adding new tokens, and managing special tokens. <Tip warning={true}> Not every model supports a fast tokenizer. Take a look at this [table](index#supported-frameworks) to check if a model has fast tokenizer support. </Tip> If you trained your own tokenizer, you can create one from your *vocabulary* file: ```py >>> from transformers import DistilBertTokenizer >>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left") ``` It is important to remember the vocabulary from a custom tokenizer will be different from the vocabulary generated by a pretrained model's tokenizer. You need to use a pretrained model's vocabulary if you are using a pretrained model, otherwise the inputs won't make sense. Create a tokenizer with a pretrained model's vocabulary with the [`DistilBertTokenizer`] class: ```py >>> from transformers import DistilBertTokenizer >>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` Create a fast tokenizer with the [`DistilBertTokenizerFast`] class: ```py >>> from transformers import DistilBertTokenizerFast >>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> By default, [`AutoTokenizer`] will try to load a fast tokenizer. You can disable this behavior by setting `use_fast=False` in `from_pretrained`. </Tip> ## Image processor An image processor processes vision inputs. It inherits from the base [`~image_processing_utils.ImageProcessingMixin`] class. To use, create an image processor associated with the model you're using. For example, create a default [`ViTImageProcessor`] if you are using [ViT](model_doc/vit) for image classification: ```py >>> from transformers import ViTImageProcessor >>> vit_extractor = ViTImageProcessor() >>> print(vit_extractor) ViTImageProcessor { "do_normalize": true, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.5, 0.5, 0.5 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "size": 224 } ``` <Tip> If you aren't looking for any customization, just use the `from_pretrained` method to load a model's default image processor parameters. 
</Tip> Modify any of the [`ViTImageProcessor`] parameters to create your custom image processor: ```py >>> from transformers import ViTImageProcessor >>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) >>> print(my_vit_extractor) ViTImageProcessor { "do_normalize": false, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.3, 0.3, 0.3 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": "PIL.Image.BOX", "size": 224 } ``` ## Backbone <div style="text-align: center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Backbone.png"> </div> Computer vision models consist of a backbone, neck, and head. The backbone extracts features from an input image, the neck combines and enhances the extracted features, and the head is used for the main task (e.g., object detection). Start by initializing a backbone in the model config and specify whether you want to load pretrained weights or load randomly initialized weights. Then you can pass the model config to the model head. For example, to load a [ResNet](../model_doc/resnet) backbone into a [MaskFormer](../model_doc/maskformer) model with an instance segmentation head: <hfoptions id="backbone"> <hfoption id="pretrained weights"> Set `use_pretrained_backbone=True` to load pretrained ResNet weights for the backbone. ```py from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig config = MaskFormerConfig(backbone="microsoft/resnet50", use_pretrained_backbone=True) # backbone and neck config model = MaskFormerForInstanceSegmentation(config) # head ``` You could also load the backbone config separately and then pass it to the model config. ```py from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50") config = MaskFormerConfig(backbone_config=backbone_config) model = MaskFormerForInstanceSegmentation(config) ``` </hfoption> <hfoption id="random weights"> Set `use_pretrained_backbone=False` to randomly initialize a ResNet backbone. ```py from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig config = MaskFormerConfig(backbone="microsoft/resnet50", use_pretrained_backbone=False) # backbone and neck config model = MaskFormerForInstanceSegmentation(config) # head ``` You could also load the backbone config separately and then pass it to the model config. ```py from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig backbone_config = ResNetConfig() config = MaskFormerConfig(backbone_config=backbone_config) model = MaskFormerForInstanceSegmentation(config) ``` </hfoption> </hfoptions> [timm](https://hf.co/docs/timm/index) models are loaded with [`TimmBackbone`] and [`TimmBackboneConfig`]. ```python from transformers import TimmBackboneConfig, TimmBackbone backbone_config = TimmBackboneConfig("resnet50") model = TimmBackbone(config=backbone_config) ``` ## Feature extractor A feature extractor processes audio inputs. It inherits from the base [`~feature_extraction_utils.FeatureExtractionMixin`] class, and may also inherit from the [`SequenceFeatureExtractor`] class for processing audio inputs. To use, create a feature extractor associated with the model you're using. 
For example, create a default [`Wav2Vec2FeatureExtractor`] if you are using [Wav2Vec2](model_doc/wav2vec2) for audio classification: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor() >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 16000 } ``` <Tip> If you aren't looking for any customization, just use the `from_pretrained` method to load a model's default feature extractor parameters. </Tip> Modify any of the [`Wav2Vec2FeatureExtractor`] parameters to create your custom feature extractor: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False) >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": false, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 8000 } ``` ## Processor For models that support multimodal tasks, 🤗 Transformers offers a processor class that conveniently wraps processing classes such as a feature extractor and a tokenizer into a single object. For example, let's use the [`Wav2Vec2Processor`] for an automatic speech recognition task (ASR). ASR transcribes audio to text, so you will need a feature extractor and a tokenizer. Create a feature extractor to handle the audio inputs: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True) ``` Create a tokenizer to handle the text inputs: ```py >>> from transformers import Wav2Vec2CTCTokenizer >>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt") ``` Combine the feature extractor and tokenizer in [`Wav2Vec2Processor`]: ```py >>> from transformers import Wav2Vec2Processor >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` With two basic classes - configuration and model - and an additional preprocessing class (tokenizer, image processor, feature extractor, or processor), you can create any of the models supported by 🤗 Transformers. Each of these base classes are configurable, allowing you to use the specific attributes you want. You can easily setup a model for training or modify an existing pretrained model to fine-tune.
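As a quick end-to-end check of the preprocessing classes above, the sketch below runs a processor on a dummy audio array and on a target transcription. It assumes the pretrained `facebook/wav2vec2-base-960h` checkpoint (so the feature extractor and tokenizer come with a real vocabulary) rather than the hypothetical `my_vocab_file.txt` used earlier:

```py
>>> import numpy as np

>>> from transformers import Wav2Vec2Processor

>>> # assumed checkpoint that ships a paired feature extractor and tokenizer
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

>>> # a silent 1-second waveform sampled at 16 kHz stands in for real speech
>>> speech = np.zeros(16000, dtype=np.float32)

>>> # audio goes through the feature extractor
>>> audio_inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")

>>> # text (for example, a target transcription) goes through the tokenizer
>>> text_inputs = processor(text="HELLO WORLD", return_tensors="pt")
```

In practice, you would pass the waveform and transcription from your dataset instead of the placeholder values.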
transformers/docs/source/en/create_a_model.md/0
{ "file_path": "transformers/docs/source/en/create_a_model.md", "repo_id": "transformers", "token_count": 5528 }
239
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Auto Classes In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you are supplying to the `from_pretrained()` method. AutoClasses are here to do this job for you so that you automatically retrieve the relevant model given the name/path to the pretrained weights/config/vocabulary. Instantiating one of [`AutoConfig`], [`AutoModel`], and [`AutoTokenizer`] will directly create a class of the relevant architecture. For instance ```python model = AutoModel.from_pretrained("google-bert/bert-base-cased") ``` will create a model that is an instance of [`BertModel`]. There is one class of `AutoModel` for each task, and for each backend (PyTorch, TensorFlow, or Flax). ## Extending the Auto Classes Each of the auto classes has a method to be extended with your custom classes. For instance, if you have defined a custom class of model `NewModel`, make sure you have a `NewModelConfig` then you can add those to the auto classes like this: ```python from transformers import AutoConfig, AutoModel AutoConfig.register("new-model", NewModelConfig) AutoModel.register(NewModelConfig, NewModel) ``` You will then be able to use the auto classes like you would usually do! <Tip warning={true}> If your `NewModelConfig` is a subclass of [`~transformers.PretrainedConfig`], make sure its `model_type` attribute is set to the same key you use when registering the config (here `"new-model"`). Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its `config_class` attribute is set to the same class you use when registering the model (here `NewModelConfig`). </Tip> ## AutoConfig [[autodoc]] AutoConfig ## AutoTokenizer [[autodoc]] AutoTokenizer ## AutoFeatureExtractor [[autodoc]] AutoFeatureExtractor ## AutoImageProcessor [[autodoc]] AutoImageProcessor ## AutoProcessor [[autodoc]] AutoProcessor ## Generic model classes The following auto classes are available for instantiating a base model class without a specific head. ### AutoModel [[autodoc]] AutoModel ### TFAutoModel [[autodoc]] TFAutoModel ### FlaxAutoModel [[autodoc]] FlaxAutoModel ## Generic pretraining classes The following auto classes are available for instantiating a model with a pretraining head. ### AutoModelForPreTraining [[autodoc]] AutoModelForPreTraining ### TFAutoModelForPreTraining [[autodoc]] TFAutoModelForPreTraining ### FlaxAutoModelForPreTraining [[autodoc]] FlaxAutoModelForPreTraining ## Natural Language Processing The following auto classes are available for the following natural language processing tasks. 
### AutoModelForCausalLM [[autodoc]] AutoModelForCausalLM ### TFAutoModelForCausalLM [[autodoc]] TFAutoModelForCausalLM ### FlaxAutoModelForCausalLM [[autodoc]] FlaxAutoModelForCausalLM ### AutoModelForMaskedLM [[autodoc]] AutoModelForMaskedLM ### TFAutoModelForMaskedLM [[autodoc]] TFAutoModelForMaskedLM ### FlaxAutoModelForMaskedLM [[autodoc]] FlaxAutoModelForMaskedLM ### AutoModelForMaskGeneration [[autodoc]] AutoModelForMaskGeneration ### TFAutoModelForMaskGeneration [[autodoc]] TFAutoModelForMaskGeneration ### AutoModelForSeq2SeqLM [[autodoc]] AutoModelForSeq2SeqLM ### TFAutoModelForSeq2SeqLM [[autodoc]] TFAutoModelForSeq2SeqLM ### FlaxAutoModelForSeq2SeqLM [[autodoc]] FlaxAutoModelForSeq2SeqLM ### AutoModelForSequenceClassification [[autodoc]] AutoModelForSequenceClassification ### TFAutoModelForSequenceClassification [[autodoc]] TFAutoModelForSequenceClassification ### FlaxAutoModelForSequenceClassification [[autodoc]] FlaxAutoModelForSequenceClassification ### AutoModelForMultipleChoice [[autodoc]] AutoModelForMultipleChoice ### TFAutoModelForMultipleChoice [[autodoc]] TFAutoModelForMultipleChoice ### FlaxAutoModelForMultipleChoice [[autodoc]] FlaxAutoModelForMultipleChoice ### AutoModelForNextSentencePrediction [[autodoc]] AutoModelForNextSentencePrediction ### TFAutoModelForNextSentencePrediction [[autodoc]] TFAutoModelForNextSentencePrediction ### FlaxAutoModelForNextSentencePrediction [[autodoc]] FlaxAutoModelForNextSentencePrediction ### AutoModelForTokenClassification [[autodoc]] AutoModelForTokenClassification ### TFAutoModelForTokenClassification [[autodoc]] TFAutoModelForTokenClassification ### FlaxAutoModelForTokenClassification [[autodoc]] FlaxAutoModelForTokenClassification ### AutoModelForQuestionAnswering [[autodoc]] AutoModelForQuestionAnswering ### TFAutoModelForQuestionAnswering [[autodoc]] TFAutoModelForQuestionAnswering ### FlaxAutoModelForQuestionAnswering [[autodoc]] FlaxAutoModelForQuestionAnswering ### AutoModelForTextEncoding [[autodoc]] AutoModelForTextEncoding ### TFAutoModelForTextEncoding [[autodoc]] TFAutoModelForTextEncoding ## Computer vision The following auto classes are available for the following computer vision tasks. 
### AutoModelForDepthEstimation

[[autodoc]] AutoModelForDepthEstimation

### AutoModelForImageClassification

[[autodoc]] AutoModelForImageClassification

### TFAutoModelForImageClassification

[[autodoc]] TFAutoModelForImageClassification

### FlaxAutoModelForImageClassification

[[autodoc]] FlaxAutoModelForImageClassification

### AutoModelForVideoClassification

[[autodoc]] AutoModelForVideoClassification

### AutoModelForKeypointDetection

[[autodoc]] AutoModelForKeypointDetection

### AutoModelForMaskedImageModeling

[[autodoc]] AutoModelForMaskedImageModeling

### TFAutoModelForMaskedImageModeling

[[autodoc]] TFAutoModelForMaskedImageModeling

### AutoModelForObjectDetection

[[autodoc]] AutoModelForObjectDetection

### AutoModelForImageSegmentation

[[autodoc]] AutoModelForImageSegmentation

### AutoModelForImageToImage

[[autodoc]] AutoModelForImageToImage

### AutoModelForSemanticSegmentation

[[autodoc]] AutoModelForSemanticSegmentation

### TFAutoModelForSemanticSegmentation

[[autodoc]] TFAutoModelForSemanticSegmentation

### AutoModelForInstanceSegmentation

[[autodoc]] AutoModelForInstanceSegmentation

### AutoModelForUniversalSegmentation

[[autodoc]] AutoModelForUniversalSegmentation

### AutoModelForZeroShotImageClassification

[[autodoc]] AutoModelForZeroShotImageClassification

### TFAutoModelForZeroShotImageClassification

[[autodoc]] TFAutoModelForZeroShotImageClassification

### AutoModelForZeroShotObjectDetection

[[autodoc]] AutoModelForZeroShotObjectDetection

## Audio

The following auto classes are available for the following audio tasks.

### AutoModelForAudioClassification

[[autodoc]] AutoModelForAudioClassification

### TFAutoModelForAudioClassification

[[autodoc]] TFAutoModelForAudioClassification

### AutoModelForAudioFrameClassification

[[autodoc]] AutoModelForAudioFrameClassification

### AutoModelForCTC

[[autodoc]] AutoModelForCTC

### AutoModelForSpeechSeq2Seq

[[autodoc]] AutoModelForSpeechSeq2Seq

### TFAutoModelForSpeechSeq2Seq

[[autodoc]] TFAutoModelForSpeechSeq2Seq

### FlaxAutoModelForSpeechSeq2Seq

[[autodoc]] FlaxAutoModelForSpeechSeq2Seq

### AutoModelForAudioXVector

[[autodoc]] AutoModelForAudioXVector

### AutoModelForTextToSpectrogram

[[autodoc]] AutoModelForTextToSpectrogram

### AutoModelForTextToWaveform

[[autodoc]] AutoModelForTextToWaveform

## Multimodal

The following auto classes are available for the following multimodal tasks.

### AutoModelForTableQuestionAnswering

[[autodoc]] AutoModelForTableQuestionAnswering

### TFAutoModelForTableQuestionAnswering

[[autodoc]] TFAutoModelForTableQuestionAnswering

### AutoModelForDocumentQuestionAnswering

[[autodoc]] AutoModelForDocumentQuestionAnswering

### TFAutoModelForDocumentQuestionAnswering

[[autodoc]] TFAutoModelForDocumentQuestionAnswering

### AutoModelForVisualQuestionAnswering

[[autodoc]] AutoModelForVisualQuestionAnswering

### AutoModelForVision2Seq

[[autodoc]] AutoModelForVision2Seq

### TFAutoModelForVision2Seq

[[autodoc]] TFAutoModelForVision2Seq

### FlaxAutoModelForVision2Seq

[[autodoc]] FlaxAutoModelForVision2Seq
transformers/docs/source/en/model_doc/auto.md/0
{ "file_path": "transformers/docs/source/en/model_doc/auto.md", "repo_id": "transformers", "token_count": 2622 }
240
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Blenderbot

## Overview

The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.

The abstract of the paper is the following:

*Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.*

This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The authors' code can be found [here](https://github.com/facebookresearch/ParlAI).

## Usage tips and example

Blenderbot is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
the left.

An example:

```python
>>> from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

>>> mname = "facebook/blenderbot-400M-distill"
>>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)
>>> tokenizer = BlenderbotTokenizer.from_pretrained(mname)
>>> UTTERANCE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
>>> reply_ids = model.generate(**inputs)
>>> print(tokenizer.batch_decode(reply_ids))
["<s> That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?</s>"]
```

## Implementation Notes

- Blenderbot uses a standard [seq2seq model transformer](https://arxiv.org/pdf/1706.03762.pdf) based architecture.
- Available checkpoints can be found in the [model hub](https://huggingface.co/models?search=blenderbot).
- This is the *default* Blenderbot model class.
However, some smaller checkpoints, such as `facebook/blenderbot_small_90M`, have a different architecture and consequently should be used with [BlenderbotSmall](blenderbot-small). ## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## BlenderbotConfig [[autodoc]] BlenderbotConfig ## BlenderbotTokenizer [[autodoc]] BlenderbotTokenizer - build_inputs_with_special_tokens ## BlenderbotTokenizerFast [[autodoc]] BlenderbotTokenizerFast - build_inputs_with_special_tokens <frameworkcontent> <pt> ## BlenderbotModel See [`~transformers.BartModel`] for arguments to *forward* and *generate* [[autodoc]] BlenderbotModel - forward ## BlenderbotForConditionalGeneration See [`~transformers.BartForConditionalGeneration`] for arguments to *forward* and *generate* [[autodoc]] BlenderbotForConditionalGeneration - forward ## BlenderbotForCausalLM [[autodoc]] BlenderbotForCausalLM - forward </pt> <tf> ## TFBlenderbotModel [[autodoc]] TFBlenderbotModel - call ## TFBlenderbotForConditionalGeneration [[autodoc]] TFBlenderbotForConditionalGeneration - call </tf> <jax> ## FlaxBlenderbotModel [[autodoc]] FlaxBlenderbotModel - __call__ - encode - decode ## FlaxBlenderbotForConditionalGeneration [[autodoc]] FlaxBlenderbotForConditionalGeneration - __call__ - encode - decode </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/blenderbot.md/0
{ "file_path": "transformers/docs/source/en/model_doc/blenderbot.md", "repo_id": "transformers", "token_count": 1405 }
241
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Encoder Decoder Models ## Overview The [`EncoderDecoderModel`] can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. After such an [`EncoderDecoderModel`] has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information). An application of this architecture could be to leverage two pretrained [`BertModel`] as the encoder and decoder for a summarization model as was shown in: [Text Summarization with Pretrained Encoders](https://arxiv.org/abs/1908.08345) by Yang Liu and Mirella Lapata. ## Randomly initializing `EncoderDecoderModel` from model configurations. [`EncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`BertModel`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder. ```python >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel >>> config_encoder = BertConfig() >>> config_decoder = BertConfig() >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> model = EncoderDecoderModel(config=config) ``` ## Initialising `EncoderDecoderModel` from a pretrained encoder and a pretrained decoder. [`EncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained auto-encoding model, *e.g.* BERT, can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder. Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized. Initializing [`EncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder). To do so, the `EncoderDecoderModel` class provides a [`EncoderDecoderModel.from_encoder_decoder_pretrained`] method. 
```python >>> from transformers import EncoderDecoderModel, BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased") ``` ## Loading an existing `EncoderDecoderModel` checkpoint and perform inference. To load fine-tuned checkpoints of the `EncoderDecoderModel` class, [`EncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers. To perform inference, one uses the [`generate`] method, which allows to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search and multinomial sampling. ```python >>> from transformers import AutoTokenizer, EncoderDecoderModel >>> # load a fine-tuned seq2seq model and corresponding tokenizer >>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail") >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail") >>> # let's perform inference on a long piece of text >>> ARTICLE_TO_SUMMARIZE = ( ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds " ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were " ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." ... ) >>> input_ids = tokenizer(ARTICLE_TO_SUMMARIZE, return_tensors="pt").input_ids >>> # autoregressively generate summary (uses greedy decoding by default) >>> generated_ids = model.generate(input_ids) >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) nearly 800 thousand customers were affected by the shutoffs. the aim is to reduce the risk of wildfires. nearly 800, 000 customers were expected to be affected by high winds amid dry conditions. pg & e said it scheduled the blackouts to last through at least midday tomorrow. ``` ## Loading a PyTorch checkpoint into `TFEncoderDecoderModel`. [`TFEncoderDecoderModel.from_pretrained`] currently doesn't support initializing the model from a pytorch checkpoint. Passing `from_pt=True` to this method will throw an exception. If there are only pytorch checkpoints for a particular encoder-decoder model, a workaround is: ```python >>> # a workaround to load from pytorch checkpoint >>> from transformers import EncoderDecoderModel, TFEncoderDecoderModel >>> _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") >>> _model.encoder.save_pretrained("./encoder") >>> _model.decoder.save_pretrained("./decoder") >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( ... "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ... ) >>> # This is only for copying some specific attributes of this particular model. >>> model.config = _model.config ``` ## Training Once the model is created, it can be fine-tuned similar to BART, T5 or any other encoder-decoder model. As you can see, only 2 inputs are required for the model in order to compute a loss: `input_ids` (which are the `input_ids` of the encoded input sequence) and `labels` (which are the `input_ids` of the encoded target sequence). 
```python >>> from transformers import BertTokenizer, EncoderDecoderModel >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased") >>> model.config.decoder_start_token_id = tokenizer.cls_token_id >>> model.config.pad_token_id = tokenizer.pad_token_id >>> input_ids = tokenizer( ... "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side.During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft).Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.", ... return_tensors="pt", ... ).input_ids >>> labels = tokenizer( ... "the eiffel tower surpassed the washington monument to become the tallest structure in the world. it was the first structure to reach a height of 300 metres in paris in 1930. it is now taller than the chrysler building by 5. 2 metres ( 17 ft ) and is the second tallest free - standing structure in paris.", ... return_tensors="pt", ... ).input_ids >>> # the forward function automatically creates the correct decoder_input_ids >>> loss = model(input_ids=input_ids, labels=labels).loss ``` Detailed [colab](https://colab.research.google.com/drive/1WIk2bxglElfZewOHboPFNj8H44_VAyKE?usp=sharing#scrollTo=ZwQIEhKOrJpl) for training. This model was contributed by [thomwolf](https://github.com/thomwolf). This model's TensorFlow and Flax versions were contributed by [ydshieh](https://github.com/ydshieh). ## EncoderDecoderConfig [[autodoc]] EncoderDecoderConfig <frameworkcontent> <pt> ## EncoderDecoderModel [[autodoc]] EncoderDecoderModel - forward - from_encoder_decoder_pretrained </pt> <tf> ## TFEncoderDecoderModel [[autodoc]] TFEncoderDecoderModel - call - from_encoder_decoder_pretrained </tf> <jax> ## FlaxEncoderDecoderModel [[autodoc]] FlaxEncoderDecoderModel - __call__ - from_encoder_decoder_pretrained </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/encoder-decoder.md/0
{ "file_path": "transformers/docs/source/en/model_doc/encoder-decoder.md", "repo_id": "transformers", "token_count": 2664 }
242
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. specific language governing permissions and limitations under the License. --> # ImageGPT ## Overview The ImageGPT model was proposed in [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like model trained to predict the next pixel value, allowing for both unconditional and conditional image generation. The abstract from the paper is the following: *Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. We are also competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0% top-1 accuracy on a linear probe of our features.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/imagegpt_architecture.png" alt="drawing" width="600"/> <small> Summary of the approach. Taken from the [original paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf). </small> This model was contributed by [nielsr](https://huggingface.co/nielsr), based on [this issue](https://github.com/openai/image-gpt/issues/7). The original code can be found [here](https://github.com/openai/image-gpt). ## Usage tips - ImageGPT is almost exactly the same as [GPT-2](gpt2), with the exception that a different activation function is used (namely "quick gelu"), and the layer normalization layers don't mean center the inputs. ImageGPT also doesn't have tied input- and output embeddings. - As the time- and memory requirements of the attention mechanism of Transformers scales quadratically in the sequence length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a sequence of 32x32x3=3072 tokens from 0..255 into a Transformer is still prohibitively large. Therefore, the authors applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32*32 = 1024-long sequence, but now of integers in the range 0..511. So we are shrinking the sequence length at the cost of a bigger embedding matrix. 
In other words, the vocabulary size of ImageGPT is 512 + 1 for a special "start of sentence" (SOS) token, used at the beginning of every sequence. One can use [`ImageGPTImageProcessor`] to prepare images for the model.
- Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly performant image features useful for downstream tasks, such as image classification. The authors showed that the features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as a sklearn logistic regression model for example). This is also referred to as "linear probing". Features can be easily obtained by first forwarding the image through the model with `output_hidden_states=True`, and then average-pooling the hidden states at whatever layer you like.
- Alternatively, one can further fine-tune the entire model on a downstream dataset, similar to BERT. For this, you can use [`ImageGPTForImageClassification`].
- ImageGPT comes in different sizes: there's ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors did also train an XL variant, which they didn't release. The differences in size are summarized in the following table:

| **Model variant** | **Depth (layers)** | **Hidden size** | **Params (M)** |
|---|---|---|---|
| ImageGPT-small | 24 | 512 | 76 |
| ImageGPT-medium | 36 | 1024 | 455 |
| ImageGPT-large | 48 | 1536 | ~1400 |
| ImageGPT-XL (not released) | 60 | 3072 | ~6800 |

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ImageGPT.

<PipelineTag pipeline="image-classification"/>

- Demo notebooks for ImageGPT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ImageGPT).
- [`ImageGPTForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

## ImageGPTConfig

[[autodoc]] ImageGPTConfig

## ImageGPTFeatureExtractor

[[autodoc]] ImageGPTFeatureExtractor
    - __call__

## ImageGPTImageProcessor

[[autodoc]] ImageGPTImageProcessor
    - preprocess

## ImageGPTModel

[[autodoc]] ImageGPTModel
    - forward

## ImageGPTForCausalImageModeling

[[autodoc]] ImageGPTForCausalImageModeling
    - forward

## ImageGPTForImageClassification

[[autodoc]] ImageGPTForImageClassification
    - forward
transformers/docs/source/en/model_doc/imagegpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/imagegpt.md", "repo_id": "transformers", "token_count": 1915 }
243
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Longformer <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=longformer"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-longformer-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/longformer-base-4096-finetuned-squadv1"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Longformer model was presented in [Longformer: The Long-Document Transformer](https://arxiv.org/pdf/2004.05150.pdf) by Iz Beltagy, Matthew E. Peters, Arman Cohan. The abstract from the paper is the following: *Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.* This model was contributed by [beltagy](https://huggingface.co/beltagy). The Authors' code can be found [here](https://github.com/allenai/longformer). ## Usage tips - Since the Longformer is based on RoBERTa, it doesn't have `token_type_ids`. You don't need to indicate which token belongs to which segment. Just separate your segments with the separation token `tokenizer.sep_token` (or `</s>`). - A transformer model replacing the attention matrices by sparse matrices to go faster. Often, the local context (e.g., what are the two tokens left and right?) is enough to take action for a given token. Some preselected input tokens are still given global attention, but the attention matrix has way less parameters, resulting in a speed-up. See the local attention section for more information. ## Longformer Self Attention Longformer self attention employs self attention on both a "local" context and a "global" context. Most tokens only attend "locally" to each other meaning that each token attends to its \\(\frac{1}{2} w\\) previous tokens and \\(\frac{1}{2} w\\) succeeding tokens with \\(w\\) being the window length as defined in `config.attention_window`. 
Note that `config.attention_window` can be of type `List` to define a different \\(w\\) for each layer. A selected few tokens attend "globally" to all other tokens, as it is conventionally done for all tokens in `BertSelfAttention`.

Note that "locally" and "globally" attending tokens are projected by different query, key and value matrices. Also note that every "locally" attending token not only attends to tokens within its window \\(w\\), but also to all "globally" attending tokens so that global attention is *symmetric*.

The user can define which tokens attend "locally" and which tokens attend "globally" by setting the tensor `global_attention_mask` at run-time appropriately. All Longformer models employ the following logic for `global_attention_mask`:

- 0: the token attends "locally",
- 1: the token attends "globally".

For more information please also refer to the [`~LongformerModel.forward`] method.

Using Longformer self attention, the memory and time complexity of the query-key matmul operation, which usually represents the memory and time bottleneck, can be reduced from \\(\mathcal{O}(n_s \times n_s)\\) to \\(\mathcal{O}(n_s \times w)\\), with \\(n_s\\) being the sequence length and \\(w\\) being the average window size. It is assumed that the number of "globally" attending tokens is insignificant as compared to the number of "locally" attending tokens.

For more information, please refer to the official [paper](https://arxiv.org/pdf/2004.05150.pdf).

## Training

[`LongformerForMaskedLM`] is trained the exact same way [`RobertaForMaskedLM`] is trained and should be used as follows:

```python
input_ids = tokenizer.encode(f"This is a sentence from {tokenizer.mask_token} training data", return_tensors="pt")
labels = tokenizer.encode("This is a sentence from the training data", return_tensors="pt")

loss = model(input_ids, labels=labels).loss
```

## Resources

- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)

## LongformerConfig

[[autodoc]] LongformerConfig

## LongformerTokenizer

[[autodoc]] LongformerTokenizer

## LongformerTokenizerFast

[[autodoc]] LongformerTokenizerFast

## Longformer specific outputs

[[autodoc]] models.longformer.modeling_longformer.LongformerBaseModelOutput

[[autodoc]] models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling

[[autodoc]] models.longformer.modeling_longformer.LongformerMaskedLMOutput

[[autodoc]] models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput

[[autodoc]] models.longformer.modeling_longformer.LongformerSequenceClassifierOutput

[[autodoc]] models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput

[[autodoc]] models.longformer.modeling_longformer.LongformerTokenClassifierOutput

[[autodoc]] models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput

[[autodoc]] models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling

[[autodoc]] models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput

[[autodoc]] models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput

[[autodoc]] models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput

[[autodoc]] models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput

[[autodoc]] 
models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput <frameworkcontent> <pt> ## LongformerModel [[autodoc]] LongformerModel - forward ## LongformerForMaskedLM [[autodoc]] LongformerForMaskedLM - forward ## LongformerForSequenceClassification [[autodoc]] LongformerForSequenceClassification - forward ## LongformerForMultipleChoice [[autodoc]] LongformerForMultipleChoice - forward ## LongformerForTokenClassification [[autodoc]] LongformerForTokenClassification - forward ## LongformerForQuestionAnswering [[autodoc]] LongformerForQuestionAnswering - forward </pt> <tf> ## TFLongformerModel [[autodoc]] TFLongformerModel - call ## TFLongformerForMaskedLM [[autodoc]] TFLongformerForMaskedLM - call ## TFLongformerForQuestionAnswering [[autodoc]] TFLongformerForQuestionAnswering - call ## TFLongformerForSequenceClassification [[autodoc]] TFLongformerForSequenceClassification - call ## TFLongformerForTokenClassification [[autodoc]] TFLongformerForTokenClassification - call ## TFLongformerForMultipleChoice [[autodoc]] TFLongformerForMultipleChoice - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/longformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/longformer.md", "repo_id": "transformers", "token_count": 2395 }
244
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. :warning: Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MusicGen Melody ## Overview The MusicGen Melody model was proposed in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. MusicGen Melody is a single stage auto-regressive Transformer model capable of generating high-quality music samples conditioned on text descriptions or audio prompts. The text descriptions are passed through a frozen text encoder model to obtain a sequence of hidden-state representations. MusicGen is then trained to predict discrete audio tokens, or *audio codes*, conditioned on these hidden-states. These audio tokens are then decoded using an audio compression model, such as EnCodec, to recover the audio waveform. Through an efficient token interleaving pattern, MusicGen does not require a self-supervised semantic representation of the text/audio prompts, thus eliminating the need to cascade multiple models to predict a set of codebooks (e.g. hierarchically or upsampling). Instead, it is able to generate all the codebooks in a single forward pass. The abstract from the paper is the following: *We tackle the task of conditional music generation. We introduce MusicGen, a single Language Model (LM) that operates over several streams of compressed discrete music representation, i.e., tokens. Unlike prior work, MusicGen is comprised of a single-stage transformer LM together with efficient token interleaving patterns, which eliminates the need for cascading several models, e.g., hierarchically or upsampling. Following this approach, we demonstrate how MusicGen can generate high-quality samples, while being conditioned on textual description or melodic features, allowing better controls over the generated output. We conduct extensive empirical evaluation, considering both automatic and human studies, showing the proposed approach is superior to the evaluated baselines on a standard text-to-music benchmark. Through ablation studies, we shed light over the importance of each of the components comprising MusicGen.* This model was contributed by [ylacombe](https://huggingface.co/ylacombe). The original code can be found [here](https://github.com/facebookresearch/audiocraft). The pre-trained checkpoints can be found on the [Hugging Face Hub](https://huggingface.co/models?sort=downloads&search=facebook%2Fmusicgen). ## Difference with [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen) There are two key differences with MusicGen: 1. The audio prompt is used here as a conditional signal for the generated audio sample, whereas it's used for audio continuation in [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen). 2. 
Conditional text and audio signals are concatenated to the decoder's hidden states instead of being used as a cross-attention signal, as in MusicGen. ## Generation MusicGen Melody is compatible with two generation modes: greedy and sampling. In practice, sampling leads to significantly better results than greedy, thus we encourage sampling mode to be used where possible. Sampling is enabled by default, and can be explicitly specified by setting `do_sample=True` in the call to [`MusicgenMelodyForConditionalGeneration.generate`], or by overriding the model's generation config (see below). Transformers supports both mono (1-channel) and stereo (2-channel) variants of MusicGen Melody. The mono channel versions generate a single set of codebooks. The stereo versions generate 2 sets of codebooks, 1 for each channel (left/right), and each set of codebooks is decoded independently through the audio compression model. The audio streams for each channel are combined to give the final stereo output. #### Audio Conditional Generation The model can generate an audio sample conditioned on a text and an audio prompt through use of the [`MusicgenMelodyProcessor`] to pre-process the inputs. In the following examples, we load an audio file using the 🤗 Datasets library, which can be pip installed through the command below: ``` pip install --upgrade pip pip install datasets[audio] ``` The audio file we are about to use is loaded as follows: ```python >>> from datasets import load_dataset >>> dataset = load_dataset("sanchit-gandhi/gtzan", split="train", streaming=True) >>> sample = next(iter(dataset))["audio"] ``` The audio prompt should ideally be free of the low-frequency signals usually produced by instruments such as drums and bass. The [Demucs](https://github.com/adefossez/demucs/tree/main) model can be used to separate vocals and other signals from the drums and bass components. If you wish to use Demucs, you first need to follow the installation steps [here](https://github.com/adefossez/demucs/tree/main?tab=readme-ov-file#for-musicians) before using the following snippet: ```python from demucs import pretrained from demucs.apply import apply_model from demucs.audio import convert_audio import torch wav = torch.tensor(sample["array"]).to(torch.float32) demucs = pretrained.get_model('htdemucs') wav = convert_audio(wav[None], sample["sampling_rate"], demucs.samplerate, demucs.audio_channels) wav = apply_model(demucs, wav[None]) ``` You can then use the following snippet to generate music: ```python >>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> inputs = processor( ... audio=wav, ... sampling_rate=demucs.samplerate, ... text=["80s blues track with groovy saxophone"], ... padding=True, ... return_tensors="pt", ... ) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` You can also pass the audio signal directly without using Demucs, although the quality of the generation will probably be degraded: ```python >>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> inputs = processor( ... audio=sample["array"], ... sampling_rate=sample["sampling_rate"], ... 
text=["80s blues track with groovy saxophone"], ... padding=True, ... return_tensors="pt", ... ) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` The audio outputs are a three-dimensional Torch tensor of shape `(batch_size, num_channels, sequence_length)`. To listen to the generated audio samples, you can either play them in an ipynb notebook: ```python from IPython.display import Audio sampling_rate = model.config.audio_encoder.sampling_rate Audio(audio_values[0].numpy(), rate=sampling_rate) ``` Or save them as a `.wav` file using a third-party library, e.g. `soundfile`: ```python >>> import soundfile as sf >>> sampling_rate = model.config.audio_encoder.sampling_rate >>> sf.write("musicgen_out.wav", audio_values[0].T.numpy(), sampling_rate) ``` ### Text-only Conditional Generation The same [`MusicgenMelodyProcessor`] can be used to pre-process a text-only prompt. ```python >>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> inputs = processor( ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... ) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` The `guidance_scale` is used in classifier free guidance (CFG), setting the weighting between the conditional logits (which are predicted from the text prompts) and the unconditional logits (which are predicted from an unconditional or 'null' prompt). Higher guidance scale encourages the model to generate samples that are more closely linked to the input prompt, usually at the expense of poorer audio quality. CFG is enabled by setting `guidance_scale > 1`. For best results, use `guidance_scale=3` (default). You can also generate in batch: ```python >>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> # take the first quarter of the audio sample >>> sample_1 = sample["array"][: len(sample["array"]) // 4] >>> # take the first half of the audio sample >>> sample_2 = sample["array"][: len(sample["array"]) // 2] >>> inputs = processor( ... audio=[sample_1, sample_2], ... sampling_rate=sample["sampling_rate"], ... text=["80s blues track with groovy saxophone", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... 
) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` ### Unconditional Generation The inputs for unconditional (or 'null') generation can be obtained through the method [`MusicgenMelodyProcessor.get_unconditional_inputs`]: ```python >>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> unconditional_inputs = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody").get_unconditional_inputs(num_samples=1) >>> audio_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=256) ``` ### Generation Configuration The default parameters that control the generation process, such as sampling, guidance scale and number of generated tokens, can be found in the model's generation config, and updated as desired: ```python >>> from transformers import MusicgenMelodyForConditionalGeneration >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> # inspect the default generation config >>> model.generation_config >>> # increase the guidance scale to 4.0 >>> model.generation_config.guidance_scale = 4.0 >>> # decrease the max length to 256 tokens >>> model.generation_config.max_length = 256 ``` Note that any arguments passed to the generate method will **supersede** those in the generation config, so setting `do_sample=False` in the call to generate will supersede the setting of `model.generation_config.do_sample` in the generation config. ## Model Structure The MusicGen model can be de-composed into three distinct stages: 1. Text encoder: maps the text inputs to a sequence of hidden-state representations. The pre-trained MusicGen models use a frozen text encoder from either T5 or Flan-T5. 2. MusicGen Melody decoder: a language model (LM) that auto-regressively generates audio tokens (or codes) conditional on the encoder hidden-state representations 3. Audio decoder: used to recover the audio waveform from the audio tokens predicted by the decoder. Thus, the MusicGen model can either be used as a standalone decoder model, corresponding to the class [`MusicgenMelodyForCausalLM`], or as a composite model that includes the text encoder and audio encoder, corresponding to the class [`MusicgenMelodyForConditionalGeneration`]. If only the decoder needs to be loaded from the pre-trained checkpoint, it can be loaded by first specifying the correct config, or be accessed through the `.decoder` attribute of the composite model: ```python >>> from transformers import AutoConfig, MusicgenMelodyForCausalLM, MusicgenMelodyForConditionalGeneration >>> # Option 1: get decoder config and pass to `.from_pretrained` >>> decoder_config = AutoConfig.from_pretrained("facebook/musicgen-melody").decoder >>> decoder = MusicgenMelodyForCausalLM.from_pretrained("facebook/musicgen-melody", **decoder_config.to_dict()) >>> # Option 2: load the entire composite model, but only return the decoder >>> decoder = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody").decoder ``` Since the text encoder and audio encoder models are frozen during training, the MusicGen decoder [`MusicgenMelodyForCausalLM`] can be trained standalone on a dataset of encoder hidden-states and audio codes. For inference, the trained decoder can be combined with the frozen text encoder and audio encoder to recover the composite [`MusicgenMelodyForConditionalGeneration`] model. 
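As an illustration of that last point, the sketch below re-assembles a composite model from a fine-tuned decoder and the frozen encoders. It assumes that [`MusicgenMelodyForConditionalGeneration`] exposes a `from_sub_models_pretrained` helper mirroring the one in MusicGen, and the decoder path is a placeholder for your own checkpoint:

```python
>>> from transformers import MusicgenMelodyForConditionalGeneration

>>> # The text encoder and audio encoder stay frozen; the decoder path below is a placeholder
>>> # for a fine-tuned MusicgenMelodyForCausalLM checkpoint saved with `save_pretrained`.
>>> model = MusicgenMelodyForConditionalGeneration.from_sub_models_pretrained(
...     text_encoder_pretrained_model_name_or_path="google-t5/t5-base",
...     audio_encoder_pretrained_model_name_or_path="facebook/encodec_32khz",
...     decoder_pretrained_model_name_or_path="./my-finetuned-musicgen-melody-decoder",
... )
```

The resulting composite model can then be used for generation exactly as in the examples above.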
## Checkpoint Conversion

After downloading the original checkpoints from [here](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md#importing--exporting-models), you can convert them using the **conversion script** available at `src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py` with the following command:

```bash
python src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py \
    --checkpoint="facebook/musicgen-melody" --pytorch_dump_folder /output/path
```

Tips:
* MusicGen Melody is trained on the 32kHz checkpoint of Encodec (`facebook/encodec_32khz`), so generated audio is sampled at 32kHz. Make sure you use a compatible version of the Encodec model.
* Sampling mode tends to deliver better results than greedy; you can toggle sampling with the `do_sample` argument in the call to [`MusicgenMelodyForConditionalGeneration.generate`].

## MusicgenMelodyDecoderConfig

[[autodoc]] MusicgenMelodyDecoderConfig

## MusicgenMelodyProcessor

[[autodoc]] MusicgenMelodyProcessor
    - get_unconditional_inputs

## MusicgenMelodyFeatureExtractor

[[autodoc]] MusicgenMelodyFeatureExtractor
    - _extract_stem_indices

## MusicgenMelodyConfig

[[autodoc]] MusicgenMelodyConfig

## MusicgenMelodyModel

[[autodoc]] MusicgenMelodyModel
    - forward

## MusicgenMelodyForCausalLM

[[autodoc]] MusicgenMelodyForCausalLM
    - forward

## MusicgenMelodyForConditionalGeneration

[[autodoc]] MusicgenMelodyForConditionalGeneration
    - forward
transformers/docs/source/en/model_doc/musicgen_melody.md/0
{ "file_path": "transformers/docs/source/en/model_doc/musicgen_melody.md", "repo_id": "transformers", "token_count": 4090 }
245
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pegasus <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=pegasus"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-pegasus-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/pegasus_paraphrase"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Pegasus model was proposed in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu on Dec 18, 2019. According to the abstract, - Pegasus' pretraining task is intentionally similar to summarization: important sentences are removed/masked from an input document and are generated together as one output sequence from the remaining sentences, similar to an extractive summary. - Pegasus achieves SOTA summarization performance on all 12 downstream tasks, as measured by ROUGE and human eval. This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The Authors' code can be found [here](https://github.com/google-research/pegasus). ## Usage tips - Sequence-to-sequence model with the same encoder-decoder model architecture as BART. Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization specific pretraining objective, called Gap Sentence Generation (GSG). * MLM: encoder input tokens are randomly replaced by a mask tokens and have to be predicted by the encoder (like in BERT) * GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, but which has a causal mask to hide the future words like a regular auto-regressive transformer decoder. - FP16 is not supported (help/ideas on this appreciated!). - The adafactor optimizer is recommended for pegasus fine-tuning. ## Checkpoints All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tuned for summarization, besides *pegasus-large*, whence the other checkpoints are fine-tuned: - Each checkpoint is 2.2 GB on disk and 568M parameters. - FP16 is not supported (help/ideas on this appreciated!). - Summarizing xsum in fp32 takes about 400ms/sample, with default parameters on a v100 GPU. - Full replication results and correctly pre-processed data can be found in this [Issue](https://github.com/huggingface/transformers/issues/6844#issue-689259666). - [Distilled checkpoints](https://huggingface.co/models?search=distill-pegasus) are described in this [paper](https://arxiv.org/abs/2010.13002). ## Implementation Notes - All models are transformer encoder-decoders with 16 layers in each component. 
- The implementation is completely inherited from [`BartForConditionalGeneration`] - Some key configuration differences: - static, sinusoidal position embeddings - the model starts generating with pad_token_id (which has 0 token_embedding) as the prefix. - more beams are used (`num_beams=8`) - All pretrained pegasus checkpoints are the same besides three attributes: `tokenizer.model_max_length` (maximum input size), `max_length` (the maximum number of tokens to generate) and `length_penalty`. - The code to convert checkpoints trained in the author's [repo](https://github.com/google-research/pegasus) can be found in `convert_pegasus_tf_to_pytorch.py`. ## Usage Example ```python >>> from transformers import PegasusForConditionalGeneration, PegasusTokenizer >>> import torch >>> src_text = [ ... """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""" ... ] ... model_name = "google/pegasus-xsum" ... device = "cuda" if torch.cuda.is_available() else "cpu" ... tokenizer = PegasusTokenizer.from_pretrained(model_name) ... model = PegasusForConditionalGeneration.from_pretrained(model_name).to(device) ... batch = tokenizer(src_text, truncation=True, padding="longest", return_tensors="pt").to(device) ... translated = model.generate(**batch) ... tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True) ... assert ( ... tgt_text[0] ... == "California's largest electricity provider has turned off power to hundreds of thousands of customers." ... ) ``` ## Resources - [Script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus on the XSUM dataset. Data download instructions at [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## PegasusConfig [[autodoc]] PegasusConfig ## PegasusTokenizer warning: `add_tokens` does not work at the moment. [[autodoc]] PegasusTokenizer ## PegasusTokenizerFast [[autodoc]] PegasusTokenizerFast <frameworkcontent> <pt> ## PegasusModel [[autodoc]] PegasusModel - forward ## PegasusForConditionalGeneration [[autodoc]] PegasusForConditionalGeneration - forward ## PegasusForCausalLM [[autodoc]] PegasusForCausalLM - forward </pt> <tf> ## TFPegasusModel [[autodoc]] TFPegasusModel - call ## TFPegasusForConditionalGeneration [[autodoc]] TFPegasusForConditionalGeneration - call </tf> <jax> ## FlaxPegasusModel [[autodoc]] FlaxPegasusModel - __call__ - encode - decode ## FlaxPegasusForConditionalGeneration [[autodoc]] FlaxPegasusForConditionalGeneration - __call__ - encode - decode </jax> </frameworkcontent>
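As noted in the usage tips, the Adafactor optimizer is recommended for Pegasus fine-tuning. The following is a minimal, illustrative sketch (not an official recipe) of wiring Adafactor and its schedule into [`Trainer`]; the tiny inline dataset is only a placeholder for your own tokenized summarization data.

```python
from transformers import (
    PegasusForConditionalGeneration,
    PegasusTokenizer,
    Trainer,
    TrainingArguments,
)
from transformers.optimization import Adafactor, AdafactorSchedule

model_name = "google/pegasus-large"
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name)

# Placeholder data: replace with your own (document, summary) pairs.
documents = ["PG&E scheduled power shutoffs to reduce the risk of wildfires during high winds."]
summaries = ["PG&E scheduled power shutoffs."]
model_inputs = tokenizer(documents, truncation=True, padding=True)
labels = tokenizer(text_target=summaries, truncation=True, padding=True)["input_ids"]
train_dataset = [
    {
        "input_ids": model_inputs["input_ids"][i],
        "attention_mask": model_inputs["attention_mask"][i],
        "labels": labels[i],
    }
    for i in range(len(documents))
]

# With relative step sizes, Adafactor computes its own learning rate;
# AdafactorSchedule exposes that internal rate to the Trainer for logging/scheduling.
optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
lr_scheduler = AdafactorSchedule(optimizer)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="pegasus-finetuned", per_device_train_batch_size=1, num_train_epochs=1),
    train_dataset=train_dataset,
    optimizers=(optimizer, lr_scheduler),
)
trainer.train()
```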
transformers/docs/source/en/model_doc/pegasus.md/0
{ "file_path": "transformers/docs/source/en/model_doc/pegasus.md", "repo_id": "transformers", "token_count": 1966 }
246
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # REALM ## Overview The REALM model was proposed in [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang. It's a retrieval-augmented language model that firstly retrieves documents from a textual knowledge corpus and then utilizes retrieved documents to process question answering tasks. The abstract from the paper is the following: *Language model pre-training has been shown to capture a surprising amount of world knowledge, crucial for NLP tasks such as question answering. However, this knowledge is stored implicitly in the parameters of a neural network, requiring ever-larger networks to cover more facts. To capture knowledge in a more modular and interpretable way, we augment language model pre-training with a latent knowledge retriever, which allows the model to retrieve and attend over documents from a large corpus such as Wikipedia, used during pre-training, fine-tuning and inference. For the first time, we show how to pre-train such a knowledge retriever in an unsupervised manner, using masked language modeling as the learning signal and backpropagating through a retrieval step that considers millions of documents. We demonstrate the effectiveness of Retrieval-Augmented Language Model pre-training (REALM) by fine-tuning on the challenging task of Open-domain Question Answering (Open-QA). We compare against state-of-the-art models for both explicit and implicit knowledge storage on three popular Open-QA benchmarks, and find that we outperform all previous methods by a significant margin (4-16% absolute accuracy), while also providing qualitative benefits such as interpretability and modularity.* This model was contributed by [qqaatw](https://huggingface.co/qqaatw). The original code can be found [here](https://github.com/google-research/language/tree/master/language/realm). ## RealmConfig [[autodoc]] RealmConfig ## RealmTokenizer [[autodoc]] RealmTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary - batch_encode_candidates ## RealmTokenizerFast [[autodoc]] RealmTokenizerFast - batch_encode_candidates ## RealmRetriever [[autodoc]] RealmRetriever ## RealmEmbedder [[autodoc]] RealmEmbedder - forward ## RealmScorer [[autodoc]] RealmScorer - forward ## RealmKnowledgeAugEncoder [[autodoc]] RealmKnowledgeAugEncoder - forward ## RealmReader [[autodoc]] RealmReader - forward ## RealmForOpenQA [[autodoc]] RealmForOpenQA - block_embedding_to - forward
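To give a concrete feel for the retrieve-then-read workflow, here is a sketch of end-to-end open-domain QA with [`RealmForOpenQA`]. It assumes the `google/realm-orqa-nq-openqa` checkpoint and follows the pattern used in the model's docstrings; the question and candidate answer are purely illustrative.

```python
>>> from transformers import RealmForOpenQA, RealmRetriever, RealmTokenizer

>>> # the retriever holds the pre-computed block embeddings used to fetch evidence passages
>>> retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa")
>>> tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa")
>>> model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa", retriever=retriever)

>>> question = "Who is the pioneer in modern computer science?"
>>> question_ids = tokenizer([question], return_tensors="pt")
>>> answer_ids = tokenizer(
...     ["alan mathison turing"],
...     add_special_tokens=False,
...     return_token_type_ids=False,
...     return_attention_mask=False,
... ).input_ids

>>> reader_output, predicted_answer_ids = model(**question_ids, answer_ids=answer_ids, return_dict=False)
>>> predicted_answer = tokenizer.decode(predicted_answer_ids)
```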
transformers/docs/source/en/model_doc/realm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/realm.md", "repo_id": "transformers", "token_count": 926 }
247
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SEW-D ## Overview SEW-D (Squeezed and Efficient Wav2Vec with Disentangled attention) was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. The abstract from the paper is the following: *This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.* This model was contributed by [anton-l](https://huggingface.co/anton-l). ## Usage tips - SEW-D is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - SEWDForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. ## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## SEWDConfig [[autodoc]] SEWDConfig ## SEWDModel [[autodoc]] SEWDModel - forward ## SEWDForCTC [[autodoc]] SEWDForCTC - forward ## SEWDForSequenceClassification [[autodoc]] SEWDForSequenceClassification - forward
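Since [`SEWDForCTC`] follows the same CTC recipe as Wav2Vec2, transcription looks as in the sketch below. The `asapp/sew-d-tiny-100k-ft-ls100h` checkpoint name is assumed here, and the dummy LibriSpeech split is used only as sample audio.

```python
>>> import torch
>>> from datasets import load_dataset
>>> from transformers import AutoProcessor, SEWDForCTC

>>> processor = AutoProcessor.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h")
>>> model = SEWDForCTC.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h")

>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")

>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> # CTC decoding: take the most likely token per frame, then collapse repeats and blanks
>>> predicted_ids = torch.argmax(logits, dim=-1)
>>> transcription = processor.batch_decode(predicted_ids)
```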
transformers/docs/source/en/model_doc/sew-d.md/0
{ "file_path": "transformers/docs/source/en/model_doc/sew-d.md", "repo_id": "transformers", "token_count": 732 }
248
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # UniSpeech-SAT ## Overview The UniSpeech-SAT model was proposed in [UniSpeech-SAT: Universal Speech Representation Learning with Speaker Aware Pre-Training](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu . The abstract from the paper is the following: *Self-supervised learning (SSL) is a long-standing goal for speech processing, since it utilizes large-scale unlabeled data and avoids extensive human labeling. Recent years witness great successes in applying self-supervised learning in speech recognition, while limited exploration was attempted in applying SSL for modeling speaker characteristics. In this paper, we aim to improve the existing SSL framework for speaker representation learning. Two methods are introduced for enhancing the unsupervised speaker information extraction. First, we apply the multi-task learning to the current SSL framework, where we integrate the utterance-wise contrastive loss with the SSL objective function. Second, for better speaker discrimination, we propose an utterance mixing strategy for data augmentation, where additional overlapped utterances are created unsupervisedly and incorporate during training. We integrate the proposed methods into the HuBERT framework. Experiment results on SUPERB benchmark show that the proposed system achieves state-of-the-art performance in universal representation learning, especially for speaker identification oriented tasks. An ablation study is performed verifying the efficacy of each proposed method. Finally, we scale up training dataset to 94 thousand hours public audio data and achieve further performance improvement in all SUPERB tasks.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech-SAT). ## Usage tips - UniSpeechSat is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. - UniSpeechSat model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. - UniSpeechSat performs especially well on speaker verification, speaker identification, and speaker diarization tasks. 
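To illustrate the speaker-verification use case called out in the last tip, here is a sketch using [`UniSpeechSatForXVector`]. The `microsoft/unispeech-sat-base-plus-sv` checkpoint name and the 0.89 decision threshold are assumptions for illustration; tune the threshold on your own data.

```python
>>> import torch
>>> from datasets import load_dataset
>>> from transformers import AutoFeatureExtractor, UniSpeechSatForXVector

>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv")
>>> model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv")

>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> audio = [ds[0]["audio"]["array"], ds[1]["audio"]["array"]]
>>> inputs = feature_extractor(audio, sampling_rate=16_000, padding=True, return_tensors="pt")

>>> with torch.no_grad():
...     embeddings = model(**inputs).embeddings

>>> # compare the two utterance-level speaker embeddings with cosine similarity
>>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1)
>>> similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
>>> threshold = 0.89  # illustrative decision threshold
>>> same_speaker = bool(similarity > threshold)
```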
## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## UniSpeechSatConfig [[autodoc]] UniSpeechSatConfig ## UniSpeechSat specific outputs [[autodoc]] models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput ## UniSpeechSatModel [[autodoc]] UniSpeechSatModel - forward ## UniSpeechSatForCTC [[autodoc]] UniSpeechSatForCTC - forward ## UniSpeechSatForSequenceClassification [[autodoc]] UniSpeechSatForSequenceClassification - forward ## UniSpeechSatForAudioFrameClassification [[autodoc]] UniSpeechSatForAudioFrameClassification - forward ## UniSpeechSatForXVector [[autodoc]] UniSpeechSatForXVector - forward ## UniSpeechSatForPreTraining [[autodoc]] UniSpeechSatForPreTraining - forward
transformers/docs/source/en/model_doc/unispeech-sat.md/0
{ "file_path": "transformers/docs/source/en/model_doc/unispeech-sat.md", "repo_id": "transformers", "token_count": 1045 }
249
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLNet <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=xlnet"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlnet-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/xlnet-base-cased"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The XLNet model was proposed in [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. XLnet is an extension of the Transformer-XL model pre-trained using an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order. The abstract from the paper is the following: *With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.* This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/zihangdai/xlnet/). ## Usage tips - The specific attention pattern can be controlled at training and test time using the `perm_mask` input. - Due to the difficulty of training a fully auto-regressive model over various factorization order, XLNet is pretrained using only a sub-set of the output tokens as target which are selected with the `target_mapping` input. - To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the `perm_mask` and `target_mapping` inputs to control the attention span and outputs (see examples in *examples/pytorch/text-generation/run_generation.py*) - XLNet is one of the few models that has no sequence length limit. 
- XLNet is not a traditional autoregressive model but uses a training strategy that builds on that. It permutes the tokens in the sentence, then allows the model to use the last n tokens to predict the token n+1. Since this is all done with a mask, the sentence is actually fed in the model in the right order, but instead of masking the first n tokens for n+1, XLNet uses a mask that hides the previous tokens in some given permutation of 1,…,sequence length. - XLNet also uses the same recurrence mechanism as Transformer-XL to build long-term dependencies. ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## XLNetConfig [[autodoc]] XLNetConfig ## XLNetTokenizer [[autodoc]] XLNetTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## XLNetTokenizerFast [[autodoc]] XLNetTokenizerFast ## XLNet specific outputs [[autodoc]] models.xlnet.modeling_xlnet.XLNetModelOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput [[autodoc]] models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput <frameworkcontent> <pt> ## XLNetModel [[autodoc]] XLNetModel - forward ## XLNetLMHeadModel [[autodoc]] XLNetLMHeadModel - forward ## XLNetForSequenceClassification [[autodoc]] XLNetForSequenceClassification - forward ## XLNetForMultipleChoice [[autodoc]] XLNetForMultipleChoice - forward ## XLNetForTokenClassification [[autodoc]] XLNetForTokenClassification - forward ## XLNetForQuestionAnsweringSimple [[autodoc]] XLNetForQuestionAnsweringSimple - forward ## XLNetForQuestionAnswering [[autodoc]] XLNetForQuestionAnswering - forward </pt> <tf> ## TFXLNetModel [[autodoc]] TFXLNetModel - call ## TFXLNetLMHeadModel [[autodoc]] TFXLNetLMHeadModel - call ## TFXLNetForSequenceClassification [[autodoc]] TFXLNetForSequenceClassification - call ## TFLNetForMultipleChoice [[autodoc]] TFXLNetForMultipleChoice - call ## TFXLNetForTokenClassification [[autodoc]] TFXLNetForTokenClassification - call ## TFXLNetForQuestionAnsweringSimple [[autodoc]] TFXLNetForQuestionAnsweringSimple - call </tf> </frameworkcontent>
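To make the `perm_mask`/`target_mapping` mechanics from the usage tips concrete, the sketch below asks [`XLNetLMHeadModel`] to predict the final token of a sequence. It is a minimal illustration of a single prediction step, not a full decoding loop.

```python
>>> import torch
>>> from transformers import XLNetLMHeadModel, XLNetTokenizer

>>> tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-base-cased")

>>> input_ids = torch.tensor(
...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
... ).unsqueeze(0)

>>> # perm_mask[b, i, j] = 1 means token i may not attend to token j;
>>> # here no token may attend to the last position, which is the prediction target.
>>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
>>> perm_mask[:, :, -1] = 1.0

>>> # target_mapping selects the positions the model should produce logits for.
>>> target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
>>> target_mapping[0, 0, -1] = 1.0

>>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
>>> next_token_logits = outputs.logits  # shape (1, 1, vocab_size): logits for the masked position
```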
transformers/docs/source/en/model_doc/xlnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlnet.md", "repo_id": "transformers", "token_count": 2042 }
250
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Train with a script Along with the 🤗 Transformers [notebooks](./noteboks/README), there are also example scripts demonstrating how to train a model for a task with [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), or [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). You will also find scripts we've used in our [research projects](https://github.com/huggingface/transformers/tree/main/examples/research_projects) and [legacy examples](https://github.com/huggingface/transformers/tree/main/examples/legacy) which are mostly community contributed. These scripts are not actively maintained and require a specific version of 🤗 Transformers that will most likely be incompatible with the latest version of the library. The example scripts are not expected to work out-of-the-box on every problem, and you may need to adapt the script to the problem you're trying to solve. To help you with this, most of the scripts fully expose how data is preprocessed, allowing you to edit it as necessary for your use case. For any feature you'd like to implement in an example script, please discuss it on the [forum](https://discuss.huggingface.co/) or in an [issue](https://github.com/huggingface/transformers/issues) before submitting a Pull Request. While we welcome bug fixes, it is unlikely we will merge a Pull Request that adds more functionality at the cost of readability. This guide will show you how to run an example summarization training script in [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) and [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization). All examples are expected to work with both frameworks unless otherwise specified. ## Setup To successfully run the latest version of the example scripts, you have to **install 🤗 Transformers from source** in a new virtual environment: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install . 
``` For older versions of the example scripts, click on the toggle below: <details> <summary>Examples for older versions of 🤗 Transformers</summary> <ul> <li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li> </ul> </details> Then switch your current clone of 🤗 Transformers to a specific version, like v3.5.1 for example: ```bash git checkout tags/v3.5.1 ``` After you've setup the correct library version, navigate to the example folder of your choice and install the example specific requirements: ```bash pip install -r requirements.txt ``` ## Run a script <frameworkcontent> <pt> The example script downloads and preprocesses a dataset from the 🤗 [Datasets](https://huggingface.co/docs/datasets/) library. Then the script fine-tunes a dataset with the [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) on an architecture that supports summarization. 
The following example shows how to fine-tune [T5-small](https://huggingface.co/google-t5/t5-small) on the [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) dataset. The T5 model requires an additional `source_prefix` argument due to how it was trained. This prompt lets T5 know this is a summarization task. ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` </pt> <tf> The example script downloads and preprocesses a dataset from the 🤗 [Datasets](https://huggingface.co/docs/datasets/) library. Then the script fine-tunes a dataset using Keras on an architecture that supports summarization. The following example shows how to fine-tune [T5-small](https://huggingface.co/google-t5/t5-small) on the [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) dataset. The T5 model requires an additional `source_prefix` argument due to how it was trained. This prompt lets T5 know this is a summarization task. ```bash python examples/tensorflow/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 16 \ --num_train_epochs 3 \ --do_train \ --do_eval ``` </tf> </frameworkcontent> ## Distributed training and mixed precision The [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) supports distributed training and mixed precision, which means you can also use it in a script. To enable both of these features: - Add the `fp16` argument to enable mixed precision. - Set the number of GPUs to use with the `nproc_per_node` argument. ```bash torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` TensorFlow scripts utilize a [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) for distributed training, and you don't need to add any additional arguments to the training script. The TensorFlow script will use multiple GPUs by default if they are available. ## Run a script on a TPU <frameworkcontent> <pt> Tensor Processing Units (TPUs) are specifically designed to accelerate performance. PyTorch supports TPUs with the [XLA](https://www.tensorflow.org/xla) deep learning compiler (see [here](https://github.com/pytorch/xla/blob/master/README.md) for more details). To use a TPU, launch the `xla_spawn.py` script and use the `num_cores` argument to set the number of TPU cores you want to use. 
```bash python xla_spawn.py --num_cores 8 \ summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` </pt> <tf> Tensor Processing Units (TPUs) are specifically designed to accelerate performance. TensorFlow scripts utilize a [`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy) for training on TPUs. To use a TPU, pass the name of the TPU resource to the `tpu` argument. ```bash python run_summarization.py \ --tpu name_of_tpu_resource \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 16 \ --num_train_epochs 3 \ --do_train \ --do_eval ``` </tf> </frameworkcontent> ## Run a script with 🤗 Accelerate 🤗 [Accelerate](https://huggingface.co/docs/accelerate) is a PyTorch-only library that offers a unified method for training a model on several types of setups (CPU-only, multiple GPUs, TPUs) while maintaining complete visibility into the PyTorch training loop. Make sure you have 🤗 Accelerate installed if you don't already have it: > Note: As Accelerate is rapidly developing, the git version of accelerate must be installed to run the scripts ```bash pip install git+https://github.com/huggingface/accelerate ``` Instead of the `run_summarization.py` script, you need to use the `run_summarization_no_trainer.py` script. 🤗 Accelerate supported scripts will have a `task_no_trainer.py` file in the folder. Begin by running the following command to create and save a configuration file: ```bash accelerate config ``` Test your setup to make sure it is configured correctly: ```bash accelerate test ``` Now you are ready to launch the training: ```bash accelerate launch run_summarization_no_trainer.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir ~/tmp/tst-summarization ``` ## Use a custom dataset The summarization script supports custom datasets as long as they are a CSV or JSON Line file. When you use your own dataset, you need to specify several additional arguments: - `train_file` and `validation_file` specify the path to your training and validation files. - `text_column` is the input text to summarize. - `summary_column` is the target text to output. A summarization script using a custom dataset would look like this: ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --train_file path_to_csv_or_jsonlines_file \ --validation_file path_to_csv_or_jsonlines_file \ --text_column text_column_name \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate ``` ## Test a script It is often a good idea to run your script on a smaller number of dataset examples to ensure everything works as expected before committing to an entire dataset which may take hours to complete. 
Use the following arguments to truncate the dataset to a maximum number of samples: - `max_train_samples` - `max_eval_samples` - `max_predict_samples` ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --max_train_samples 50 \ --max_eval_samples 50 \ --max_predict_samples 50 \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` Not all example scripts support the `max_predict_samples` argument. If you aren't sure whether your script supports this argument, add the `-h` argument to check: ```bash examples/pytorch/summarization/run_summarization.py -h ``` ## Resume training from checkpoint Another helpful option to enable is resuming training from a previous checkpoint. This will ensure you can pick up where you left off without starting over if your training gets interrupted. There are two methods to resume training from a checkpoint. The first method uses the `output_dir previous_output_dir` argument to resume training from the latest checkpoint stored in `output_dir`. In this case, you should remove `overwrite_output_dir`: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --output_dir previous_output_dir \ --predict_with_generate ``` The second method uses the `resume_from_checkpoint path_to_specific_checkpoint` argument to resume training from a specific checkpoint folder. ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` ## Share your model All scripts can upload your final model to the [Model Hub](https://huggingface.co/models). Make sure you are logged into Hugging Face before you begin: ```bash huggingface-cli login ``` Then add the `push_to_hub` argument to the script. This argument will create a repository with your Hugging Face username and the folder name specified in `output_dir`. To give your repository a specific name, use the `push_to_hub_model_id` argument to add it. The repository will be automatically listed under your namespace. The following example shows how to upload a model with a specific repository name: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ```
transformers/docs/source/en/run_scripts.md/0
{ "file_path": "transformers/docs/source/en/run_scripts.md", "repo_id": "transformers", "token_count": 5916 }
251
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Monocular depth estimation Monocular depth estimation is a computer vision task that involves predicting the depth information of a scene from a single image. In other words, it is the process of estimating the distance of objects in a scene from a single camera viewpoint. Monocular depth estimation has various applications, including 3D reconstruction, augmented reality, autonomous driving, and robotics. It is a challenging task as it requires the model to understand the complex relationships between objects in the scene and the corresponding depth information, which can be affected by factors such as lighting conditions, occlusion, and texture. <Tip> The task illustrated in this tutorial is supported by the following model architectures: <!--This tip is automatically generated by `make fix-copies`, do not fill manually!--> [Depth Anything](../model_doc/depth_anything), [DPT](../model_doc/dpt), [GLPN](../model_doc/glpn) <!--End of the generated tip--> </Tip> In this guide you'll learn how to: * create a depth estimation pipeline * run depth estimation inference by hand Before you begin, make sure you have all the necessary libraries installed: ```bash pip install -q transformers ``` ## Depth estimation pipeline The simplest way to try out inference with a model supporting depth estimation is to use the corresponding [`pipeline`]. Instantiate a pipeline from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?pipeline_tag=depth-estimation&sort=downloads): ```py >>> from transformers import pipeline >>> checkpoint = "vinvino02/glpn-nyu" >>> depth_estimator = pipeline("depth-estimation", model=checkpoint) ``` Next, choose an image to analyze: ```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/HwBAsSbPBDU/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MzR8fGNhciUyMGluJTIwdGhlJTIwc3RyZWV0fGVufDB8MHx8fDE2Nzg5MDEwODg&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-estimation-example.jpg" alt="Photo of a busy street"/> </div> Pass the image to the pipeline. ```py >>> predictions = depth_estimator(image) ``` The pipeline returns a dictionary with two entries. The first one, called `predicted_depth`, is a tensor with the values being the depth expressed in meters for each pixel. The second one, `depth`, is a PIL image that visualizes the depth estimation result. 
Let's take a look at the visualized result: ```py >>> predictions["depth"] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-visualization.png" alt="Depth estimation visualization"/> </div> ## Depth estimation inference by hand Now that you've seen how to use the depth estimation pipeline, let's see how we can replicate the same result by hand. Start by loading the model and associated processor from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?pipeline_tag=depth-estimation&sort=downloads). Here we'll use the same checkpoint as before: ```py >>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation >>> checkpoint = "vinvino02/glpn-nyu" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) >>> model = AutoModelForDepthEstimation.from_pretrained(checkpoint) ``` Prepare the image input for the model using the `image_processor` that will take care of the necessary image transformations such as resizing and normalization: ```py >>> pixel_values = image_processor(image, return_tensors="pt").pixel_values ``` Pass the prepared inputs through the model: ```py >>> import torch >>> with torch.no_grad(): ... outputs = model(pixel_values) ... predicted_depth = outputs.predicted_depth ``` Visualize the results: ```py >>> import numpy as np >>> # interpolate to original size >>> prediction = torch.nn.functional.interpolate( ... predicted_depth.unsqueeze(1), ... size=image.size[::-1], ... mode="bicubic", ... align_corners=False, ... ).squeeze() >>> output = prediction.numpy() >>> formatted = (output * 255 / np.max(output)).astype("uint8") >>> depth = Image.fromarray(formatted) >>> depth ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-visualization.png" alt="Depth estimation visualization"/> </div>
transformers/docs/source/en/tasks/monocular_depth_estimation.md/0
{ "file_path": "transformers/docs/source/en/tasks/monocular_depth_estimation.md", "repo_id": "transformers", "token_count": 1553 }
252
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Testing Let's take a look at how 🤗 Transformers models are tested and how you can write new tests and improve the existing ones. There are 2 test suites in the repository: 1. `tests` -- tests for the general API 2. `examples` -- tests primarily for various applications that aren't part of the API ## How transformers are tested 1. Once a PR is submitted it gets tested with 9 CircleCi jobs. Every new commit to that PR gets retested. These jobs are defined in this [config file](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml), so that if needed you can reproduce the same environment on your machine. These CI jobs don't run `@slow` tests. 2. There are 3 jobs run by [github actions](https://github.com/huggingface/transformers/actions): - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): checks whether torch hub integration works. - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): runs fast tests on GPU only on commits on `main`. It only runs if a commit on `main` has updated the code in one of the following folders: `src`, `tests`, `.github` (to prevent running on added model cards, notebooks, etc.) - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): runs normal and slow tests on GPU in `tests` and `examples`: ```bash RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/ ``` The results can be observed [here](https://github.com/huggingface/transformers/actions). ## Running tests ### Choosing which tests to run This document goes into many details of how tests can be run. If after reading everything, you need even more details you will find them [here](https://docs.pytest.org/en/latest/usage.html). Here are some most useful ways of running tests. Run all: ```console pytest ``` or: ```bash make test ``` Note that the latter is defined as: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` which tells pytest to: - run as many test processes as they are CPU cores (which could be too many if you don't have a ton of RAM!) - ensure that all tests from the same file will be run by the same test process - do not capture output - run in verbose mode ### Getting the list of all tests All tests of the test suite: ```bash pytest --collect-only -q ``` All tests of a given test file: ```bash pytest tests/test_optimization.py --collect-only -q ``` ### Run a specific test module To run an individual test module: ```bash pytest tests/utils/test_logging.py ``` ### Run specific tests Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest class containing those tests. 
For example, it could be: ```bash pytest tests/test_optimization.py::OptimizationTest::test_adam_w ``` Here: - `tests/test_optimization.py` - the file with tests - `OptimizationTest` - the name of the class - `test_adam_w` - the name of the specific test function If the file contains multiple classes, you can choose to run only tests of a given class. For example: ```bash pytest tests/test_optimization.py::OptimizationTest ``` will run all the tests inside that class. As mentioned earlier you can see what tests are contained inside the `OptimizationTest` class by running: ```bash pytest tests/test_optimization.py::OptimizationTest --collect-only -q ``` You can run tests by keyword expressions. To run only tests whose name contains `adam`: ```bash pytest -k adam tests/test_optimization.py ``` Logical `and` and `or` can be used to indicate whether all keywords should match or either. `not` can be used to negate. To run all tests except those whose name contains `adam`: ```bash pytest -k "not adam" tests/test_optimization.py ``` And you can combine the two patterns in one: ```bash pytest -k "ada and not adam" tests/test_optimization.py ``` For example to run both `test_adafactor` and `test_adam_w` you can use: ```bash pytest -k "test_adam_w or test_adam_w" tests/test_optimization.py ``` Note that we use `or` here, since we want either of the keywords to match to include both. If you want to include only tests that include both patterns, `and` is to be used: ```bash pytest -k "test and ada" tests/test_optimization.py ``` ### Run `accelerate` tests Sometimes you need to run `accelerate` tests on your models. For that you can just add `-m accelerate_tests` to your command, if let's say you want to run these tests on `OPT` run: ```bash RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py ``` ### Run documentation tests In order to test whether the documentation examples are correct, you should check that the `doctests` are passing. As an example, let's use [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035): ```python r""" Returns: Example: ```python >>> import torch >>> from transformers import WhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = WhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_features = inputs.input_features >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" ``` Just run the following line to automatically test every docstring example in the desired file: ```bash pytest --doctest-modules <path_to_file_or_dir> ``` If the file has a markdown extention, you should add the `--doctest-glob="*.md"` argument. ### Run only modified tests You can run the tests related to the unstaged files or the current branch (according to Git) by using [pytest-picked](https://github.com/anapaulagomes/pytest-picked). This is a great way of quickly testing your changes didn't break anything, since it won't run the tests related to files you didn't touch. 
```bash pip install pytest-picked ``` ```bash pytest --picked ``` All tests will be run from files and folders which are modified, but not yet committed. ### Automatically rerun failed tests on source modification [pytest-xdist](https://github.com/pytest-dev/pytest-xdist) provides a very useful feature of detecting all failed tests, and then waiting for you to modify files and continuously re-run those failing tests until they pass while you fix them, so that you don't need to restart pytest after you make the fix. This is repeated until all tests pass, after which again a full run is performed. ```bash pip install pytest-xdist ``` To enter the mode: `pytest -f` or `pytest --looponfail` File changes are detected by looking at `looponfailroots` root directories and all of their contents (recursively). If the default for this value does not work for you, you can change it in your project by setting a configuration option in `setup.cfg`: ```ini [tool:pytest] looponfailroots = transformers tests ``` or `pytest.ini`/`tox.ini` files: ```ini [pytest] looponfailroots = transformers tests ``` This would lead to only looking for file changes in the respective directories, specified relatively to the ini-file's directory. [pytest-watch](https://github.com/joeyespo/pytest-watch) is an alternative implementation of this functionality. ### Skip a test module If you want to run all test modules except a few, you can exclude them by giving an explicit list of tests to run. For example, to run all except `test_modeling_*.py` tests: ```bash pytest $(ls -1 tests/*py | grep -v test_modeling) ``` ### Clearing state For CI builds, and when isolation is important (at the expense of speed), the cache should be cleared: ```bash pytest --cache-clear tests ``` ### Running tests in parallel As mentioned earlier `make test` runs tests in parallel via the `pytest-xdist` plugin (`-n X` argument, e.g. `-n 2` to run 2 parallel jobs). `pytest-xdist`'s `--dist=` option allows one to control how the tests are grouped. `--dist=loadfile` puts the tests located in one file onto the same process. Since the order of executed tests is different and unpredictable, if running the test suite with `pytest-xdist` produces failures (meaning we have some undetected coupled tests), use [pytest-replay](https://github.com/ESSS/pytest-replay) to replay the tests in the same order, which should then help reduce that failing sequence to a minimum. ### Test order and repetition It's good to repeat the tests several times, in sequence, randomly, or in sets, to detect any potential inter-dependency and state-related bugs (tear down). And straightforward multiple repetition is just good for detecting some problems that get uncovered by the randomness of DL. #### Repeat tests - [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder): ```bash pip install pytest-flakefinder ``` And then run every test multiple times (50 by default): ```bash pytest --flake-finder --flake-runs=5 tests/test_failing_test.py ``` <Tip> This plugin doesn't work with the `-n` flag from `pytest-xdist`. </Tip> <Tip> There is another plugin `pytest-repeat`, but it doesn't work with `unittest`. </Tip> #### Run tests in a random order ```bash pip install pytest-random-order ``` Important: the presence of `pytest-random-order` will automatically randomize tests, no configuration change or command line option is required. As explained earlier this allows detection of coupled tests - where one test's state affects the state of another.
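To make the idea of coupled tests concrete, here is a minimal, hypothetical illustration (the module and test names are made up for this example): the second test silently relies on state mutated by the first one, so it passes in file order but fails as soon as the order gets shuffled.

```python
# test_coupled.py - a deliberately bad pair of tests, used only to illustrate coupling
shared_cache = {}


def test_populates_cache():
    # mutates module-level state that the next test silently relies on
    shared_cache["ready"] = True
    assert shared_cache["ready"] is True


def test_uses_cache():
    # passes only if test_populates_cache already ran; fails when the order is shuffled
    assert shared_cache.get("ready") is True
```

Running such a file with `pytest --random-order-bucket=global` will sooner or later schedule `test_uses_cache` first and expose the hidden dependency.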
When `pytest-random-order` is installed it will print the random seed it used for that session, e.g.: ```bash pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` So that if the given particular sequence fails, you can reproduce it by adding that exact seed, e.g.: ```bash pytest --random-order-seed=573663 [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` It will only reproduce the exact order if you use the exact same list of tests (or no list at all). Once you start manually narrowing down the list you can no longer rely on the seed, but have to list the tests manually in the exact order they failed and tell pytest not to randomize them, using `--random-order-bucket=none`, e.g.: ```bash pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py ``` To disable the shuffling for all tests: ```bash pytest --random-order-bucket=none ``` By default `--random-order-bucket=module` is implied, which will shuffle the files on the module level. It can also shuffle on `class`, `package`, `global` and `none` levels. For the complete details please see its [documentation](https://github.com/jbasko/pytest-random-order). Another randomization alternative is [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly). This module has very similar functionality/interface, but it doesn't have the bucket modes available in `pytest-random-order`. It has the same problem of imposing itself once installed. ### Look and feel variations #### pytest-sugar [pytest-sugar](https://github.com/Frozenball/pytest-sugar) is a plugin that improves the look and feel, adds a progress bar, and shows the tests that fail and the assert instantly. It gets activated automatically upon installation. ```bash pip install pytest-sugar ``` To run tests without it, run: ```bash pytest -p no:sugar ``` or uninstall it. #### Report each sub-test name and its progress For a single or a group of tests via `pytest` (after `pip install pytest-pspec`): ```bash pytest --pspec tests/test_optimization.py ``` #### Instantly show failed tests [pytest-instafail](https://github.com/pytest-dev/pytest-instafail) shows failures and errors instantly instead of waiting until the end of the test session. ```bash pip install pytest-instafail ``` ```bash pytest --instafail ``` ### To GPU or not to GPU On a GPU-enabled setup, to test in CPU-only mode add `CUDA_VISIBLE_DEVICES=""`: ```bash CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py ``` or if you have multiple GPUs, you can specify which one is to be used by `pytest`. For example, to use only the second GPU if you have GPUs `0` and `1`, you can run: ```bash CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py ``` This is handy when you want to run different tasks on different GPUs. Some tests must be run CPU-only, others on either CPU or GPU or TPU, yet others on multiple GPUs.
The following skip decorators are used to set the requirements of tests CPU/GPU/TPU-wise: - `require_torch` - this test will run only under torch - `require_torch_gpu` - as `require_torch` plus requires at least 1 GPU - `require_torch_multi_gpu` - as `require_torch` plus requires at least 2 GPUs - `require_torch_non_multi_gpu` - as `require_torch` plus requires 0 or 1 GPUs - `require_torch_up_to_2_gpus` - as `require_torch` plus requires 0 or 1 or 2 GPUs - `require_torch_xla` - as `require_torch` plus requires at least 1 TPU Let's depict the GPU requirements in the following table: | n gpus | decorator | |--------|--------------------------------| | `>= 0` | `@require_torch` | | `>= 1` | `@require_torch_gpu` | | `>= 2` | `@require_torch_multi_gpu` | | `< 2` | `@require_torch_non_multi_gpu` | | `< 3` | `@require_torch_up_to_2_gpus` | For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed: ```python no-style @require_torch_multi_gpu def test_example_with_multi_gpu(): ``` If a test requires `tensorflow` use the `require_tf` decorator. For example: ```python no-style @require_tf def test_tf_thing_with_tensorflow(): ``` These decorators can be stacked. For example, if a test is slow and requires at least one GPU under pytorch, here is how to set it up: ```python no-style @require_torch_gpu @slow def test_example_slow_on_gpu(): ``` Some decorators like `@parameterized` rewrite test names, therefore `@require_*` skip decorators have to be listed last for them to work correctly. Here is an example of the correct usage: ```python no-style @parameterized.expand(...) @require_torch_multi_gpu def test_integration_foo(): ``` This order problem doesn't exist with `@pytest.mark.parametrize`: you can put it first or last and it will still work. But it only works with non-unittests. Inside tests: - How many GPUs are available: ```python from transformers.testing_utils import get_gpu_count n_gpu = get_gpu_count() # works with torch and tf ``` ### Testing with a specific PyTorch backend or device To run the test suite on a specific torch device add `TRANSFORMERS_TEST_DEVICE="$device"` where `$device` is the target backend. For example, to test on CPU only: ```bash TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py ``` This variable is useful for testing custom or less common PyTorch backends such as `mps`. It can also be used to achieve the same effect as `CUDA_VISIBLE_DEVICES` by targeting specific GPUs or testing in CPU-only mode. Certain devices will require an additional import after importing `torch` for the first time. This can be specified using the environment variable `TRANSFORMERS_TEST_BACKEND`: ```bash TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py ``` Alternative backends may also require the replacement of device-specific functions. For example `torch.cuda.manual_seed` may need to be replaced with a device-specific seed setter like `torch.npu.manual_seed` to correctly set a random seed on the device. To specify a new backend with backend-specific device functions when running the test suite, create a Python device specification file in the format: ``` import torch import torch_npu # !! Further additional imports can be added here !! # Specify the device name (e.g. 'cuda', 'cpu', 'npu') DEVICE_NAME = 'npu' # Specify device-specific backends to dispatch to.
# If not specified, will fall back to 'default' in 'testing_utils.py' MANUAL_SEED_FN = torch.npu.manual_seed EMPTY_CACHE_FN = torch.npu.empty_cache DEVICE_COUNT_FN = torch.npu.device_count ``` This format also allows for specification of any additional imports required. To use this file to replace equivalent methods in the test suite, set the environment variable `TRANSFORMERS_TEST_DEVICE_SPEC` to the path of the spec file. Currently, only `MANUAL_SEED_FN`, `EMPTY_CACHE_FN` and `DEVICE_COUNT_FN` are supported for device-specific dispatch. ### Distributed training `pytest` can't deal with distributed training directly. If this is attempted, the sub-processes don't do the right thing and end up thinking they are `pytest` and start running the test suite in loops. It works, however, if one spawns a normal process that then spawns off multiple workers and manages the IO pipes. Here are some tests that use it: - [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py) - [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py) To jump right into the execution point, search for the `execute_subprocess_async` call in those tests. You will need at least 2 GPUs to see these tests in action: ```bash CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/trainer/test_trainer_distributed.py ``` ### Output capture During test execution any output sent to `stdout` and `stderr` is captured. If a test or a setup method fails, its corresponding captured output will usually be shown along with the failure traceback. To disable output capturing and to get the `stdout` and `stderr` normally, use `-s` or `--capture=no`: ```bash pytest -s tests/utils/test_logging.py ``` To send test results to JUnit format output: ```bash pytest tests --junitxml=result.xml ``` ### Color control To have no color (e.g., yellow on white background is not readable): ```bash pytest --color=no tests/utils/test_logging.py ``` ### Sending test report to online pastebin service Creating a URL for each test failure: ```bash pytest --pastebin=failed tests/utils/test_logging.py ``` This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add, for example, `-x` if you only want to send one particular failure. Creating a URL for a whole test session log: ```bash pytest --pastebin=all tests/utils/test_logging.py ``` ## Writing tests 🤗 transformers tests are based on `unittest`, but run by `pytest`, so most of the time features from both systems can be used. You can read [here](https://docs.pytest.org/en/stable/unittest.html) which features are supported, but the important thing to remember is that most `pytest` fixtures don't work. Neither does parametrization, but we use the module `parameterized`, which works in a similar way. ### Parametrization Often, there is a need to run the same test multiple times, but with different arguments. It could be done from within the test, but then there is no way of running that test for just one set of arguments.
```python # test_this1.py import math import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): self.assertEqual(math.floor(input), expected) ``` Now, by default this test will be run 3 times, each time with the last 3 arguments of `test_floor` being assigned the corresponding arguments in the parameter list. And you could run just the `negative` and `integer` sets of params with: ```bash pytest -k "negative and integer" tests/test_mytest.py ``` or all but `negative` sub-tests, with: ```bash pytest -k "not negative" tests/test_mytest.py ``` Besides using the `-k` filter that was just mentioned, you can find out the exact name of each sub-test and run any or all of them using their exact names. ```bash pytest test_this1.py --collect-only -q ``` and it will list: ```bash test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction ``` So now you can run just 2 specific sub-tests: ```bash pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer ``` The module [parameterized](https://pypi.org/project/parameterized/), which is already in the developer dependencies of `transformers`, works for both `unittest` and `pytest` tests. If, however, the test is not a `unittest`, you may use `pytest.mark.parametrize` (or you may see it being used in some existing tests, mostly under `examples`). Here is the same example, this time using `pytest`'s `parametrize` marker: ```python # test_this2.py import math import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert math.floor(input) == expected ``` Same as with `parameterized`, with `pytest.mark.parametrize` you can have fine control over which sub-tests are run, if the `-k` filter doesn't do the job. Except, this parametrization function creates a slightly different set of names for the sub-tests. Here is what they look like: ```bash pytest test_this2.py --collect-only -q ``` and it will list: ```bash test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1] ``` So now you can run just the specific test: ```bash pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0] ``` as in the previous example. ### Files and directories In tests we often need to know where things are relative to the current test file, and it's not trivial since the test could be invoked from more than one directory or could reside in sub-directories with different depths. A helper class `transformers.testing_utils.TestCasePlus` solves this problem by sorting out all the basic paths and providing easy accessors to them: - `pathlib` objects (all fully resolved): - `test_file_path` - the current test file path, i.e. `__file__` - `test_file_dir` - the directory containing the current test file - `tests_dir` - the directory of the `tests` test suite - `examples_dir` - the directory of the `examples` test suite - `repo_root_dir` - the directory of the repository - `src_dir` - the directory of `src` (i.e.
where the `transformers` sub-dir resides) - stringified paths: same as above, but these return paths as strings, rather than `pathlib` objects: - `test_file_path_str` - `test_file_dir_str` - `tests_dir_str` - `examples_dir_str` - `repo_root_dir_str` - `src_dir_str` To start using those, all you need to do is make sure that the test resides in a subclass of `transformers.testing_utils.TestCasePlus`. For example: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_local_locations(self): data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro" ``` If you don't need to manipulate paths via `pathlib` or you just need a path as a string, you can always invoke `str()` on the `pathlib` object or use the accessors ending with `_str`. For example: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_stringified_locations(self): examples_dir = self.examples_dir_str ``` ### Temporary files and directories Using unique temporary files and directories is essential for parallel test running, so that the tests won't overwrite each other's data. Also we want to get the temporary files and directories removed at the end of each test that created them. Therefore, using packages like `tempfile`, which address these needs, is essential. However, when debugging tests, you need to be able to see what goes into the temporary file or directory and you want to know its exact path and not have it randomized on every test re-run. A helper class `transformers.testing_utils.TestCasePlus` is best used for such purposes. It's a sub-class of `unittest.TestCase`, so we can easily inherit from it in the test modules. Here is an example of its usage: ```python from transformers.testing_utils import TestCasePlus class ExamplesTests(TestCasePlus): def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` This code creates a unique temporary directory, and sets `tmp_dir` to its location. - Create a unique temporary dir: ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` `tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the test. - Create a temporary dir of my choice, ensure it's empty before the test starts and don't empty it after the test. ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir("./xxx") ``` This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests didn't leave any data in there. - You can override the default behavior by directly overriding the `before` and `after` args, leading to one of the following behaviors: - `before=True`: the temporary dir will always be cleared at the beginning of the test. - `before=False`: if the temporary dir already existed, any existing files will remain there. - `after=True`: the temporary dir will always be deleted at the end of the test. - `after=False`: the temporary dir will always be left intact at the end of the test. <Tip> In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are allowed if an explicit `tmp_dir` is used, so that no `/tmp` or similar important part of the filesystem will get nuked by mistake. I.e. please always pass paths that start with `./`. </Tip> <Tip> Each test can register multiple temporary directories and they all will get auto-removed, unless requested otherwise.
</Tip> ### Temporary sys.path override If you need to temporarily override `sys.path` to import from another test, for example, you can use the `ExtendSysPath` context manager. Example: ```python import os from transformers.testing_utils import ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/.."): from test_trainer import TrainerIntegrationCommon # noqa ``` ### Skipping tests This is useful when a bug is found and a new test is written, yet the bug is not fixed yet. In order to be able to commit it to the main repository we need to make sure it's skipped during `make test`. Methods: - A **skip** means that you expect your test to pass only if some conditions are met, otherwise pytest should skip running the test altogether. Common examples are skipping windows-only tests on non-windows platforms, or skipping tests that depend on an external resource which is not available at the moment (for example a database). - An **xfail** means that you expect a test to fail for some reason. A common example is a test for a feature not yet implemented, or a bug not yet fixed. When a test passes despite being expected to fail (marked with pytest.mark.xfail), it's an xpass and will be reported in the test summary. One of the important differences between the two is that `skip` doesn't run the test, and `xfail` does. So if the code that's buggy causes some bad state that will affect other tests, do not use `xfail`. #### Implementation - Here is how to skip a whole test unconditionally: ```python no-style @unittest.skip("this bug needs to be fixed") def test_feature_x(): ``` or via pytest: ```python no-style @pytest.mark.skip(reason="this bug needs to be fixed") ``` or the `xfail` way: ```python no-style @pytest.mark.xfail def test_feature_x(): ``` Here's how to skip a test based on internal checks within the test: ```python def test_feature_x(): if not has_something(): pytest.skip("unsupported configuration") ``` or the whole module: ```python import pytest if not pytest.config.getoption("--custom-flag"): pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True) ``` or the `xfail` way: ```python def test_feature_x(): pytest.xfail("expected to fail until bug XYZ is fixed") ``` - Here is how to skip all tests in a module if some import is missing: ```python docutils = pytest.importorskip("docutils", minversion="0.3") ``` - Skip a test based on a condition: ```python no-style @pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher") def test_feature_x(): ``` or: ```python no-style @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_feature_x(): ``` or skip the whole module: ```python no-style @pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") class TestClass(): def test_feature_x(self): ``` More details, examples and ways are [here](https://docs.pytest.org/en/latest/skipping.html). ### Slow tests The library of tests is ever-growing, and some of the tests take minutes to run, therefore we can't afford waiting for an hour for the test suite to complete on CI.
Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below: ```python no-style from transformers.testing_utils import slow @slow def test_integration_foo(): ``` Once a test is marked as `@slow`, to run such tests set the `RUN_SLOW=1` env var, e.g.: ```bash RUN_SLOW=1 pytest tests ``` Some decorators like `@parameterized` rewrite test names, therefore `@slow` and the rest of the skip decorators `@require_*` have to be listed last for them to work correctly. Here is an example of the correct usage: ```python no-style @parameterized.expand(...) @slow def test_integration_foo(): ``` As explained at the beginning of this document, slow tests get to run on a scheduled basis, rather than in PR CI checks. So it's possible that some problems will be missed during a PR submission and get merged. Such problems will get caught during the next scheduled CI job. But it also means that it's important to run the slow tests on your machine before submitting the PR. Here is a rough decision-making mechanism for choosing which tests should be marked as slow: If the test is focused on one of the library's internal components (e.g., modeling files, tokenization files, pipelines), then we should run that test in the non-slow test suite. If it's focused on another aspect of the library, such as the documentation or the examples, then we should run these tests in the slow test suite. And then, to refine this approach we should have exceptions: - All tests that need to download a heavy set of weights or a dataset that is larger than ~50MB (e.g., model or tokenizer integration tests, pipeline integration tests) should be set to slow. If you're adding a new model, you should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is discussed in the following paragraphs. - All tests that need to do a training not specifically optimized to be fast should be set to slow. - We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to `@slow`. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as `@slow`. - If a test completes under 1 second on CI (including downloads if any) then it should be a normal test regardless. Collectively, all the non-slow tests need to cover entirely the different internals, while remaining fast. For example, a significant coverage can be achieved by testing with specially created tiny models with random weights. Such models have the very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the `@slow` tests can use large slow models to do qualitative testing. To see the use of these simply look for *tiny* models with: ```bash grep tiny tests examples ``` Here is an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) that created the tiny model [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de). You can easily adjust it to your specific model's architecture. It's easy to measure the run-time incorrectly if, for example, there is an overhead of downloading a huge model, but if you test it locally the downloaded files would be cached and thus the download time not measured. Hence check the execution speed report in CI logs instead (the output of `pytest --durations=0 tests`). That report is also useful to find slow outliers that aren't marked as such, or which need to be re-written to be fast.
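If you want to inspect this locally before pushing, you can also cap the durations report so that only the worst offenders are shown; the thresholds below are just an illustration, not a project convention:

```bash
# show only the 10 slowest tests, hiding anything that runs faster than 0.5s
pytest --durations=10 --durations-min=0.5 tests/utils
```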
If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests. ### Testing the stdout/stderr output In order to test functions that write to `stdout` and/or `stderr`, the test can access those streams using `pytest`'s [capsys system](https://docs.pytest.org/en/latest/capture.html). Here is how this is accomplished: ```python import sys def print_to_stdout(s): print(s) def print_to_stderr(s): sys.stderr.write(s) def test_result_and_stdout(capsys): msg = "Hello" print_to_stdout(msg) print_to_stderr(msg) out, err = capsys.readouterr() # consume the captured output streams # optional: if you want to replay the consumed streams: sys.stdout.write(out) sys.stderr.write(err) # test: assert msg in out assert msg in err ``` And, of course, most of the time, `stderr` will come as a part of an exception, so try/except has to be used in such a case: ```python def raise_exception(msg): raise ValueError(msg) def test_something_exception(): msg = "Not a good value" error = "" try: raise_exception(msg) except Exception as e: error = str(e) assert msg in error, f"{msg} is in the exception:\n{error}" ``` Another approach to capturing stdout is via `contextlib.redirect_stdout`: ```python import sys from io import StringIO from contextlib import redirect_stdout def print_to_stdout(s): print(s) def test_result_and_stdout(): msg = "Hello" buffer = StringIO() with redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() # optional: if you want to replay the consumed streams: sys.stdout.write(out) # test: assert msg in out ``` An important potential issue with capturing stdout is that it may contain `\r` characters that in normal `print` reset everything that has been printed so far. There is no problem with `pytest`, but with `pytest -s` these characters get included in the buffer, so to be able to have the test run with and without `-s`, you have to make an extra cleanup to the captured output, using `re.sub(r'^.*\r', '', buf, 0, re.M)`. But then we have a helper context manager wrapper that automatically takes care of it all, regardless of whether it has some `\r`'s in it or not, so it's as simple as: ```python from transformers.testing_utils import CaptureStdout with CaptureStdout() as cs: function_that_writes_to_stdout() print(cs.out) ``` Here is a full test example: ```python from transformers.testing_utils import CaptureStdout msg = "Secret message\r" final = "Hello World" with CaptureStdout() as cs: print(msg + final) assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}" ``` If you'd like to capture `stderr` use the `CaptureStderr` class instead: ```python from transformers.testing_utils import CaptureStderr with CaptureStderr() as cs: function_that_writes_to_stderr() print(cs.err) ``` If you need to capture both streams at once, use the parent `CaptureStd` class: ```python from transformers.testing_utils import CaptureStd with CaptureStd() as cs: function_that_writes_to_stdout_and_stderr() print(cs.err, cs.out) ``` Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on exit from the context.
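As a sketch of how `CaptureStd` looks inside an actual test function (the `report` helper below is hypothetical and exists only for this example), you can assert on both attributes after the context manager exits:

```python
import sys

from transformers.testing_utils import CaptureStd


def report(msg):
    # hypothetical helper that writes the same message to both streams
    print(msg)
    sys.stderr.write(msg + "\n")


def test_report_writes_to_both_streams():
    with CaptureStd() as cs:
        report("status: ok")
    # the captured text stays available on the context manager after exit
    assert "status: ok" in cs.out
    assert "status: ok" in cs.err
```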
### Capturing logger stream If you need to validate the output of a logger, you can use `CaptureLogger`: ```python from transformers import logging from transformers.testing_utils import CaptureLogger msg = "Testing 1, 2, 3" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.bart.tokenization_bart") with CaptureLogger(logger) as cl: logger.info(msg) assert cl.out == msg + "\n" ``` ### Testing with environment variables If you want to test the impact of environment variables for a specific test you can use the helper decorator `transformers.testing_utils.mockenv`: ```python from transformers.testing_utils import mockenv class HfArgumentParserTest(unittest.TestCase): @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) ``` At times an external program needs to be called, which requires setting `PYTHONPATH` in `os.environ` to include multiple local paths. A helper class `transformers.testing_utils.TestCasePlus` comes to help: ```python from transformers.testing_utils import TestCasePlus class EnvExampleTest(TestCasePlus): def test_external_prog(self): env = self.get_env() # now call the external program, passing `env` to it ``` Depending on whether the test file was under the `tests` test suite or `examples` it'll correctly set up `env[PYTHONPATH]` to include one of these two directories, and also the `src` directory to ensure the testing is done against the current repo, and finally with whatever `env[PYTHONPATH]` was already set to before the test was called, if anything. This helper method creates a copy of the `os.environ` object, so the original remains intact. ### Getting reproducible results In some situations you may want to remove randomness for your tests. To get identical reproducible results, you will need to fix the seed: ```python seed = 42 # python RNG import random random.seed(seed) # pytorch RNGs import torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = True if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) # numpy RNG import numpy as np np.random.seed(seed) # tf RNG tf.random.set_seed(seed) ``` ### Debugging tests To start a debugger at the point of the warning, do this: ```bash pytest tests/utils/test_logging.py -W error::UserWarning --pdb ``` ## Working with GitHub Actions workflows To trigger a self-push workflow CI job, you must: 1. Create a new branch on `transformers` origin (not a fork!). 2. The branch name has to start with either `ci_` or `ci-` (`main` triggers it too, but we can't do PRs on `main`). It also gets triggered only for specific paths - you can find the up-to-date definition, in case it changed since this document has been written, [here](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) under *push:* 3. Create a PR from this branch. 4. Then you can see the job appear [here](https://github.com/huggingface/transformers/actions/workflows/self-push.yml). It may not run right away if there is a backlog. ## Testing Experimental CI Features Testing CI features can be potentially problematic as it can interfere with the normal CI functioning. Therefore if a new CI feature is to be added, it should be done as follows. 1. Create a new dedicated job that tests what needs to be tested. 2. The new job must always succeed so that it gives us a green ✓ (details below). 3.
Let it run for some days to see that a variety of different PR types get to run on it (user fork branches, non-forked branches, branches originating from github.com UI direct file edit, various forced pushes, etc. - there are so many) while monitoring the experimental job's logs (not the overall job green as it's purposefully always green). 4. When it's clear that everything is solid, then merge the new changes into existing jobs. That way experiments on CI functionality itself won't interfere with the normal workflow. Now how can we make the job always succeed while the new CI feature is being developed? Some CIs, like TravisCI, support ignore-step-failure and will report the overall job as successful, but CircleCI and GitHub Actions as of this writing don't support that. So the following workaround can be used: 1. `set +euo pipefail` at the beginning of the run command to suppress most potential failures in the bash script. 2. The last command must be a success: `echo "done"` or just `true` will do. Here is an example: ```yaml - run: name: run CI experiment command: | set +euo pipefail echo "setting run-all-despite-any-errors-mode" this_command_will_fail echo "but bash continues to run" # emulate another failure false # but the last command must be a success echo "during experiment do not remove: reporting success to CI, even if there were failures" ``` For simple commands you could also do: ```bash cmd_that_may_fail || true ``` Of course, once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs, while removing `set +euo pipefail` or any other things you may have added to ensure that the experimental job doesn't interfere with the normal CI functioning. This whole process would have been much easier if only we could set something like `allow-failure` for the experimental step, and let it fail without impacting the overall status of PRs. But as mentioned earlier CircleCI and GitHub Actions don't support it at the moment. You can vote for this feature and see where it is at these CI-specific threads: - [GitHub Actions:](https://github.com/actions/toolkit/issues/399) - [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344) ## DeepSpeed integration For a PR that involves the DeepSpeed integration, keep in mind that our CircleCI PR CI setup doesn't have GPUs. Tests requiring GPUs are run on a different CI nightly. This means that if you get a passing CI report in your PR, it doesn't mean the DeepSpeed tests pass. To run DeepSpeed tests: ```bash RUN_SLOW=1 pytest tests/deepspeed/test_deepspeed.py ``` Any changes to the modeling or PyTorch examples code require running the model zoo tests as well. ```bash RUN_SLOW=1 pytest tests/deepspeed ```
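If you are iterating on a single DeepSpeed scenario rather than the whole suite, you can narrow the run with the usual pytest selection flags. The `-k` expression below is only an illustration; substitute whatever matches the test names you are working on:

```bash
# run one DeepSpeed test file verbosely, stop at the first failure,
# and keep only the tests whose names match the -k expression
RUN_SLOW=1 pytest -sv -x tests/deepspeed/test_deepspeed.py -k "fp16"
```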
transformers/docs/source/en/testing.md/0
{ "file_path": "transformers/docs/source/en/testing.md", "repo_id": "transformers", "token_count": 13408 }
253
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Plantillas para Modelos de Chat ## Introducción Un caso de uso cada vez más común para LLMs es **el chat**. En un contexto de chat, en lugar de continuar una única cadena de texto (como es el caso con un modelo de lenguaje estándar), el modelo continúa una conversación que consta de uno o más **mensajes**, cada uno de los cuales incluye un **rol**, como "usuario" o "asistente", así como el texto del mensaje. Al igual que con la tokenización, diferentes modelos esperan formatos de entrada muy diferentes para el chat. Esta es la razón por la que agregamos las plantillas de chat como una característica. Las plantillas de chat son parte del tokenizador. Especifican cómo convertir conversaciones, representadas como listas de mensajes, en una única cadena tokenizable en el formato que el modelo espera. Vamos a hacer esto con un ejemplo concreto utilizando el modelo `BlenderBot`. BlenderBot tiene una plantilla predeterminada extremadamente simple, que principalmente solo agrega espacios en blanco entre rondas de diálogo: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) " Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>" ``` Observa cómo todo el chat se condensa en una sola cadena. Si usamos `tokenize=True`, que es la configuración predeterminada, esa cadena también será tokenizada para nosotros. Sin embargo, para ver una plantilla más compleja en acción, usemos el modelo `mistralai/Mistral-7B-Instruct-v0.1` ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]" ``` Ten en cuenta que esta vez, el tokenizador ha añadido los tokens de control [INST] y [/INST] para indicar el inicio y el final de los mensajes de usuario (¡pero no de los mensajes del asistente!). Mistral-instruct fue entrenado con estos tokens, pero BlenderBot no lo fue. ## ¿Cómo uso las plantillas de chat? 
Como puedes ver en el ejemplo anterior, las plantillas de chat son fáciles de usar. Simplemente construye una lista de mensajes, con claves de `rol` y `contenido`, y luego pásala al método [`~PreTrainedTokenizer.apply_chat_template`]. Una vez que hagas eso, ¡obtendrás una salida lista para usar! Al utilizar plantillas de chat como entrada para la generación de modelos, también es una buena idea usar `add_generation_prompt=True` para agregar una [indicación de generación](#¿Qué-son-los-"generation-prompts"?). Aquí tienes un ejemplo de cómo preparar la entrada para `model.generate()` utilizando el modelo de asistente `Zephyr`: ```python from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "HuggingFaceH4/zephyr-7b-beta" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint) # You may want to use bfloat16 and/or move to GPU here messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") print(tokenizer.decode(tokenized_chat[0])) ``` Esto generará una cadena en el formato de entrada que Zephyr espera. ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> ``` Ahora que nuestra entrada está formateada correctamente para Zephyr, podemos usar el modelo para generar una respuesta a la pregunta del usuario: ```python outputs = model.generate(tokenized_chat, max_new_tokens=128) print(tokenizer.decode(outputs[0])) ``` Esto producirá: ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. ``` ¡Arr, al final resultó ser fácil! ## ¿Existe un pipeline automatizado para chats? Sí, lo hay! Nuestros canales de generación de texto admiten entradas de chat, cual facilita más facíl utilizar los modelos de chat. En el pasado, solíamos utilizar una clase dedicada "ConversationalPipeline", pero ahora ha quedado obsoleta y su funcionalidad se ha fusionado en [`TextGenerationPipeline`]. Este pipeline está diseñado para facilitar el uso de modelos de chat. Intentemos el ejemplo de `Zephyr` de nuevo, pero esta vez utilizando el pipeline: ```python from transformers import pipeline pipe = pipeline("conversational", "HuggingFaceH4/zephyr-7b-beta") messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] print(pipe(messages, max_new_tokens=128)[0]['generated_text'][-1]) # Print the assistant's response ``` ```text {'role': 'assistant', 'content': "Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. 
Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all."} ``` La canalización se encargará de todos los detalles de la tokenización y de llamar a `apply_chat_template` por ti. Una vez que el modelo tenga una plantilla de chat, ¡todo lo que necesitas hacer es inicializar el pipeline y pasarle la lista de mensajes! # ¿Qué son los "generation prompts"? Puede que hayas notado que el método `apply_chat_template` tiene un argumento `add_generation_prompt`. Este argumento indica a la plantilla que agregue tokens que indiquen el inicio de una respuesta del bot. Por ejemplo, considera el siguiente chat: ```python messages = [ {"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": "Nice to meet you!"}, {"role": "user", "content": "Can I ask a question?"} ] ``` Así es cómo se verá esto sin un "generation prompt", usando la plantilla ChatML que vimos en el ejemplo de Zephyr: ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> """ ``` Y así es como se ve **con** un "generation prompt": ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> <|im_start|>assistant """ ``` Ten en cuenta que esta vez, hemos agregado los tokens que indican el inicio de una respuesta del bot. Esto asegura que cuando el modelo genere texto, escribirá una respuesta del bot en lugar de hacer algo inesperado, como continuar el mensaje del usuario. Recuerda, los modelos de chat siguen siendo solo modelos de lenguaje: están entrenados para continuar texto, ¡y el chat es solo un tipo especial de texto para ellos! Necesitas guiarlos con los tokens de control apropiados para que sepan lo que se supone que deben estar haciendo. No todos los modelos requieren "generation prompts". Algunos modelos, como BlenderBot y LLaMA, no tienen ningún token especial antes de las respuestas del bot. En estos casos, el argumento `add_generation_prompt` no tendrá ningún efecto. El efecto exacto que tiene `add_generation_prompt` dependerá de la plantilla que se esté utilizando. ## ¿Puedo usar plantillas de chat en el entrenamiento? ¡Sí! Recomendamos que apliques la plantilla de chat como un paso de preprocesamiento para tu conjunto de datos. Después de esto, simplemente puedes continuar como cualquier otra tarea de entrenamiento de modelos de lenguaje. Durante el entrenamiento, generalmente deberías establecer `add_generation_prompt=False`, porque los tokens añadidos para solicitar una respuesta del asistente no serán útiles durante el entrenamiento. 
Veamos un ejemplo: ```python from transformers import AutoTokenizer from datasets import Dataset tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") chat1 = [ {"role": "user", "content": "Which is bigger, the moon or the sun?"}, {"role": "assistant", "content": "The sun."} ] chat2 = [ {"role": "user", "content": "Which is bigger, a virus or a bacterium?"}, {"role": "assistant", "content": "A bacterium."} ] dataset = Dataset.from_dict({"chat": [chat1, chat2]}) dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}) print(dataset['formatted_chat'][0]) ``` Y obtenemos: ```text <|user|> Which is bigger, the moon or the sun?</s> <|assistant|> The sun.</s> ``` Desde aquí, simplemente continúa el entrenamiento como lo harías con una tarea estándar de modelado de lenguaje, utilizando la columna `formatted_chat`. ## Avanzado: ¿Cómo funcionan las plantillas de chat? La plantilla de chat para un modelo se almacena en el atributo `tokenizer.chat_template`. Si no se establece ninguna plantilla de chat, se utiliza en su lugar la plantilla predeterminada para esa clase de modelo. Echemos un vistazo a la plantilla para `BlenderBot`: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> tokenizer.default_chat_template "{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}" ``` ¡Es un poco intimidante! Vamos a agregar algunas líneas nuevas y sangria para que sea más legible. Ten en cuenta que la primera línea nueva después de cada bloque, así como cualquier espacio en blanco anterior a un bloque, se ignoran de forma predeterminada, utilizando las banderas `trim_blocks` y `lstrip_blocks` de Jinja. Sin embargo, ¡ten cuidado! Aunque el espacio en blanco inicial en cada línea se elimina, los espacios entre bloques en la misma línea no. ¡Te recomendamos encarecidamente que verifiques que tu plantilla no esté imprimiendo espacios adicionales donde no debería estarlo! ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ ' ' }} {% endif %} {{ message['content'] }} {% if not loop.last %} {{ ' ' }} {% endif %} {% endfor %} {{ eos_token }} ``` Si nunca has visto uno de estos antes, esto es una [plantilla de Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/). Jinja es un lenguaje de plantillas que te permite escribir código simple que genera texto. En muchos aspectos, el código y la sintaxis se asemejan a Python. En Python puro, esta plantilla se vería algo así: ```python for idx, message in enumerate(messages): if message['role'] == 'user': print(' ') print(message['content']) if not idx == len(messages) - 1: # Check for the last message in the conversation print(' ') print(eos_token) ``` Efectivamente, la plantilla hace tres cosas: 1. Para cada mensaje, si el mensaje es un mensaje de usuario, añade un espacio en blanco antes de él, de lo contrario no imprime nada. 2. Añade el contenido del mensaje. 3. Si el mensaje no es el último mensaje, añade dos espacios después de él. Después del último mensaje, imprime el token EOS. Esta es una plantilla bastante simple: no añade ningún token de control y no admite mensajes "del sistema", que son una forma común de dar al modelo directivas sobre cómo debe comportarse en la conversación posterior. 
¡Pero Jinja te brinda mucha flexibilidad para hacer esas cosas! Veamos una plantilla de Jinja que pueda formatear las entradas de manera similar a la forma en que LLaMA las formatea (nota que la plantilla real de LLaMA incluye el manejo de mensajes del sistema predeterminados y el manejo de mensajes del sistema ligeramente diferentes en general; ¡no uses esta en tu código real!) ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'] + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ ' ' + message['content'] + ' ' + eos_token }} {% endif %} {% endfor %} ``` Si observas esto por un momento, puedas ver lo que esta plantilla está haciendo: añade tokens específicos basados en el "rol" de cada mensaje, que representa quién lo envió. Los mensajes de usuario, asistente y sistema son claramente distinguibles para el modelo debido a los tokens en los que están envueltos. ## Avanzado: Añadiendo y editando plantillas de chat ### ¿Cómo creo una plantilla de chat? Simple, solo escribe una plantilla de Jinja y establece `tokenizer.chat_template`. ¡Puede resultarte más fácil comenzar con una plantilla existente de otro modelo y simplemente editarla según tus necesidades! Por ejemplo, podríamos tomar la plantilla de LLaMA de arriba y añadir "[ASST]" y "[/ASST]" a los mensajes del asistente: ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }} {% endif %} {% endfor %} ``` Ahora, simplemente establece el atributo `tokenizer.chat_template`. ¡La próxima vez que uses [`~PreTrainedTokenizer.apply_chat_template`], se utilizará tu nueva plantilla! Este atributo se guardará en el archivo tokenizer_config.json, por lo que puedes usar [`~utils.PushToHubMixin.push_to_hub`] para cargar tu nueva plantilla en el Hub y asegurarte de que todos estén utilizando la plantilla correcta para tu modelo. ```python template = tokenizer.chat_template template = template.replace("SYS", "SYSTEM") # Change the system token tokenizer.chat_template = template # Set the new template tokenizer.push_to_hub("model_name") # Upload your new template to the Hub! ``` El método [`~PreTrainedTokenizer.apply_chat_template`], que utiliza tu plantilla de chat, es llamado por la clase [`TextGenerationPipeline`], así que una vez que configures la plantilla de chat correcta, tu modelo se volverá automáticamente compatible con [`TextGenerationPipeline`]. <Tip> Si estás ajustando finamente un modelo para chat, además de establecer una plantilla de chat, probablemente deberías agregar cualquier nuevo token de control de chat como los tokens especiales en el tokenizador. Los tokens especiales nunca se dividen, asegurando que tus tokens de control siempre se manejen como tokens únicos en lugar de ser tokenizados en piezas. También deberías establecer el atributo `eos_token` del tokenizador con el token que marca el final de las generaciones del asistente en tu plantilla. Esto asegurará que las herramientas de generación de texto puedan determinar correctamente cuándo detener la generación de texto. </Tip> ### ¿Qué son las plantillas "default"? 
Antes de la introducción de las plantillas de chat, el manejo del chat estaba codificado en el nivel de la clase del modelo. Por razones de compatibilidad con versiones anteriores, hemos conservado este manejo específico de la clase como plantillas predeterminadas, también establecidas a nivel de clase. Si un modelo no tiene una plantilla de chat establecida, pero hay una plantilla predeterminada para su clase de modelo, la clase `TextGenerationPipeline` y métodos como `apply_chat_template` usarán la plantilla de clase en su lugar. Puedes averiguar cuál es la plantilla predeterminada para tu tokenizador comprobando el atributo `tokenizer.default_chat_template`. Esto es algo que hacemos puramente por razones de compatibilidad con versiones anteriores, para evitar romper cualquier flujo de trabajo existente. Incluso cuando la plantilla de clase es apropiada para tu modelo, recomendamos encarecidamente anular la plantilla predeterminada estableciendo explícitamente el atributo `chat_template` para dejar claro a los usuarios que tu modelo ha sido configurado correctamente para el chat, y para estar preparados para el futuro en caso de que las plantillas predeterminadas alguna vez se alteren o se eliminen. ### ¿Qué plantilla debería usar? Cuando establezcas la plantilla para un modelo que ya ha sido entrenado para chat, debes asegurarte de que la plantilla coincida exactamente con el formato de mensajes que el modelo vio durante el entrenamiento, o de lo contrario es probable que experimentes degradación del rendimiento. Esto es cierto incluso si estás entrenando aún más el modelo; probablemente obtendrás el mejor rendimiento si mantienes constantes los tokens de chat. Esto es muy análogo a la tokenización: generalmente obtienes el mejor rendimiento para la inferencia o el ajuste fino cuando coincides precisamente con la tokenización utilizada durante el entrenamiento. Si estás entrenando un modelo desde cero o ajustando finamente un modelo de lenguaje base para chat, por otro lado, ¡tienes mucha libertad para elegir una plantilla apropiada! Los LLM son lo suficientemente inteligentes como para aprender a manejar muchos formatos de entrada diferentes. Nuestra plantilla predeterminada para modelos que no tienen una plantilla específica de clase sigue el formato ChatML, y esta es una buena elección flexible para muchos casos de uso. Se ve así: ``` {% for message in messages %} {{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}} {% endfor %} ``` Si te gusta esta plantilla, aquí está en forma de una sola línea, lista para copiar en tu código. La versión de una sola línea también incluye un práctico soporte para [prompts de generación](#¿Qué-son-los-"generation-prompts"?), ¡pero ten en cuenta que no añade tokens de BOS o EOS! Si tu modelo espera esos tokens, no se agregarán automáticamente por `apply_chat_template`, en otras palabras, el texto será tokenizado con `add_special_tokens=False`. Esto es para evitar posibles conflictos entre la plantilla y la lógica de `add_special_tokens`. ¡Si tu modelo espera tokens especiales, asegúrate de añadirlos a la plantilla! 
```python tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" ``` Esta plantilla envuelve cada mensaje en tokens `<|im_start|>` y `<|im_end|>`, y simplemente escribe el rol como una cadena, lo que permite flexibilidad en los roles con los que entrenas. La salida se ve así: ```text <|im_start|>system You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|> <|im_start|>user How are you?<|im_end|> <|im_start|>assistant I'm doing great!<|im_end|> ``` Los roles "usuario", "sistema" y "asistente" son los estándar para chat, y recomendamos usarlos cuando tenga sentido, particularmente si deseas que tu modelo funcione bien con [`TextGenerationPipeline`]. Sin embargo, no estás limitado a estos roles: la plantilla es extremadamente flexible y cualquier cadena puede ser un rol. ### ¡Quiero añadir algunas plantillas de chat! ¿Cómo debo empezar? Si tienes algún modelo de chat, debes establecer su atributo `tokenizer.chat_template` y probarlo usando [`~PreTrainedTokenizer.apply_chat_template`], luego subir el tokenizador actualizado al Hub. Esto se aplica incluso si no eres el propietario del modelo: si estás usando un modelo con una plantilla de chat vacía o que todavía está utilizando la plantilla predeterminada de clase, por favor abre una solicitud de extracción [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) al repositorio del modelo para que este atributo se pueda establecer correctamente. Una vez que se establece el atributo, ¡eso es todo, has terminado! `tokenizer.apply_chat_template` ahora funcionará correctamente para ese modelo, ¡lo que significa que también es compatible automáticamente en lugares como `TextGenerationPipeline`! Al asegurarnos de que los modelos tengan este atributo, podemos garantizar que toda la comunidad pueda utilizar todo el poder de los modelos de código abierto. Los desajustes de formato han estado acechando el campo y dañando silenciosamente el rendimiento durante demasiado tiempo: ¡es hora de ponerles fin! ## Avanzado: Consejos para escribir plantillas Si no estás familiarizado con Jinja, generalmente encontramos que la forma más fácil de escribir una plantilla de chat es primero escribir un script de Python corto que formatee los mensajes como desees, y luego convertir ese script en una plantilla. Recuerda que el manejador de plantillas recibirá el historial de conversación como una variable llamada mensajes. Cada mensaje es un diccionario con dos claves, `role` y `content`. Podrás acceder a los `mensajes` en tu plantilla tal como lo harías en Python, lo que significa que puedes recorrerlo con `{% for message in messages %}` o acceder a mensajes individuales con, por ejemplo, `{{ messages[0] }}`. También puedes usar los siguientes consejos para convertir tu código a Jinja: ### Bucles For Los bucles For en Jinja se ven así: ``` {% for message in messages %} {{ message['content'] }} {% endfor %} ``` Ten en cuenta que todo lo que esté dentro del {{bloque de expresión}} se imprimirá en la salida. Puedes usar operadores como `+` para combinar cadenas dentro de bloques de expresión. 
### Declaraciones if Las declaraciones if en Jinja se ven así: ``` {% if message['role'] == 'user' %} {{ message['content'] }} {% endif %} ``` Observa cómo donde Python utiliza espacios en blanco para marcar el inicio y el final de los bloques `for` e `if`, Jinja requiere que los termines explícitamente con `{% endfor %}` y `{% endif %}`. ### Variables especiales Dentro de tu plantilla, tendrás acceso a la lista de `mensajes`, pero también puedes acceder a varias otras variables especiales. Estas incluyen tokens especiales como `bos_token` y `eos_token`, así como la variable `add_generation_prompt` que discutimos anteriormente. También puedes usar la variable `loop` para acceder a información sobre la iteración actual del bucle, por ejemplo, usando `{% if loop.last %}` para verificar si el mensaje actual es el último mensaje en la conversación. Aquí tienes un ejemplo que combina estas ideas para agregar un prompt de generación al final de la conversación si add_generation_prompt es `True`: ``` {% if loop.last and add_generation_prompt %} {{ bos_token + 'Assistant:\n' }} {% endif %} ``` ### Notas sobre los espacios en blanco Hemos intentado que Jinja ignore los espacios en blanco fuera de las {{expresiones}} tanto como sea posible. Sin embargo, ten en cuenta que Jinja es un motor de plantillas de propósito general y puede tratar el espacio en blanco entre bloques en la misma línea como significativo e imprimirlo en la salida. ¡Te recomendamos **encarecidamente** que verifiques que tu plantilla no esté imprimiendo espacios adicionales donde no debería antes de subirla!
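Una forma rápida de hacer esa comprobación es renderizar la plantilla sin tokenizar e inspeccionar el resultado con `repr()`, que hace visibles los espacios y saltos de línea adicionales. El siguiente boceto es solo orientativo (el checkpoint y los mensajes son hipotéticos; sustitúyelos por los tuyos):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ruta/o-id-de-tu-modelo")  # checkpoint hipotético
tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}"

messages = [{"role": "user", "content": "Hola"}]
rendered = tokenizer.apply_chat_template(messages, tokenize=False)

# `repr()` muestra explícitamente los saltos de línea y espacios del texto renderizado
print(repr(rendered))
```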
transformers/docs/source/es/chat_templating.md/0
{ "file_path": "transformers/docs/source/es/chat_templating.md", "repo_id": "transformers", "token_count": 8911 }
254
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines para inferencia Un [`pipeline`] simplifica el uso de cualquier modelo del [Model Hub](https://huggingface.co/models) para la inferencia en una variedad de tareas como la generación de texto, la segmentación de imágenes y la clasificación de audio. Incluso si no tienes experiencia con una modalidad específica o no comprendes el código que alimenta los modelos, ¡aún puedes usarlos con el [`pipeline`]! Este tutorial te enseñará a: * Utilizar un [`pipeline`] para inferencia. * Utilizar un tokenizador o modelo específico. * Utilizar un [`pipeline`] para tareas de audio y visión. <Tip> Echa un vistazo a la documentación de [`pipeline`] para obtener una lista completa de tareas admitidas. </Tip> ## Uso del pipeline Si bien cada tarea tiene un [`pipeline`] asociado, es más sencillo usar la abstracción general [`pipeline`] que contiene todos los pipelines de tareas específicas. El [`pipeline`] carga automáticamente un modelo predeterminado y un tokenizador con capacidad de inferencia para tu tarea. 1. Comienza creando un [`pipeline`] y específica una tarea de inferencia: ```py >>> from transformers import pipeline >>> generator = pipeline(task="text-generation") ``` 2. Pasa tu texto de entrada al [`pipeline`]: ```py >>> generator("Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone") [{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain'}] ``` Si tienes más de una entrada, pásala como una lista: ```py >>> generator( ... [ ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", ... "Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne", ... ] ... ) ``` Cualquier parámetro adicional para tu tarea también se puede incluir en el [`pipeline`]. La tarea `text-generation` tiene un método [`~generation.GenerationMixin.generate`] con varios parámetros para controlar la salida. Por ejemplo, si deseas generar más de una salida, defínelo en el parámetro `num_return_sequences`: ```py >>> generator( ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", ... num_return_sequences=2, ... ) ``` ### Selecciona un modelo y un tokenizador El [`pipeline`] acepta cualquier modelo del [Model Hub](https://huggingface.co/models). Hay etiquetas en el Model Hub que te permiten filtrar por el modelo que te gustaría utilizar para tu tarea. Una vez que hayas elegido un modelo apropiado, cárgalo con la clase `AutoModelFor` y [`AutoTokenizer`] correspondientes. 
Por ejemplo, carga la clase [`AutoModelForCausalLM`] para una tarea de modelado de lenguaje causal: ```py >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` Crea un [`pipeline`] para tu tarea y específica el modelo y el tokenizador que cargaste: ```py >>> from transformers import pipeline >>> generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer) ``` Pasa tu texto de entrada a [`pipeline`] para generar algo de texto: ```py >>> generator("Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone") [{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm'}] ``` ## Pipeline de audio La flexibilidad de [`pipeline`] significa que también se puede extender a tareas de audio. Por ejemplo, clasifiquemos la emoción de un breve fragmento del famoso discurso de John F. Kennedy ["We choose to go to the Moon"](https://en.wikipedia.org/wiki/We_choose_to_go_to_the_Moon). Encuentra un modelo de [audio classification](https://huggingface.co/models?pipeline_tag=audio-classification) para reconocimiento de emociones en el Model Hub y cárgalo en el [`pipeline`]: ```py >>> from transformers import pipeline >>> audio_classifier = pipeline( ... task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" ... ) ``` Pasa el archivo de audio al [`pipeline`]: ```py >>> audio_classifier("jfk_moon_speech.wav") [{'label': 'calm', 'score': 0.13856211304664612}, {'label': 'disgust', 'score': 0.13148026168346405}, {'label': 'happy', 'score': 0.12635163962841034}, {'label': 'angry', 'score': 0.12439591437578201}, {'label': 'fearful', 'score': 0.12404385954141617}] ``` ## Pipeline de visión Finalmente, utilizar un [`pipeline`] para tareas de visión es prácticamente igual. Específica tu tarea de visión y pasa tu imagen al clasificador. La imagen puede ser un enlace o una ruta local a la imagen. Por ejemplo, ¿qué especie de gato se muestra a continuación? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(task="image-classification") >>> vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) [{'label': 'lynx, catamount', 'score': 0.4403027892112732}, {'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', 'score': 0.03433405980467796}, {'label': 'snow leopard, ounce, Panthera uncia', 'score': 0.032148055732250214}, {'label': 'Egyptian cat', 'score': 0.02353910356760025}, {'label': 'tiger cat', 'score': 0.023034192621707916}] ```
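Al igual que con las tareas de texto, también puedes fijar explícitamente el modelo de visión que quieres usar y pasar varias imágenes en una lista. El siguiente boceto es solo ilustrativo: el checkpoint indicado y la ruta local son ejemplos que puedes sustituir por los tuyos.

```py
>>> from transformers import pipeline

>>> # Checkpoint de ejemplo; cualquier modelo de clasificación de imágenes del Hub sirve
>>> vision_classifier = pipeline(task="image-classification", model="google/vit-base-patch16-224")

>>> vision_classifier(
...     images=[
...         "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
...         "ruta/local/a/otra_imagen.jpg",  # ruta hipotética
...     ],
...     top_k=2,
... )
```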
transformers/docs/source/es/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/es/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 2294 }
255
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Come creare una pipeline personalizzata? In questa guida, scopriremo come creare una pipeline personalizzata e condividerla sull' [Hub](https://hf.co/models) o aggiungerla nella libreria Transformers. Innanzitutto, è necessario decidere gli input grezzi che la pipeline sarà in grado di accettare. Possono essere strings, raw bytes, dictionaries o qualsiasi cosa sia l'input desiderato più probabile. Cerca di mantenere questi input il più possibile in Python in quanto facilita la compatibilità (anche con altri linguaggi tramite JSON). Questi saranno gli `inputs` della pipeline (`preprocess`). Poi definire gli `outputs`. Stessa strategia degli `inputs`. Più è seplice e meglio è. Questi saranno gli output del metodo `postprocess`. Si parte ereditando la classe base `Pipeline`. con i 4 metodi che bisogna implementare `preprocess`, `_forward`, `postprocess` e `_sanitize_parameters`. ```python from transformers import Pipeline class MyPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, inputs, maybe_arg=2): model_input = Tensor(inputs["input_ids"]) return {"model_input": model_input} def _forward(self, model_inputs): # model_inputs == {"model_input": model_input} outputs = self.model(**model_inputs) # Maybe {"logits": Tensor(...)} return outputs def postprocess(self, model_outputs): best_class = model_outputs["logits"].softmax(-1) return best_class ``` La struttura di questa suddivisione consiste nel supportare in modo relativamente continuo CPU/GPU, supportando allo stesso tempo l'esecuzione di pre/postelaborazione sulla CPU su thread diversi. `preprocess` prenderà gli input originariamente definiti e li trasformerà in qualcosa di alimentabile dal modello. Potrebbe contenere più informazioni e di solito è un `Dict`. `_forward` è il dettaglio dell'implementazione e non è destinato a essere chiamato direttamente. `forward` è il metodo preferito per assicurarsi che tutto funzioni correttamente perchè contiene delle slavaguardie. Se qualcosa è è collegato a un modello reale, appartiene al metodo `_forward`, tutto il resto è nel preprocess/postprocess. `postprocess` prende l'otput di `_forward` e lo trasforma nell'output finale che era stato deciso in precedenza. `_sanitize_parameters` esiste per consentire agli utenti di passare i parametri ogni volta che desiderano sia a inizialization time `pipeline(...., maybe_arg=4)` che al call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`. `_sanitize_parameters` ritorna 3 dicts di kwargs che vengono passati direttamente a `preprocess`, `_forward` e `postprocess`. Non riempire nulla se il chiamante non ha chiamato con alcun parametro aggiuntivo. 
Questo consente di mantenere gli argomenti predefiniti nella definizione della funzione, che è sempre più "naturale". Un esempio classico potrebbe essere l'argomento `top_k` nel post processing dei classification tasks. ```python >>> pipe = pipeline("my-new-task") >>> pipe("This is a test") [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05} {"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}] >>> pipe("This is a test", top_k=2) [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}] ``` In order to achieve that, we'll update our `postprocess` method with a default parameter to `5`. and edit `_sanitize_parameters` to allow this new parameter. ```python def postprocess(self, model_outputs, top_k=5): best_class = model_outputs["logits"].softmax(-1) # Add logic to handle top_k return best_class def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] postprocess_kwargs = {} if "top_k" in kwargs: postprocess_kwargs["top_k"] = kwargs["top_k"] return preprocess_kwargs, {}, postprocess_kwargs ``` Cercare di mantenere gli input/output molto semplici e idealmente serializzabili in JSON, in quanto ciò rende l'uso della pipeline molto facile senza richiedere agli utenti di comprendere nuovi tipi di oggetti. È anche relativamente comune supportare molti tipi di argomenti per facilitarne l'uso (ad esempio file audio, possono essere nomi di file, URL o byte puri). ## Aggiungilo alla lista dei tasks supportati Per registrar il tuo `new-task` alla lista dei tasks supportati, devi aggiungerlo al `PIPELINE_REGISTRY`: ```python from transformers.pipelines import PIPELINE_REGISTRY PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, ) ``` Puoi specificare il modello di default che desideri, in questo caso dovrebbe essere accompagnato da una revisione specifica (che può essere il nome di un branch o l'hash di un commit, in questo caso abbiamo preso `"abcdef"`) e anche dal type: ```python PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, default={"pt": ("user/awesome_model", "abcdef")}, type="text", # current support type: text, audio, image, multimodal ) ``` ## Condividi la tua pipeline sull'Hub Per condividere la tua pipeline personalizzata sull'Hub, devi solo salvare il codice della tua sottoclasse `Pipeline` in un file python. 
Per esempio, supponiamo di voler utilizzare una pipeline personalizzata per la classificazione delle coppie di frasi come la seguente: ```py import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits} ``` L'implementazione è agnostica al framework, e lavorerà sia con modelli PyTorch che con TensorFlow. Se l'abbiamo salvato in un file chiamato `pair_classification.py`, può essere successivamente importato e registrato in questo modo: ```py from pair_classification import PairClassificationPipeline from transformers.pipelines import PIPELINE_REGISTRY from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, tf_model=TFAutoModelForSequenceClassification, ) ``` Una volta fatto, possiamo usarla con un modello pretrained. L'istanza `sgugger/finetuned-bert-mrpc` è stata fine-tuned sul dataset MRPC, che classifica le coppie di frasi come parafrasi o no. ```py from transformers import pipeline classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc") ``` Successivamente possiamo condividerlo sull'Hub usando il metodo `save_pretrained` in un `Repository`: ```py from huggingface_hub import Repository repo = Repository("test-dynamic-pipeline", clone_from="{your_username}/test-dynamic-pipeline") classifier.save_pretrained("test-dynamic-pipeline") repo.push_to_hub() ``` Questo codice copierà il file dove è stato definitp `PairClassificationPipeline` all'interno della cartella `"test-dynamic-pipeline"`, insieme al salvataggio del modello e del tokenizer della pipeline, prima di pushare il tutto nel repository `{your_username}/test-dynamic-pipeline`. Dopodiché chiunque potrà utilizzarlo, purché fornisca l'opzione `trust_remote_code=True`: ```py from transformers import pipeline classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True) ``` ## Aggiungere la pipeline a Transformers Se vuoi contribuire con la tua pipeline a Transformers, dovrai aggiungere un modulo nel sottomodulo `pipelines` con il codice della tua pipeline, quindi aggiungilo all'elenco dei tasks definiti in `pipelines/__init__.py`. Poi hai bisogno di aggiungere i test. Crea un nuovo file `tests/test_pipelines_MY_PIPELINE.py` con esempi ed altri test. La funzione `run_pipeline_test` sarà molto generica e su piccoli modelli casuali su ogni possibile architettura, come definito da `model_mapping` e `tf_model_mapping`. 
Questo è molto importante per testare la compatibilità futura, nel senso che se qualcuno aggiunge un nuovo modello per `XXXForQuestionAnswering` allora il test della pipeline tenterà di essere eseguito su di esso. Poiché i modelli sono casuali, è impossibile controllare i valori effettivi; per questo esiste un helper `ANY` che si limiterà a verificare che il TIPO dell'output della pipeline corrisponda a quello atteso.

Hai anche *bisogno* di implementare 2 (idealmente 4) test.

- `test_small_model_pt` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso) e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_tf`.
- `test_small_model_tf` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso) e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_pt`.
- `test_large_model_pt` (`optional`): Testare la pipeline su una pipeline reale in cui i risultati dovrebbero avere senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo è mostrare la pipeline e assicurarsi che non ci siano derive nelle versioni future.
- `test_large_model_tf` (`optional`): Testare la pipeline su una pipeline reale in cui i risultati dovrebbero avere senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo è mostrare la pipeline e assicurarsi che non ci siano derive nelle versioni future.
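A titolo puramente indicativo, uno scheletro di questi test potrebbe assomigliare allo schizzo seguente. I nomi dei checkpoint e i dettagli dell'output sono ipotetici e vanno adattati alla tua pipeline e alla struttura corrente del repository:

```python
# Schizzo ipotetico di tests/test_pipelines_pair_classification.py.
# Presuppone che la pipeline "pair-classification" sia già stata registrata
# (ad esempio importando il modulo che chiama PIPELINE_REGISTRY.register_pipeline).
import unittest

from transformers import pipeline
from transformers.testing_utils import require_torch, slow


class ANY:
    # Piccolo helper (analogo a quello usato nei test reali): confronta solo il tipo, non il valore.
    def __init__(self, *types):
        self.types = types

    def __eq__(self, other):
        return isinstance(other, self.types)


class PairClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        # Un piccolo modello casuale: i risultati non devono avere senso, ma la struttura sì.
        classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert")
        outputs = classifier("This is a test", second_text="Another test")
        self.assertEqual(outputs, {"label": ANY(str), "score": ANY(float), "logits": ANY(list)})

    @slow
    @require_torch
    def test_large_model_pt(self):
        # Su un modello reale i risultati dovrebbero avere senso; qui controlliamo solo la struttura.
        classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
        outputs = classifier("I like you", second_text="I love you")
        self.assertEqual(set(outputs.keys()), {"label", "score", "logits"})
```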
transformers/docs/source/it/add_new_pipeline.md/0
{ "file_path": "transformers/docs/source/it/add_new_pipeline.md", "repo_id": "transformers", "token_count": 4132 }
256
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Inferenza efficiente su GPU singola Questo documento sarà presto completato con informazioni su come effetture l'inferenza su una singola GPU. Nel frattempo è possibile consultare [la guida per l'addestramento su una singola GPU](perf_train_gpu_one) e [la guida per l'inferenza su CPU](perf_infer_cpu). ## `BetterTransformer` per l'inferenza più veloce Abbiamo recentemente integrato `BetterTransformer` per velocizzare l'inferenza su GPU per modelli di testo, immagini e audio. Per maggiori dettagli, consultare la documentazione su questa integrazione [qui](https://huggingface.co/docs/optimum/bettertransformer/overview). ## Integrazione di `bitsandbytes` per Int8 mixed-precision matrix decomposition <Tip> Nota che questa funzione può essere utilizzata anche nelle configurazioni multi GPU. </Tip> Dal paper [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339), noi supportiamo l'integrazione di Hugging Face per tutti i modelli dell'Hub con poche righe di codice. Il metodo `nn.Linear` riduce la dimensione di 2 per i pesi `float16` e `bfloat16` e di 4 per i pesi `float32`, con un impatto quasi nullo sulla qualità, operando sugli outlier in half-precision. ![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) Il metodo Int8 mixed-precision matrix decomposition funziona separando la moltiplicazione tra matrici in due flussi: (1) una matrice di flusso di outlier di caratteristiche sistematiche moltiplicata in fp16, (2) in flusso regolare di moltiplicazione di matrici int8 (99,9%). Con questo metodo, è possibile effettutare inferenza int8 per modelli molto grandi senza degrado predittivo. Per maggiori dettagli sul metodo, consultare il [paper](https://arxiv.org/abs/2208.07339) o il nostro [blogpost sull'integrazione](https://huggingface.co/blog/hf-bitsandbytes-integration). ![MixedInt8.gif](https://cdn-uploads.huggingface.co/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) Nota che è necessaria una GPU per eseguire modelli di tipo mixed-8bit, poiché i kernel sono stati compilati solo per le GPU. Prima di utilizzare questa funzione, assicurarsi di disporre di memoria sufficiente sulla GPU per memorizzare un quarto del modello (o la metà se i pesi del modello sono in mezza precisione). Di seguito sono riportate alcune note per aiutarvi a utilizzare questo modulo, oppure seguite le dimostrazioni su [Google colab](#colab-demos). ### Requisiti - Se si dispone di `bitsandbytes<0.37.0`, assicurarsi di eseguire su GPU NVIDIA che supportano tensor cores a 8 bit (Turing, Ampere o architetture più recenti - ad esempio T4, RTX20s RTX30s, A40-A100). Per `bitsandbytes>=0.37.0`, tutte le GPU dovrebbero essere supportate. - Installare la versione corretta di `bitsandbytes` eseguendo: `pip install bitsandbytes>=0.31.5`. 
- Installare `accelerate` eseguendo: `pip install accelerate>=0.12.0`

### Esecuzione di modelli mixed-Int8 - configurazione per singola GPU

Dopo aver installato le librerie necessarie, il modo per caricare il tuo modello mixed 8-bit è il seguente:

```py
from transformers import AutoModelForCausalLM

model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)
```

Per la generazione di testo, si consiglia di:

* utilizzare il metodo `generate()` del modello invece della funzione `pipeline()`. Sebbene l'inferenza sia possibile con la funzione `pipeline()`, essa non è ottimizzata per i modelli mixed-8bit e sarà più lenta rispetto all'uso del metodo `generate()`. Inoltre, alcune strategie di campionamento, come il campionamento nucleus, non sono supportate dalla funzione `pipeline()` per i modelli mixed-8bit.
* collocare tutti gli input sullo stesso dispositivo del modello.

Ecco un semplice esempio:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "bigscience/bloom-2b5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)

text = "Hello, my llama is cute"
inputs = tokenizer(text, return_tensors="pt").to("cuda")
generated_ids = model_8bit.generate(**inputs)
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
```

### Esecuzione di modelli mixed-8bit - configurazione multi GPU

Usare il seguente modo per caricare il modello mixed-8bit su più GPU (stesso comando della configurazione a GPU singola):

```py
model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)
```

Puoi controllare la quantità di RAM da allocare su ogni GPU usando `accelerate`. Utilizzare l'argomento `max_memory` come segue:

```py
max_memory_mapping = {0: "1GB", 1: "2GB"}
model_name = "bigscience/bloom-3b"
model_8bit = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping
)
```

In questo esempio, la prima GPU utilizzerà 1 GB di memoria e la seconda 2 GB.

### Colab demos

Con questo metodo è possibile fare inferenza su modelli che prima non era possibile eseguire su Google Colab. Guardate la demo per l'esecuzione di T5-11b (42GB in fp32) utilizzando la quantizzazione a 8 bit su Google Colab:

[![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing)

Oppure questa demo di BLOOM-3B:

[![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing)
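Infine, per farsi un'idea del risparmio di memoria ottenuto con la quantizzazione a 8 bit, si può confrontare l'occupazione di memoria dei parametri del modello quantizzato con quella della versione in half-precision. Lo schizzo seguente è solo indicativo e presuppone di avere memoria sufficiente per caricare entrambe le versioni (in alternativa, caricale una alla volta):

```py
import torch
from transformers import AutoModelForCausalLM

model_name = "bigscience/bloom-2b5"

# Versione in half-precision (fp16)
model_fp16 = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
# Versione quantizzata a 8 bit
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)

# `get_memory_footprint()` restituisce l'occupazione di memoria dei parametri in byte
print(f"fp16: {model_fp16.get_memory_footprint() / 1024**3:.2f} GB")
print(f"int8: {model_8bit.get_memory_footprint() / 1024**3:.2f} GB")
```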
transformers/docs/source/it/perf_infer_gpu_one.md/0
{ "file_path": "transformers/docs/source/it/perf_infer_gpu_one.md", "repo_id": "transformers", "token_count": 2306 }
257
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. ライセンス:Apache License、バージョン2.0(「ライセンス」)に基づいています。このファイルは、ライセンスに準拠していない限り、使用できません。ライセンスのコピーは以下から入手できます: http://www.apache.org/licenses/LICENSE-2.0 適用法に従って必要な場合、または書面で同意した場合を除き、ライセンスの下で配布されるソフトウェアは、「AS IS」の基盤で、明示または黙示を問わず、いかなる保証や条件も含みません。ライセンスの詳細については、ライセンス文書をご覧ください。 ⚠️ このファイルはMarkdown形式ですが、弊社のドキュメントビルダー(MDXに類似した特定の構文を含む)を含むため、お使いのMarkdownビューアでは正しく表示されない場合があります。 --> # How to convert a 🤗 Transformers model to TensorFlow? 🤗 Transformersを使用するために複数のフレームワークが利用可能であることは、アプリケーションを設計する際にそれぞれの強みを活かす柔軟性を提供しますが、 互換性をモデルごとに追加する必要があることを意味します。しかし、幸いなことに 既存のモデルにTensorFlow互換性を追加することは、[ゼロから新しいモデルを追加すること](add_new_model)よりも簡単です! 大規模なTensorFlowモデルの詳細を理解したり、主要なオープンソースの貢献を行ったり、 選択したモデルをTensorFlowで有効にするためのガイドです。 このガイドは、コミュニティのメンバーであるあなたに、TensorFlowモデルの重みおよび/または アーキテクチャを🤗 Transformersで使用するために、Hugging Faceチームからの最小限の監視で貢献できる力を与えます。新しいモデルを書くことは小さな偉業ではありませんが、 このガイドを読むことで、それがローラーコースターのようなものから散歩のようなものになることを願っています🎢🚶。 このプロセスをますます簡単にするために、私たちの共通の経験を活用することは非常に重要ですので、 このガイドの改善を提案することを強くお勧めします! さらに詳しく調べる前に、以下のリソースをチェックすることをお勧めします。🤗 Transformersが初めての場合: - [🤗 Transformersの一般的な概要](add_new_model#general-overview-of-transformers) - [Hugging FaceのTensorFlow哲学](https://huggingface.co/blog/tensorflow-philosophy) このガイドの残りの部分では、新しいTensorFlowモデルアーキテクチャを追加するために必要なもの、 PyTorchをTensorFlowモデルの重みに変換する手順、およびMLフレームワーク間の不一致を効率的にデバッグする方法について学びます。それでは始めましょう! <Tip> 使用したいモデルに対応するTensorFlowアーキテクチャがすでに存在するかどうかわからないですか? &nbsp; 選択したモデルの`config.json`の`model_type`フィールドをチェックしてみてください ([例](https://huggingface.co/google-bert/bert-base-uncased/blob/main/config.json#L14))。 🤗 Transformersの該当するモデルフォルダに、名前が"modeling_tf"で始まるファイルがある場合、それは対応するTensorFlow アーキテクチャを持っていることを意味します([例](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert))。 </Tip> ## Step-by-step guide to add TensorFlow model architecture code 大規模なモデルアーキテクチャを設計する方法はさまざまであり、その設計を実装する方法もさまざまです。 しかし、[🤗 Transformersの一般的な概要](add_new_model#general-overview-of-transformers)から 思い出していただけるかもしれませんが、私たちは意見のあるグループです - 🤗 Transformersの使いやすさは一貫性のある設計の選択肢に依存しています。経験から、TensorFlowモデルを追加する際に重要なことをいくつかお伝えできます: - 車輪を再発明しないでください!ほとんどの場合、確認すべき少なくとも2つの参照実装があります。それは、 あなたが実装しているモデルのPyTorchバージョンと、同じ種類の問題に対する他のTensorFlowモデルです。 - 優れたモデル実装は時間の試練を乗り越えます。これは、コードがきれいだからではなく、コードが明確で、デバッグしやすく、 構築しやすいからです。TensorFlow実装でPyTorch実装と一致するパターンを複製し、PyTorch実装との不一致を最小限に抑えることで、 あなたの貢献が長期間にわたって有用であることを保証します。 - 行き詰まったら助けを求めてください! 🤗 Transformersチームはここにいますし、おそらくあなたが直面している同じ問題に対する解決策を見つけています。 TensorFlowモデルアーキテクチャを追加するために必要なステップの概要は次のとおりです: 1. 変換したいモデルを選択 2. transformersの開発環境を準備 3. (オプション)理論的な側面と既存の実装を理解 4. モデルアーキテクチャを実装 5. モデルのテストを実装 6. プルリクエストを提出 7. (オプション)デモを構築して世界と共有 ### 1.-3. Prepare your model contribution **1. 変換したいモデルを選択する** まず、基本から始めましょう。最初に知っておく必要があることは、変換したいアーキテクチャです。 特定のアーキテクチャを決めていない場合、🤗 Transformers チームに提案を求めることは、影響を最大限にする素晴らしい方法です。 チームは、TensorFlow サイドで不足している最も注目されるアーキテクチャに向けてガイドします。 TensorFlow で使用したい特定のモデルに、🤗 Transformers に既に TensorFlow アーキテクチャの実装が存在しているが、重みが不足している場合、 このページの[重みの追加セクション](#adding-tensorflow-weights-to--hub)に直接移動してください。 簡単にするために、このガイドの残りの部分では、TensorFlow バージョンの *BrandNewBert* を貢献することを決定したと仮定しています (これは、[新しいモデルの追加ガイド](add_new_model)での例と同じです)。 <Tip> TensorFlow モデルのアーキテクチャに取り組む前に、それを行うための進行中の取り組みがないかを再確認してください。 GitHub ページの[プルリクエスト](https://github.com/huggingface/transformers/pulls?q=is%3Apr)で `BrandNewBert` を検索して、 TensorFlow 関連のプルリクエストがないことを確認できます。 </Tip> **2. transformers 開発環境の準備** モデルアーキテクチャを選択したら、意向を示すためにドラフト PR を開くための環境を設定してください。 以下の手順に従って、環境を設定し、ドラフト PR を開いてください。 1. 
リポジトリのページで 'Fork' ボタンをクリックして、[リポジトリ](https://github.com/huggingface/transformers)をフォークします。 これにより、コードのコピーが GitHub ユーザーアカウントの下に作成されます。 2. ローカルディスクにある 'transformers' フォークをクローンし、ベースリポジトリをリモートとして追加します: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. 開発環境を設定します。たとえば、以下のコマンドを実行してください: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 依存関係が増えているため、OSに応じて、Transformersのオプションの依存関係の数が増えるかもしれません。その場合は、TensorFlowをインストールしてから次のコマンドを実行してください。 ```bash pip install -e ".[quality]" ``` **注意:** CUDAをインストールする必要はありません。新しいモデルをCPUで動作させることが十分です。 4. メインブランチからわかりやすい名前のブランチを作成してください。 ```bash git checkout -b add_tf_brand_new_bert ``` 5. 現在のmainブランチにフェッチしてリベースする ```bash git fetch upstream git rebase upstream/main ``` 6. `transformers/src/models/brandnewbert/`に`modeling_tf_brandnewbert.py`という名前の空の`.py`ファイルを追加します。これはあなたのTensorFlowモデルファイルです。 7. 以下を使用して変更内容をアカウントにプッシュします: ```bash git add . git commit -m "initial commit" git push -u origin add_tf_brand_new_bert ``` 8. GitHub上でフォークしたウェブページに移動し、「プルリクエスト」をクリックします。将来の変更に備えて、Hugging Face チームのメンバーのGitHubハンドルをレビュアーとして追加してください。 9. GitHubのプルリクエストウェブページの右側にある「ドラフトに変換」をクリックして、プルリクエストをドラフトに変更します。 これで、🤗 Transformers内に*BrandNewBert*をTensorFlowに移植するための開発環境が設定されました。 **3. (任意) 理論的な側面と既存の実装を理解する** *BrandNewBert*の論文が存在する場合、その記述的な作業を読む時間を取るべきです。論文には理解が難しい大きなセクションがあるかもしれません。その場合でも問題ありません - 心配しないでください!目標は論文の理論的な理解を深めることではなく、🤗 Transformersを使用してTensorFlowでモデルを効果的に再実装するために必要な情報を抽出することです。とは言え、理論的な側面にあまり時間をかける必要はありません。代わりに、既存のモデルのドキュメンテーションページ(たとえば、[BERTのモデルドキュメント](model_doc/bert)など)に焦点を当てるべきです。 実装するモデルの基本を把握した後、既存の実装を理解することは重要です。これは、動作する実装がモデルに対する期待と一致することを確認する絶好の機会であり、TensorFlow側での技術的な課題を予測することもできます。 情報の多さに圧倒されていると感じるのは完全に自然です。この段階ではモデルのすべての側面を理解する必要はありません。ただし、[フォーラム](https://discuss.huggingface.co/)で急な質問を解決することを強くお勧めします。 ### 4. Model implementation さあ、いよいよコーディングを始めましょう。お勧めする出発点は、PyTorchファイルそのものです。 `src/transformers/models/brand_new_bert/`内の`modeling_brand_new_bert.py`の内容を `modeling_tf_brand_new_bert.py`にコピーします。このセクションの目標は、 🤗 Transformersのインポート構造を更新し、`TFBrandNewBert`と `TFBrandNewBert.from_pretrained(model_repo, from_pt=True)`を正常に読み込む動作するTensorFlow *BrandNewBert*モデルを インポートできるようにすることです。 残念ながら、PyTorchモデルをTensorFlowに変換する明確な方法はありません。ただし、プロセスをできるだけスムーズにするためのヒントを以下に示します: - すべてのクラスの名前の前に `TF` を付けます(例: `BrandNewBert` は `TFBrandNewBert` になります)。 - ほとんどのPyTorchの操作には、直接TensorFlowの代替があります。たとえば、`torch.nn.Linear` は `tf.keras.layers.Dense` に対応し、`torch.nn.Dropout` は `tf.keras.layers.Dropout` に対応します。特定の操作について不明確な場合は、[TensorFlowのドキュメント](https://www.tensorflow.org/api_docs/python/tf)または[PyTorchのドキュメント](https://pytorch.org/docs/stable/)を参照できます。 - 🤗 Transformersのコードベースにパターンが見つかります。特定の操作に直接的な代替がない場合、誰かがすでに同じ問題に対処している可能性が高いです。 - デフォルトでは、PyTorchと同じ変数名と構造を維持します。これにより、デバッグや問題の追跡、修正の追加が容易になります。 - 一部のレイヤーには、各フレームワークで異なるデフォルト値があります。注目すべき例は、バッチ正規化レイヤーの epsilon です(PyTorchでは`1e-5`、[TensorFlowでは](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization) `1e-3` です)。ドキュメントを再確認してください! 
- PyTorchの `nn.Parameter` 変数は通常、TF Layerの `build()` 内で初期化する必要があります。次の例を参照してください:[PyTorch](https://github.com/huggingface/transformers/blob/655f72a6896c0533b1bdee519ed65a059c2425ac/src/transformers/models/vit_mae/modeling_vit_mae.py#L212) / [TensorFlow](https://github.com/huggingface/transformers/blob/655f72a6896c0533b1bdee519ed65a059c2425ac/src/transformers/models/vit_mae/modeling_tf_vit_mae.py#L220) - PyTorchモデルに関数の上部に `#copied from ...` がある場合、TensorFlowモデルも同じアーキテクチャからその関数を借りることができる可能性が高いです。TensorFlowアーキテクチャがある場合です。 - TensorFlow関数内で `name`属性を正しく設定することは、`from_pt=True`のウェイトのクロスロードロードを行うために重要です。通常、`name`はPyTorchコード内の対応する変数の名前です。`name`が正しく設定されていない場合、モデルウェイトのロード時にエラーメッセージで表示されます。 - ベースモデルクラス `BrandNewBertModel` のロジックは実際には `TFBrandNewBertMainLayer` にあります。これはKerasレイヤーのサブクラスです([例](https://github.com/huggingface/transformers/blob/4fd32a1f499e45f009c2c0dea4d81c321cba7e02/src/transformers/models/bert/modeling_tf_bert.py#L719))。`TFBrandNewBertModel` は、単にこのレイヤーのラッパーです。 - モデルを読み込むためには、Kerasモデルをビルドする必要があります。そのため、`TFBrandNewBertPreTrainedModel` はモデルへの入力の例、`dummy_inputs` を持つ必要があります([例](https://github.com/huggingface/transformers/blob/4fd32a1f499e45f009c2c0dea4d81c321cba7e02/src/transformers/models/bert/modeling_tf_bert.py#L916))。 - 表示が止まった場合は、助けを求めてください。私たちはあなたのお手伝いにここにいます! 🤗 モデルファイル自体だけでなく、モデルクラスと関連するドキュメンテーションページへのポインターも追加する必要があります。他のPRのパターンに従ってこの部分を完了できます ([例](https://github.com/huggingface/transformers/pull/18020/files))。 以下は手動での変更が必要な一覧です: - *BrandNewBert*のすべてのパブリッククラスを `src/transformers/__init__.py` に含める - *BrandNewBert*クラスを `src/transformers/models/auto/modeling_tf_auto.py` の対応するAutoクラスに追加 - ドキュメンテーションテストファイルのリストにモデリングファイルを追加する `utils/documentation_tests.txt` - `src/transformers/utils/dummy_tf_objects.py` に関連する *BrandNewBert* に関連する遅延ロードクラスを追加 - `src/transformers/models/brand_new_bert/__init__.py` でパブリッククラスのインポート構造を更新 - `docs/source/en/model_doc/brand_new_bert.md` に *BrandNewBert* のパブリックメソッドのドキュメンテーションポインターを追加 - `docs/source/en/model_doc/brand_new_bert.md` の *BrandNewBert* の貢献者リストに自分自身を追加 - 最後に、`docs/source/en/index.md` の *BrandNewBert* のTensorFlow列に緑色のチェックマーク ✅ を追加 モデルアーキテクチャが準備できていることを確認するために、以下のチェックリストを実行してください: 1. 訓練時に異なる動作をするすべてのレイヤー(例:Dropout)は、`training`引数を使用して呼び出され、それが最上位クラスから伝播されます。 2. 可能な限り `#copied from ...` を使用しました 3. `TFBrandNewBertMainLayer` およびそれを使用するすべてのクラスの `call` 関数が `@unpack_inputs` でデコレートされています 4. `TFBrandNewBertMainLayer` は `@keras_serializable` でデコレートされています 5. PyTorchウェイトからTensorFlowウェイトを使用してTensorFlowモデルをロードできます `TFBrandNewBert.from_pretrained(model_repo, from_pt=True)` 6. 予期される入力形式を使用してTensorFlowモデルを呼び出すことができます ### 5. Add model tests やったね、TensorFlowモデルを実装しました! 今度は、モデルが期待通りに動作することを確認するためのテストを追加する時間です。 前のセクションと同様に、`tests/models/brand_new_bert/`ディレクトリ内の`test_modeling_brand_new_bert.py`ファイルを`test_modeling_tf_brand_new_bert.py`にコピーし、必要なTensorFlowの置換を行うことをお勧めします。 今の段階では、すべての`.from_pretrained()`呼び出しで、既存のPyTorchの重みをロードするために`from_pt=True`フラグを使用する必要があります。 作業が完了したら、テストを実行する準備が整いました! 😬 ```bash NVIDIA_TF32_OVERRIDE=0 RUN_SLOW=1 RUN_PT_TF_CROSS_TESTS=1 \ py.test -vv tests/models/brand_new_bert/test_modeling_tf_brand_new_bert.py ``` 最も可能性の高い結果は、多くのエラーが表示されることです。心配しないでください、これは予想される動作です! MLモデルのデバッグは非常に難しいとされており、成功の鍵は忍耐力(と`breakpoint()`)です。私たちの経験では、 最も難しい問題はMLフレームワーク間の微妙な不一致から発生し、これについてはこのガイドの最後にいくつかのポインタを示します。 他の場合では、一般的なテストが直接モデルに適用できない場合もあり、その場合はモデルのテストクラスレベルでオーバーライドを提案します。 問題の種類に関係なく、詰まった場合は、ドラフトのプルリクエストで助けを求めることをためらわないでください。 すべてのテストがパスしたら、おめでとうございます。あなたのモデルはほぼ🤗 Transformersライブラリに追加する準備が整いました!🎉 **6. 
プルリクエストを提出する** 実装とテストが完了したら、プルリクエストを提出する準備が整いました。コードをプッシュする前に、 コードフォーマットユーティリティである `make fixup` 🪄 を実行してください。 これにより、自動的なチェックに失敗する可能性のあるフォーマットの問題が自動的に修正されます。 これで、ドラフトプルリクエストを実際のプルリクエストに変換する準備が整いました。 これを行うには、「レビュー待ち」ボタンをクリックし、Joao(`@gante`)とMatt(`@Rocketknight1`)をレビュワーとして追加します。 モデルプルリクエストには少なくとも3人のレビュワーが必要ですが、モデルに適切な追加のレビュワーを見つけるのは彼らの責任です。 すべてのレビュワーがプルリクエストの状態に満足したら、最後のアクションポイントは、`.from_pretrained()` 呼び出しで `from_pt=True` フラグを削除することです。 TensorFlowのウェイトが存在しないため、それらを追加する必要があります!これを行う方法については、以下のセクションを確認してください。 最後に、TensorFlowのウェイトがマージされ、少なくとも3人のレビューアが承認し、すべてのCIチェックが 成功した場合、テストをローカルで最後にもう一度確認してください。 ```bash NVIDIA_TF32_OVERRIDE=0 RUN_SLOW=1 RUN_PT_TF_CROSS_TESTS=1 \ py.test -vv tests/models/brand_new_bert/test_modeling_tf_brand_new_bert.py ``` そして、あなたのPRをマージします!マイルストーン達成おめでとうございます 🎉 **7. (Optional) デモを作成して世界と共有** オープンソースの最も難しい部分の1つは、発見です。あなたの素晴らしいTensorFlowの貢献が存在することを他のユーザーがどのように知ることができるでしょうか?適切なコミュニケーションです! 📣 コミュニティとモデルを共有する主要な方法は2つあります。 - デモを作成します。これにはGradioデモ、ノートブック、およびモデルを紹介するための他の楽しい方法が含まれます。[コミュニティ駆動のデモ](https://huggingface.co/docs/transformers/community)にノートブックを追加することを強くお勧めします。 - TwitterやLinkedInなどのソーシャルメディアでストーリーを共有します。あなたの仕事に誇りを持ち、コミュニティとあなたの成果を共有するべきです - あなたのモデルは今や世界中の何千人ものエンジニアや研究者によって使用される可能性があります 🌍!私たちはあなたの投稿をリツイートして共同体と共有するお手伝いを喜んでします。 ## Adding TensorFlow weights to 🤗 Hub TensorFlowモデルのアーキテクチャが🤗 Transformersで利用可能な場合、PyTorchの重みをTensorFlowの重みに変換することは簡単です! 以下がその方法です: 1. ターミナルでHugging Faceアカウントにログインしていることを確認してください。コマンド`huggingface-cli login`を使用してログインできます(アクセストークンは[こちら](https://huggingface.co/settings/tokens)で見つけることができます)。 2. `transformers-cli pt-to-tf --model-name foo/bar`というコマンドを実行します。ここで、`foo/bar`は変換したいPyTorchの重みを含むモデルリポジトリの名前です。 3. 上記のコマンドで作成された🤗 Hub PRに`@joaogante`と`@Rocketknight1`をタグ付けします。 それだけです! 🎉 ## Debugging mismatches across ML frameworks 🐛 新しいアーキテクチャを追加したり、既存のアーキテクチャのTensorFlowの重みを作成したりする際、PyTorchとTensorFlow間の不一致についてのエラーに遭遇することがあります。 場合によっては、PyTorchとTensorFlowのモデルアーキテクチャがほぼ同一であるにもかかわらず、不一致を指摘するエラーが表示されることがあります。 どうしてでしょうか? 🤔 まず最初に、なぜこれらの不一致を理解することが重要かについて話しましょう。多くのコミュニティメンバーは🤗 Transformersモデルをそのまま使用し、モデルが期待どおりに動作すると信頼しています。 2つのフレームワーク間で大きな不一致があると、少なくとも1つのフレームワークのリファレンス実装に従ってモデルが動作しないことを意味します。 これにより、モデルは実行されますが性能が低下する可能性があり、静かな失敗が発生する可能性があります。これは、全く実行されないモデルよりも悪いと言えるかもしれません!そのため、モデルのすべての段階でのフレームワークの不一致が`1e-5`未満であることを目指しています。 数値計算の問題と同様に、詳細については細かいところにあります。そして、詳細指向の技術である以上、秘密の要素は忍耐です。 この種の問題に遭遇した場合のお勧めのワークフローは次のとおりです: 1. 不一致の原因を特定します。変換中のモデルにはおそらく特定の点までほぼ同一の内部変数があります。 両方のフレームワークのアーキテクチャに`breakpoint()`ステートメントを配置し、トップダウンの方法で数値変数の値を比較し、問題の原因を見つけます。 2. 問題の原因を特定したら、🤗 Transformersチームと連絡を取りましょう。同様の問題に遭遇したことがあるかもしれず、迅速に解決策を提供できるかもしれません。最終手段として、StackOverflowやGitHubの問題など、人気のあるページをスキャンします。 3. 解決策が見当たらない場合、問題を掘り下げる必要があることを意味します。良いニュースは、問題の原因を特定したことです。したがって、問題のある命令に焦点を当て、モデルの残りを抽象化できます!悪いニュースは、その命令のソース実装に進む必要があることです。一部の場合では、リファレンス実装に問題があるかもしれません - 上流リポジトリで問題を開くのを控えないでください。 🤗 Transformersチームとの話し合いで、不一致を修正することが困難であることが判明することがあります。 出力レイヤーのモデルで不一致が非常に小さい場合(ただし、隠れた状態では大きい可能性がある)、モデルを配布するためにそれを無視することにするかもしれません。 上記で言及した`pt-to-tf` CLIには、重み変換時にエラーメッセージを無視するための`--max-error`フラグがあります。
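たとえば、次のようなスケッチで両フレームワークの隠れ状態を層ごとに比較し、不一致がどの層から始まるかを特定できます。*BrandNewBert* とチェックポイント名はこのガイドで使っている架空の例なので、実際のモデル名に置き換えてください:

```python
# 仮のスケッチ: PyTorch 版と TensorFlow 版の隠れ状態を層ごとに比較します。
import numpy as np
import torch

from transformers import AutoTokenizer, BrandNewBertModel, TFBrandNewBertModel

checkpoint = "your-username/brand-new-bert"  # 架空のチェックポイント名

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
pt_model = BrandNewBertModel.from_pretrained(checkpoint)
tf_model = TFBrandNewBertModel.from_pretrained(checkpoint, from_pt=True)

text = "Debugging cross-framework mismatches!"
pt_inputs = tokenizer(text, return_tensors="pt")
tf_inputs = tokenizer(text, return_tensors="tf")

with torch.no_grad():
    pt_outputs = pt_model(**pt_inputs, output_hidden_states=True)
tf_outputs = tf_model(tf_inputs, output_hidden_states=True)

# 各層の最大絶対誤差を確認します(目標はすべての段階で 1e-5 未満)
for i, (pt_h, tf_h) in enumerate(zip(pt_outputs.hidden_states, tf_outputs.hidden_states)):
    max_diff = np.max(np.abs(pt_h.numpy() - tf_h.numpy()))
    print(f"layer {i}: max diff = {max_diff:.2e}")
```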
transformers/docs/source/ja/add_tensorflow_model.md/0
{ "file_path": "transformers/docs/source/ja/add_tensorflow_model.md", "repo_id": "transformers", "token_count": 11998 }
258
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # インストール 使用しているDeep Learningライブラリに対して、🤗 Transformersをインストールしてキャッシュを設定、そしてオプションでオフラインで実行できるように 🤗 Transformersを設定します。 🤗 TransformersはPython 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, Flaxで動作確認しています。 使用しているDeep Learningライブラリに合わせて、以下のインストール方法に従ってください: * [PyTorch](https://pytorch.org/get-started/locally/)のインストール手順。 * [TensorFlow 2.0](https://www.tensorflow.org/install/pip)のインストール手順。 * [Flax](https://flax.readthedocs.io/en/latest/)のインストール手順。 ## pipでのインストール 🤗 Transformersを[仮想環境](https://docs.python.org/3/library/venv.html)にインストールする必要があります。 もし、Pythonの仮想環境に馴染みがない場合は、この[ガイド](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)をご覧ください。仮想環境によって異なるプロジェクトの管理がより簡単になり、依存関係間の互換性の問題を回避できます。 まず、プロジェクトディレクトリに仮想環境を作成することから始めましょう: ```bash python -m venv .env ``` 仮想環境を起動しましょう。LinuxとMacOsの場合は以下のコマンドで起動します: ```bash source .env/bin/activate ``` Windowsで仮想環境を起動します ```bash .env/Scripts/activate ``` これで、次のコマンドで🤗 Transformersをインストールする準備が整いました: ```bash pip install transformers ``` CPU対応のみ必要な場合、🤗 TransformersとDeep Learningライブラリを1行でインストールできるようになっていて便利です。例えば、🤗 TransformersとPyTorchを以下のように一緒にインストールできます: ```bash pip install transformers[torch] ``` 🤗 TransformersとTensorFlow 2.0: ```bash pip install transformers[tf-cpu] ``` 🤗 TransformersとFlax: ```bash pip install transformers[flax] ``` 最後に、以下のコマンドを実行することで🤗 Transformersが正しくインストールされているかを確認します。学習済みモデルがダウンロードされます: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` その後、ラベルとスコアが出力されます: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## ソースからのインストール 以下のコマンドでソースから🤗 Transformersをインストールします: ```bash pip install git+https://github.com/huggingface/transformers ``` このコマンドは最新の安定版ではなく、開発における最新の`main`バージョンをインストールします。`main`バージョンは最新の開発状況に対応するのに便利です。例えば、最後の公式リリース以降にバグが修正されたが、新しいリリースがまだ展開されていない場合などです。しかし、これは`main`バージョンが常に安定しているとは限らないことを意味します。私たちは`main`バージョンの運用を維持するよう努め、ほとんどの問題は通常、数時間から1日以内に解決されます。もし問題に遭遇した場合は、より早く修正できるように[Issue](https://github.com/huggingface/transformers/issues)を作成してください! 以下のコマンドを実行して、🤗 Transformersが正しくインストールされているかどうかを確認します: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## 編集可能なインストール 必要に応じて、編集可能なインストールをします: * ソースコードの`main`バージョンを使います。 * 🤗 Transformersにコントリビュートし、コードの変更をテストする必要があります。 以下のコマンドでレポジトリをクローンして、🤗 Transformersをインストールします: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` 上記のコマンドは、レポジトリをクローンしたフォルダとPythonのライブラリをパスをリンクします。Pythonは通常のライブラリパスに加えて、あなたがクローンしたフォルダの中も見るようになります。例えば、Pythonパッケージが通常、`~/anaconda3/envs/main/lib/python3.7/site-packages/`にインストールされている場合、Pythonはクローンしたフォルダも検索するようになります: `~/transformers/`. 
<Tip warning={true}> ライブラリーを使い続けたい場合は、transformersフォルダーを保持しつづける必要があります。 </Tip> これで、次のコマンドで簡単にクローンを🤗 Transformersの最新版に更新できます: ```bash cd ~/transformers/ git pull ``` Python環境は次回の実行時に🤗 Transformersの`main`バージョンを見つけるようになります。 ## condaでのインストール `conda-forge`のcondaチャンネルからインストールします: ```bash conda install conda-forge::transformers ``` ## キャッシュの設定 学習済みモデルはダウンロードされ、ローカルにキャッシュされます: `~/.cache/huggingface/hub`. これはシェル環境変数`TRANSFORMERS_CACHE`で指定されるデフォルトのディレクトリです。Windowsでは、デフォルトのディレクトリは`C:\Users\username\.cache\huggingface\hub`になっています。異なるキャッシュディレクトリを指定するために、以下のシェル環境変数を変更することが可能です。優先度は以下の順番に対応します: 1. シェル環境変数 (デフォルト): `HUGGINGFACE_HUB_CACHE` または `TRANSFORMERS_CACHE`. 2. シェル環境変数: `HF_HOME`. 3. シェル環境変数: `XDG_CACHE_HOME` + `/huggingface`. <Tip> もし、以前のバージョンのライブラリを使用していた人で、`PYTORCH_TRANSFORMERS_CACHE`または`PYTORCH_PRETRAINED_BERT_CACHE`を設定していた場合、シェル環境変数`TRANSFORMERS_CACHE`を指定しない限り🤗 Transformersはこれらのシェル環境変数を使用します。 </Tip> ## オフラインモード 🤗 Transformersはローカルファイルのみを使用することでファイアウォールやオフラインの環境でも動作させることができます。この動作を有効にするためには、環境変数`TRANSFORMERS_OFFLINE=1`を設定します。 <Tip> 環境変数`HF_DATASETS_OFFLINE=1`を設定し、オフライントレーニングワークフローに[🤗 Datasets](https://huggingface.co/docs/datasets/)を追加します。 </Tip> 例えば、外部インスタンスに対してファイアウォールで保護された通常のネットワーク上でプログラムを実行する場合、通常以下のようなコマンドで実行することになります: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` オフラインインスタンスでこの同じプログラムを実行します: ```bash HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` このスクリプトは、ローカルファイルのみを検索することが分かっているので、ハングアップしたりタイムアウトを待ったりすることなく実行されるはずです。 ### オフラインで使用するためにモデルやトークナイザーを取得する オフラインで🤗 Transformersを使用するもう1つの方法は、前もってファイルをダウンロードしておき、オフラインで使用する必要があるときにそのローカルパスを指定することです。これには3つの方法があります: * [Model Hub](https://huggingface.co/models)のユーザーインターフェース上から↓アイコンをクリックしてファイルをダウンロードする方法。 ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * [`PreTrainedModel.from_pretrained`]および[`PreTrainedModel.save_pretrained`]のワークフローを使用する方法: 1. [`PreTrainedModel.from_pretrained`]で前もってファイルをダウンロードします: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. [`PreTrainedModel.save_pretrained`]で指定されたディレクトリにファイルを保存しておきます: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. オフラインにある時、[`PreTrainedModel.from_pretrained`]に指定したディレクトリからファイルをリロードします: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * プログラム的に[huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub)ライブラリを用いて、ファイルをダウンロードする方法: 1. 仮想環境に`huggingface_hub`ライブラリをインストールします: ```bash python -m pip install huggingface_hub ``` 2. 
指定のパスにファイルをダウンロードするために、[`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub)関数を使用します。例えば、以下のコマンドで、[T0](https://huggingface.co/bigscience/T0_3B)モデルの`config.json`ファイルを指定のパスにダウンロードできます: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` ファイルがダウンロードされ、ローカルにキャッシュされたら、そのローカルパスを指定してファイルをロードして使用します: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> Hubに保存されているファイルをダウンロードする方法の詳細については、[How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream)セクションを参照してください。 </Tip>
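なお、環境変数の代わりに、`from_pretrained` に `local_files_only=True` を渡すことで、呼び出しごとにローカルにキャッシュされたファイルのみを使用するよう指定することもできます(ファイルがキャッシュされていない場合はネットワークに接続せずエラーになります)。以下は簡単な例です(モデル名は一例です):

```py
>>> from transformers import AutoConfig, AutoTokenizer

>>> config = AutoConfig.from_pretrained("bigscience/T0_3B", local_files_only=True)
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B", local_files_only=True)
```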
transformers/docs/source/ja/installation.md/0
{ "file_path": "transformers/docs/source/ja/installation.md", "repo_id": "transformers", "token_count": 4808 }
259
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # AltCLIP ## 概要 AltCLIPモデルは、「[AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679v2)」という論文でZhongzhi Chen、Guang Liu、Bo-Wen Zhang、Fulong Ye、Qinghong Yang、Ledell Wuによって提案されました。AltCLIP(CLIPの言語エンコーダーの代替)は、様々な画像-テキストペアおよびテキスト-テキストペアでトレーニングされたニューラルネットワークです。CLIPのテキストエンコーダーを事前学習済みの多言語テキストエンコーダーXLM-Rに置き換えることで、ほぼ全てのタスクでCLIPに非常に近い性能を得られ、オリジナルのCLIPの能力を多言語理解などに拡張しました。 論文の要旨は以下の通りです: *この研究では、強力なバイリンガルマルチモーダル表現モデルを訓練するための概念的に単純で効果的な方法を提案します。OpenAIによってリリースされたマルチモーダル表現モデルCLIPから開始し、そのテキストエンコーダを事前学習済みの多言語テキストエンコーダXLM-Rに交換し、教師学習と対照学習からなる2段階のトレーニングスキーマを用いて言語と画像の表現を整合させました。幅広いタスクの評価を通じて、我々の方法を検証します。ImageNet-CN、Flicker30k-CN、COCO-CNを含む多くのタスクで新たな最先端の性能を達成しました。さらに、ほぼすべてのタスクでCLIPに非常に近い性能を得ており、これはCLIPのテキストエンコーダを変更するだけで、多言語理解などの拡張を実現できることを示唆しています。* このモデルは[jongjyh](https://huggingface.co/jongjyh)により提供されました。 ## 使用上のヒントと使用例 AltCLIPの使用方法はCLIPに非常に似ています。CLIPとの違いはテキストエンコーダーにあります。私たちはカジュアルアテンションではなく双方向アテンションを使用し、XLM-Rの[CLS]トークンをテキスト埋め込みを表すものとして取ることに留意してください。 AltCLIPはマルチモーダルな視覚言語モデルです。これは画像とテキストの類似度や、ゼロショット画像分類に使用できます。AltCLIPはViTのようなTransformerを使用して視覚的特徴を、双方向言語モデルを使用してテキスト特徴を取得します。テキストと視覚の両方の特徴は、同一の次元を持つ潜在空間に射影されます。射影された画像とテキスト特徴間のドット積が類似度スコアとして使用されます。 Transformerエンコーダーに画像を与えるには、各画像を固定サイズの重複しないパッチの系列に分割し、それらを線形に埋め込みます。画像全体を表現するための[CLS]トークンが追加されます。著者は絶対位置埋め込みも追加し、結果として得られるベクトルの系列を標準的なTransformerエンコーダーに供給します。[`CLIPImageProcessor`]を使用して、モデルのために画像のサイズ変更(または拡大縮小)と正規化を行うことができます。 [`AltCLIPProcessor`]は、テキストのエンコードと画像の前処理を両方行うために、[`CLIPImageProcessor`]と[`XLMRobertaTokenizer`]を単一のインスタンスにラップします。以下の例は、[`AltCLIPProcessor`]と[`AltCLIPModel`]を使用して画像-テキスト類似スコアを取得する方法を示しています。 ```python >>> from PIL import Image >>> import requests >>> from transformers import AltCLIPModel, AltCLIPProcessor >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` <Tip> このモデルは`CLIPModel`をベースにしており、オリジナルの[CLIP](clip)と同じように使用してください。 </Tip> ## AltCLIPConfig [[autodoc]] AltCLIPConfig - from_text_vision_configs ## AltCLIPTextConfig [[autodoc]] AltCLIPTextConfig ## AltCLIPVisionConfig [[autodoc]] AltCLIPVisionConfig ## AltCLIPProcessor [[autodoc]] AltCLIPProcessor ## AltCLIPModel [[autodoc]] AltCLIPModel - forward - get_text_features - get_image_features ## AltCLIPTextModel [[autodoc]] AltCLIPTextModel - forward ## AltCLIPVisionModel [[autodoc]] 
AltCLIPVisionModel - forward
transformers/docs/source/ja/model_doc/altclip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/altclip.md", "repo_id": "transformers", "token_count": 2400 }
260
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Big Transfer (BiT) ## Overview BiT モデルは、Alexander Kolesnikov、Lucas Beyer、Xiaohua Zhai、Joan Puigcerver、Jessica Yung、Sylvain Gelly によって [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) で提案されました。ニール・ホールズビー。 BiT は、[ResNet](resnet) のようなアーキテクチャ (具体的には ResNetv2) の事前トレーニングをスケールアップするための簡単なレシピです。この方法により、転移学習が大幅に改善されます。 論文の要約は次のとおりです。 *事前トレーニングされた表現の転送により、サンプル効率が向上し、視覚用のディープ ニューラル ネットワークをトレーニングする際のハイパーパラメーター調整が簡素化されます。大規模な教師ありデータセットでの事前トレーニングと、ターゲット タスクでのモデルの微調整のパラダイムを再検討します。私たちは事前トレーニングをスケールアップし、Big Transfer (BiT) と呼ぶシンプルなレシピを提案します。いくつかの慎重に選択されたコンポーネントを組み合わせ、シンプルなヒューリスティックを使用して転送することにより、20 を超えるデータセットで優れたパフォーマンスを実現します。 BiT は、クラスごとに 1 つのサンプルから合計 100 万のサンプルまで、驚くほど広範囲のデータ領域にわたって良好にパフォーマンスを発揮します。 BiT は、ILSVRC-2012 で 87.5%、CIFAR-10 で 99.4%、19 タスクの Visual Task Adaptation Benchmark (VTAB) で 76.3% のトップ 1 精度を達成しました。小規模なデータセットでは、BiT は ILSVRC-2012 (クラスあたり 10 例) で 76.8%、CIFAR-10 (クラスあたり 10 例) で 97.0% を達成しました。高い転写性能を実現する主要成分を詳細に分析※。 ## Usage tips - BiT モデルは、アーキテクチャの点で ResNetv2 と同等ですが、次の点が異なります: 1) すべてのバッチ正規化層が [グループ正規化](https://arxiv.org/abs/1803.08494) に置き換えられます。 2) [重みの標準化](https://arxiv.org/abs/1903.10520) は畳み込み層に使用されます。著者らは、両方の組み合わせが大きなバッチサイズでのトレーニングに役立ち、重要な効果があることを示しています。 転移学習への影響。 このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。 元のコードは [こちら](https://github.com/google-research/big_transfer) にあります。 ## Resources BiT を始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。 <PipelineTag pipeline="image-classification"/> - [`BitForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)。 - 参照: [画像分類タスク ガイド](../tasks/image_classification) ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## BitConfig [[autodoc]] BitConfig ## BitImageProcessor [[autodoc]] BitImageProcessor - preprocess ## BitModel [[autodoc]] BitModel - forward ## BitForImageClassification [[autodoc]] BitForImageClassification - forward
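参考までに、[`BitForImageClassification`] を使った画像分類の最小限の例を以下に示します。チェックポイント名は一例であり、出力されるラベルは使用するモデルと画像によって異なります:

```py
>>> import torch
>>> from datasets import load_dataset
>>> from transformers import AutoImageProcessor, BitForImageClassification

>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]

>>> image_processor = AutoImageProcessor.from_pretrained("google/bit-50")
>>> model = BitForImageClassification.from_pretrained("google/bit-50")

>>> inputs = image_processor(image, return_tensors="pt")

>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
```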
transformers/docs/source/ja/model_doc/bit.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bit.md", "repo_id": "transformers", "token_count": 1858 }
261
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLVP ## Overview CLVP (Contrastive Language-Voice Pretrained Transformer) モデルは、James Betker によって [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) で提案されました。 論文の要約は次のとおりです。 *近年、画像生成の分野は自己回帰変換器と DDPM の応用によって革命を起こしています。これらのアプローチは、画像生成のプロセスを段階的な確率的プロセスとしてモデル化し、大量のコンピューティングとデータを活用して画像の分布を学習します。パフォーマンスを向上させるこの方法論は、画像に限定される必要はありません。この論文では、画像生成ドメインの進歩を音声合成に適用する方法について説明します。その結果、表現力豊かなマルチ音声テキスト読み上げシステムである TorToise が誕生しました。 このモデルは [Susnato Dhar](https://huggingface.co/susnato) によって提供されました。 元のコードは [ここ](https://github.com/neonbjb/tortoise-tts) にあります。 ## Usage tips 1. CLVP は Tortoise TTS モデルの不可欠な部分です。 2. CLVP を使用して、生成されたさまざまな音声候補を提供されたテキストと比較することができ、最良の音声トークンが拡散モデルに転送されます。 3. Tortoise の使用には、[`ClvpModelForConditionalGeneration.generate()`] メソッドの使用を強くお勧めします。 4. 16 kHz を期待する他のオーディオ モデルとは対照的に、CLVP モデルはオーディオが 22.05 kHz でサンプリングされることを期待していることに注意してください。 ## Brief Explanation: - [`ClvpTokenizer`] はテキスト入力をトークン化し、[`ClvpFeatureExtractor`] は目的のオーディオからログ メル スペクトログラムを抽出します。 - [`ClvpConditioningEncoder`] は、これらのテキスト トークンとオーディオ表現を取得し、テキストとオーディオに基づいて条件付けされた埋め込みに変換します。 - [`ClvpForCausalLM`] は、これらの埋め込みを使用して複数の音声候補を生成します。 - 各音声候補は音声エンコーダ ([`ClvpEncoder`]) を通過してベクトル表現に変換され、テキスト エンコーダ ([`ClvpEncoder`]) はテキスト トークンを同じ潜在空間に変換します。 - 最後に、各音声ベクトルをテキスト ベクトルと比較して、どの音声ベクトルがテキスト ベクトルに最も類似しているかを確認します。 - [`ClvpModelForConditionalGeneration.generate()`] は、上記のすべてのロジックを 1 つのメソッドに圧縮します。 例 : ```python >>> import datasets >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library). >>> text = "This is an example text." >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) >>> sample = ds[0]["audio"] >>> # Define processor and model. >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") >>> # Generate processor output and model output. 
>>> processor_output = processor(raw_speech=sample["array"], sampling_rate=sample["sampling_rate"], text=text, return_tensors="pt") >>> generated_output = model.generate(**processor_output) ``` ## ClvpConfig [[autodoc]] ClvpConfig - from_sub_model_configs ## ClvpEncoderConfig [[autodoc]] ClvpEncoderConfig ## ClvpDecoderConfig [[autodoc]] ClvpDecoderConfig ## ClvpTokenizer [[autodoc]] ClvpTokenizer - save_vocabulary ## ClvpFeatureExtractor [[autodoc]] ClvpFeatureExtractor - __call__ ## ClvpProcessor [[autodoc]] ClvpProcessor - __call__ - decode - batch_decode ## ClvpModelForConditionalGeneration [[autodoc]] ClvpModelForConditionalGeneration - forward - generate - get_text_features - get_speech_features ## ClvpForCausalLM [[autodoc]] ClvpForCausalLM ## ClvpModel [[autodoc]] ClvpModel ## ClvpEncoder [[autodoc]] ClvpEncoder ## ClvpDecoder [[autodoc]] ClvpDecoder
transformers/docs/source/ja/model_doc/clvp.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clvp.md", "repo_id": "transformers", "token_count": 2038 }
262
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeiT ## Overview DeiT モデルは、Hugo Touvron、Matthieu Cord、Matthijs Douze、Francisco Massa、Alexandre Sablayrolles, Hervé Jégou.によって [Training data-efficient image Transformers & distillation through attention](https://arxiv.org/abs/2012.12877) で提案されました。 サブレイロール、エルヴェ・ジェグー。 [Dosovitskiy et al., 2020](https://arxiv.org/abs/2010.11929) で紹介された [Vision Transformer (ViT)](vit) は、既存の畳み込みニューラルと同等、またはそれを上回るパフォーマンスを発揮できることを示しました。 Transformer エンコーダ (BERT のような) を使用したネットワーク。ただし、その論文で紹介された ViT モデルには、次のトレーニングが必要でした。 外部データを使用して、数週間にわたる高価なインフラストラクチャ。 DeiT (データ効率の高い画像変換器) はさらに優れています 画像分類用に効率的にトレーニングされたトランスフォーマーにより、必要なデータとコンピューティング リソースがはるかに少なくなります。 オリジナルの ViT モデルとの比較。 論文の要約は次のとおりです。 *最近、純粋に注意に基づくニューラル ネットワークが、画像などの画像理解タスクに対処できることが示されました。 分類。ただし、これらのビジュアル トランスフォーマーは、 インフラストラクチャが高価であるため、その採用が制限されています。この作業では、コンボリューションフリーの競争力のあるゲームを作成します。 Imagenet のみでトレーニングしてトランスフォーマーを作成します。 1 台のコンピューターで 3 日以内にトレーニングを行います。私たちの基準となるビジョン トランス (86M パラメータ) は、外部なしで ImageNet 上で 83.1% (単一クロップ評価) のトップ 1 の精度を達成します。 データ。さらに重要なのは、トランスフォーマーに特有の教師と生徒の戦略を導入することです。蒸留に依存している 学生が注意を払って教師から学ぶことを保証するトークン。私たちはこのトークンベースに興味を示します 特に convnet を教師として使用する場合。これにより、convnet と競合する結果を報告できるようになります。 Imagenet (最大 85.2% の精度が得られます) と他のタスクに転送するときの両方で。私たちはコードを共有し、 モデル。* このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。このモデルの TensorFlow バージョンは、[amyeroberts](https://huggingface.co/amyeroberts) によって追加されました。 ## Usage tips - ViT と比較して、DeiT モデルはいわゆる蒸留トークンを使用して教師から効果的に学習します (これは、 DeiT 論文は、ResNet のようなモデルです)。蒸留トークンは、バックプロパゲーションを通じて、と対話することによって学習されます。 セルフアテンション層を介したクラス ([CLS]) とパッチ トークン。 - 抽出されたモデルを微調整するには 2 つの方法があります。(1) 上部に予測ヘッドを配置するだけの古典的な方法。 クラス トークンの最終的な非表示状態を抽出し、蒸留シグナルを使用しない、または (2) 両方の 予測ヘッドはクラス トークンの上と蒸留トークンの上にあります。その場合、[CLS] 予測は head は、head の予測とグラウンド トゥルース ラベル間の通常のクロスエントロピーを使用してトレーニングされます。 蒸留予測ヘッドは、硬蒸留 (予測と予測の間のクロスエントロピー) を使用してトレーニングされます。 蒸留ヘッドと教師が予測したラベル)。推論時に、平均予測を取得します。 最終的な予測として両頭の間で。 (2) は「蒸留による微調整」とも呼ばれます。 下流のデータセットですでに微調整されている教師。モデル的には (1) に相当します。 [`DeiTForImageClassification`] と (2) に対応します。 [`DeiTForImageClassificationWithTeacher`]。 - 著者らは (2) についてもソフト蒸留を試みたことに注意してください (この場合、蒸留予測ヘッドは 教師のソフトマックス出力に一致するように KL ダイバージェンスを使用してトレーニングしました)が、ハード蒸留が最良の結果をもたらしました。 - リリースされたすべてのチェックポイントは、ImageNet-1k のみで事前トレーニングおよび微調整されました。外部データは使用されませんでした。これは JFT-300M データセット/Imagenet-21k などの外部データを使用した元の ViT モデルとは対照的です。 事前トレーニング。 - DeiT の作者は、より効率的にトレーニングされた ViT モデルもリリースしました。これは、直接プラグインできます。 [`ViTModel`] または [`ViTForImageClassification`]。データなどのテクニック はるかに大規模なデータセットでのトレーニングをシミュレートするために、拡張、最適化、正則化が使用されました。 (ただし、事前トレーニングには ImageNet-1k のみを使用します)。 4 つのバリエーション (3 つの異なるサイズ) が利用可能です。 *facebook/deit-tiny-patch16-224*、*facebook/deit-small-patch16-224*、*facebook/deit-base-patch16-224* および *facebook/deit-base-patch16-384*。以下を行うには [`DeiTImageProcessor`] を使用する必要があることに注意してください。 モデル用の画像を準備します。 ## Resources DeiT を始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。 
<PipelineTag pipeline="image-classification"/> - [`DeiTForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)。 - 参照: [画像分類タスク ガイド](../tasks/image_classification) それに加えて: - [`DeiTForMaskedImageModeling`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining) でサポートされています。 ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## DeiTConfig [[autodoc]] DeiTConfig ## DeiTFeatureExtractor [[autodoc]] DeiTFeatureExtractor - __call__ ## DeiTImageProcessor [[autodoc]] DeiTImageProcessor - preprocess <frameworkcontent> <pt> ## DeiTModel [[autodoc]] DeiTModel - forward ## DeiTForMaskedImageModeling [[autodoc]] DeiTForMaskedImageModeling - forward ## DeiTForImageClassification [[autodoc]] DeiTForImageClassification - forward ## DeiTForImageClassificationWithTeacher [[autodoc]] DeiTForImageClassificationWithTeacher - forward </pt> <tf> ## TFDeiTModel [[autodoc]] TFDeiTModel - call ## TFDeiTForMaskedImageModeling [[autodoc]] TFDeiTForMaskedImageModeling - call ## TFDeiTForImageClassification [[autodoc]] TFDeiTForImageClassification - call ## TFDeiTForImageClassificationWithTeacher [[autodoc]] TFDeiTForImageClassificationWithTeacher - call </tf> </frameworkcontent>
transformers/docs/source/ja/model_doc/deit.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/deit.md", "repo_id": "transformers", "token_count": 3682 }
263
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ このファイルはMarkdown形式ですが、Hugging Faceのドキュメントビルダー向けに特定の構文を含んでいるため、 通常のMarkdownビューアーで正しく表示されないことに注意してください。 --> # Quick tour [[open-in-colab]] 🤗 Transformersを使い始めましょう! 開発者であろうと、日常的なユーザーであろうと、このクイックツアーは 初めて始めるのを支援し、[`pipeline`]を使った推論方法、[AutoClass](./model_doc/auto)で事前学習済みモデルとプリプロセッサをロードする方法、 そしてPyTorchまたはTensorFlowで素早くモデルをトレーニングする方法を示します。 初心者の場合、ここで紹介されたコンセプトの詳細な説明を提供する チュートリアルまたは[コース](https://huggingface.co/course/chapter1/1)を次に参照することをお勧めします。 始める前に、必要なライブラリがすべてインストールされていることを確認してください: ```bash !pip install transformers datasets ``` あなたはまた、好きな機械学習フレームワークをインストールする必要があります: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Pipeline <Youtube id="tiZFewofSLM"/> [`pipeline`] は、事前学習済みモデルを推論に最も簡単で高速な方法です。 [`pipeline`] を使用することで、さまざまなモダリティにわたる多くのタスクに対して即座に使用できます。 いくつかのタスクは以下の表に示されています: <Tip> 使用可能なタスクの完全な一覧については、[pipeline API リファレンス](./main_classes/pipelines)を確認してください。 </Tip> | **タスク** | **説明** | **モダリティ** | **パイプライン識別子** | |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------| | テキスト分類 | テキストのシーケンスにラベルを割り当てる | NLP | pipeline(task="sentiment-analysis") | | テキスト生成 | プロンプトを指定してテキストを生成する | NLP | pipeline(task="text-generation") | | 要約 | テキストまたはドキュメントの要約を生成する | NLP | pipeline(task="summarization") | | 画像分類 | 画像にラベルを割り当てる | コンピュータビジョン | pipeline(task="image-classification") | | 画像セグメンテーション | 画像の各個別のピクセルにラベルを割り当てる(セマンティック、パノプティック、およびインスタンスセグメンテーションをサポート) | コンピュータビジョン | pipeline(task="image-segmentation") | | オブジェクト検出 | 画像内のオブジェクトの境界ボックスとクラスを予測する | コンピュータビジョン | pipeline(task="object-detection") | | オーディオ分類 | オーディオデータにラベルを割り当てる | オーディオ | pipeline(task="audio-classification") | | 自動音声認識 | 音声をテキストに変換する | オーディオ | pipeline(task="automatic-speech-recognition") | | ビジュアルクエスチョン応答 | 画像と質問が与えられた場合に、画像に関する質問に回答する | マルチモーダル | pipeline(task="vqa") | | ドキュメントクエスチョン応答 | ドキュメントと質問が与えられた場合に、ドキュメントに関する質問に回答する | マルチモーダル | pipeline(task="document-question-answering") | | 画像キャプショニング | 与えられた画像にキャプションを生成する | マルチモーダル | pipeline(task="image-to-text") | まず、[`pipeline`] のインスタンスを作成し、使用したいタスクを指定します。 このガイドでは、センチメント分析のために [`pipeline`] を使用する例を示します: ```python >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` [`pipeline`]は、感情分析のためのデフォルトの[事前学習済みモデル](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english)とトークナイザをダウンロードしてキャッシュし、使用できるようになります。 これで、`classifier`を対象のテキストに使用できます: ```python >>> classifier("私たちは🤗 Transformersライブラリをお見せできてとても嬉しいです。") [{'label': 'POSITIVE', 'score': 0.9998}] ``` 複数の入力がある場合は、[`pipeline`]に入力をリストとして渡して、辞書のリストを返します: ```py >>> results = classifier(["🤗 Transformersライブラリをご紹介できて非常に嬉しいです。", "嫌いにならないでほしいです。"]) >>> for result in results: ... 
print(f"label: {result['label']}, スコア: {round(result['score'], 4)}") label: POSITIVE, スコア: 0.9998 label: NEGATIVE, スコア: 0.5309 ``` [`pipeline`]は、任意のタスクに対してデータセット全体を繰り返し処理することもできます。この例では、自動音声認識をタスクとして選びましょう: ```python >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` オーディオデータセットをロードします(詳細については🤗 Datasets [クイックスタート](https://huggingface.co/docs/datasets/quickstart#audio)を参照してください)。 たとえば、[MInDS-14](https://huggingface.co/datasets/PolyAI/minds14)データセットをロードします: ```python >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` データセットのサンプリングレートが[`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h)がトレーニングされたサンプリングレートと一致することを確認してください: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` "audio"列を呼び出すと、オーディオファイルは自動的にロードされ、リサンプリングされます。最初の4つのサンプルから生の波形配列を抽出し、それをパイプラインにリストとして渡します。 ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT'] ``` 大規模なデータセットで、入力が大きい場合(音声や画像など)、すべての入力をメモリに読み込む代わりに、リストではなくジェネレータを渡すことがお勧めです。詳細については[パイプラインAPIリファレンス](./main_classes/pipelines)を参照してください。 ### Use another model and tokenizer in the pipeline [`pipeline`]は[Hub](https://huggingface.co/models)からの任意のモデルを収容でき、他のユースケースに[`pipeline`]を適応させることが容易です。たとえば、フランス語のテキストを処理できるモデルが必要な場合、Hubのタグを使用して適切なモデルをフィルタリングできます。トップのフィルタリングされた結果は、フランス語のテキストに使用できる感情分析用に調整された多言語の[BERTモデル](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment)を返します: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> [`AutoModelForSequenceClassification`]と[`AutoTokenizer`]を使用して事前学習済みモデルとそれに関連するトークナイザをロードします(次のセクションで`AutoClass`について詳しく説明します): ```python >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> 以下のコードは、[`TFAutoModelForSequenceClassification`]および[`AutoTokenizer`]を使用して、事前学習済みモデルとその関連するトークナイザをロードする方法を示しています(`TFAutoClass`については次のセクションで詳しく説明します): ```python >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> 指定したモデルとトークナイザを[`pipeline`]に設定し、今度はフランス語のテキストに`classifier`を適用できます: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` もし、あなたのユースケースに適したモデルが見つからない場合、事前学習済みモデルをあなたのデータでファインチューニングする必要があります。 ファインチューニングの方法については、[ファインチューニングのチュートリアル](./training)をご覧ください。 
最後に、ファインチューニングした事前学習済みモデルを共有し、コミュニティと共有ハブで共有することを検討してください。これにより、機械学習を民主化する手助けができます! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> [`AutoModelForSequenceClassification`] および [`AutoTokenizer`] クラスは、上記で使用した [`pipeline`] を駆動するために協力して動作します。 [AutoClass](./model_doc/auto) は、事前学習済みモデルのアーキテクチャをその名前またはパスから自動的に取得するショートカットです。 適切な `AutoClass` を選択し、それに関連する前処理クラスを選択するだけで済みます。 前のセクションからの例に戻り、`AutoClass` を使用して [`pipeline`] の結果を再現する方法を見てみましょう。 ### AutoTokenizer トークナイザはテキストをモデルの入力として使用できる数値の配列に前処理する役割を果たします。 トークナイゼーションプロセスには、単語をどのように分割するかや、単語をどのレベルで分割するかといった多くのルールがあります (トークナイゼーションについての詳細は [トークナイザサマリー](./tokenizer_summary) をご覧ください)。 最も重要なことは、モデルが事前学習済みになったときと同じトークナイゼーションルールを使用するために、同じモデル名でトークナイザをインスタンス化する必要があることです。 [`AutoTokenizer`] を使用してトークナイザをロードします: ```python >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Pass your text to the tokenizer: ```python >>> encoding = tokenizer("私たちは🤗 Transformersライブラリをお見せできてとても嬉しいです。") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` トークナイザは、次の情報を含む辞書を返します: - [input_ids](./glossary#input-ids): トークンの数値表現。 - [attention_mask](.glossary#attention-mask): どのトークンにアテンションを向けるかを示します。 トークナイザはまた、入力のリストを受け入れ、一様な長さのバッチを返すためにテキストをパディングおよび切り詰めることができます。 <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["🤗 Transformersライブラリをお見せできて非常に嬉しいです。", "嫌いではないことを願っています。"], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... 
) ``` </tf> </frameworkcontent> <Tip> [前処理](./preprocessing)チュートリアルをご覧いただき、トークナイゼーションの詳細や、[`AutoImageProcessor`]、[`AutoFeatureExtractor`]、[`AutoProcessor`]を使用して画像、オーディオ、およびマルチモーダル入力を前処理する方法について詳しく説明されているページもご覧ください。 </Tip> ### AutoModel <frameworkcontent> <pt> 🤗 Transformersは事前学習済みインスタンスを簡単に統一的にロードする方法を提供します。 これは、[`AutoTokenizer`]をロードするのと同じように[`AutoModel`]をロードできることを意味します。 タスクに適した[`AutoModel`]を選択する以外の違いはありません。 テキスト(またはシーケンス)分類の場合、[`AutoModelForSequenceClassification`]をロードする必要があります: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> [`AutoModel`]クラスでサポートされているタスクに関する詳細については、[タスクの概要](./task_summary)を参照してください。 </Tip> 今、前処理済みのバッチを直接モデルに渡します。辞書を展開するだけで、`**`を追加する必要があります: ```python >>> pt_outputs = pt_model(**pt_batch) ``` モデルは、`logits`属性に最終的なアクティベーションを出力します。 `logits`にsoftmax関数を適用して確率を取得します: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformersは事前学習済みインスタンスをロードするためのシンプルで統一された方法を提供します。 これは、[`TFAutoModel`]を[`AutoTokenizer`]をロードするのと同じようにロードできることを意味します。 唯一の違いは、タスクに適した[`TFAutoModel`]を選択することです。 テキスト(またはシーケンス)分類の場合、[`TFAutoModelForSequenceClassification`]をロードする必要があります: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> 詳細については、[`AutoModel`]クラスでサポートされているタスクに関する情報は、[タスクの概要](./task_summary)を参照してください。 </Tip> 次に、前処理済みのバッチを直接モデルに渡します。テンソルをそのまま渡すことができます: ```python >>> tf_outputs = tf_model(tf_batch) ``` モデルは`logits`属性に最終的なアクティベーションを出力します。`logits`にソフトマックス関数を適用して確率を取得します: ```python >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> 🤗 Transformersのすべてのモデル(PyTorchまたはTensorFlow)は、最終的な活性化関数(softmaxなど)*前*のテンソルを出力します。 最終的な活性化関数は、しばしば損失と結合されているためです。モデルの出力は特別なデータクラスであり、その属性はIDEで自動補完されます。 モデルの出力は、タプルまたは辞書のように動作します(整数、スライス、または文字列でインデックスを付けることができます)。 この場合、Noneである属性は無視されます。 </Tip> ### Save a Model <frameworkcontent> <pt> モデルをファインチューニングしたら、[`PreTrainedModel.save_pretrained`]を使用してトークナイザと共に保存できます: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` 再びモデルを使用する準備ができたら、[`PreTrainedModel.from_pretrained`]を使用して再度ロードします: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> モデルをファインチューニングしたら、そのトークナイザを使用してモデルを保存できます。[`TFPreTrainedModel.save_pretrained`]を使用します: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` モデルを再度使用する準備ができたら、[`TFPreTrainedModel.from_pretrained`]を使用して再度ロードします: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> 🤗 Transformersの特に素晴らしい機能の一つは、モデルを保存し、それをPyTorchモデルまたはTensorFlowモデルとして再ロードできることです。 `from_pt`または`from_tf`パラメータを使用してモデルをフレームワーク間で変換できます: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = 
AutoTokenizer.from_pretrained(tf_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </tf> </frameworkcontent> ## Custom model builds モデルを構築方法を変更するには、モデルの設定クラスを変更できます。設定はモデルの属性を指定します。例えば、隠れ層の数やアテンションヘッドの数などがこれに含まれます。カスタム設定クラスからモデルを初期化する際には、ゼロから始めます。モデルの属性はランダムに初期化され、有意義な結果を得るためにモデルをトレーニングする必要があります。 最初に[`AutoConfig`]をインポートし、変更したい事前学習済みモデルをロードします。[`AutoConfig.from_pretrained`]内で、変更したい属性(例:アテンションヘッドの数)を指定できます: ```python >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> [`AutoModel.from_config`]を使用してカスタム設定からモデルを作成します: ```python >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> カスタム構成からモデルを作成するには、[`TFAutoModel.from_config`]を使用します: ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> [カスタムアーキテクチャを作成](./create_a_model)ガイドを参照して、カスタム構成の詳細情報を確認してください。 ## Trainer - PyTorch向けの最適化されたトレーニングループ すべてのモデルは標準の[`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)であるため、通常のトレーニングループで使用できます。 独自のトレーニングループを作成できますが、🤗 TransformersはPyTorch向けに[`Trainer`]クラスを提供しており、基本的なトレーニングループに加えて、 分散トレーニング、混合精度などの機能の追加を行っています。 タスクに応じて、通常は[`Trainer`]に以下のパラメータを渡します: 1. [`PreTrainedModel`]または[`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)から始めます: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`]には、変更できるモデルのハイパーパラメータが含まれており、学習率、バッチサイズ、トレーニングエポック数などが変更できます。指定しない場合、デフォルト値が使用されます: ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. トークナイザ、画像プロセッサ、特徴量抽出器、またはプロセッサのような前処理クラスをロードします: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. データセットをロードする: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. データセットをトークン化するための関数を作成します: ```python >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` その後、[`~datasets.Dataset.map`]を使用してデータセット全体に適用します: ```python >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. データセットからの例のバッチを作成するための [`DataCollatorWithPadding`]: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` 次に、これらのクラスを[`Trainer`]にまとめます: ```python >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... 
) # doctest: +SKIP ``` 訓練を開始する準備ができたら、[`~Trainer.train`]を呼び出してトレーニングを開始します: ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> 翻訳や要約など、シーケンス間モデルを使用するタスクには、代わりに[`Seq2SeqTrainer`]と[`Seq2SeqTrainingArguments`]クラスを使用してください。 </Tip> [`Trainer`]内のメソッドをサブクラス化することで、トレーニングループの動作をカスタマイズできます。これにより、損失関数、オプティマイザ、スケジューラなどの機能をカスタマイズできます。サブクラス化できるメソッドの一覧については、[`Trainer`]リファレンスをご覧ください。 トレーニングループをカスタマイズする別の方法は、[Callbacks](./main_classes/callbacks)を使用することです。コールバックを使用して他のライブラリと統合し、トレーニングループを監視して進捗状況を報告したり、トレーニングを早期に停止したりできます。コールバックはトレーニングループ自体には何も変更を加えません。損失関数などのカスタマイズを行う場合は、[`Trainer`]をサブクラス化する必要があります。 ## Train with TensorFlow すべてのモデルは標準の[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)であるため、[Keras](https://keras.io/) APIを使用してTensorFlowでトレーニングできます。 🤗 Transformersは、データセットを`tf.data.Dataset`として簡単にロードできるようにする[`~TFPreTrainedModel.prepare_tf_dataset`]メソッドを提供しており、Kerasの[`compile`](https://keras.io/api/models/model_training_apis/#compile-method)および[`fit`](https://keras.io/api/models/model_training_apis/#fit-method)メソッドを使用してトレーニングをすぐに開始できます。 1. [`TFPreTrainedModel`]または[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)から始めます: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. トークナイザ、画像プロセッサ、特徴量抽出器、またはプロセッサのような前処理クラスをロードします: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. データセットをトークナイズするための関数を作成します: ```python >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. [`~datasets.Dataset.map`]を使用してデータセット全体にトークナイザを適用し、データセットとトークナイザを[`~TFPreTrainedModel.prepare_tf_dataset`]に渡します。バッチサイズを変更し、データセットをシャッフルすることもできます。 ```python >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. 準備ができたら、`compile`と`fit`を呼び出してトレーニングを開始できます。 Transformersモデルはすべてデフォルトのタスクに関連する損失関数を持っているため、指定しない限り、損失関数を指定する必要はありません。 ```python >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) # 損失関数の引数は不要です! >>> model.fit(tf ``` ## What's next? 🤗 Transformersのクイックツアーを完了したら、ガイドをチェックして、カスタムモデルの作成、タスクのためのファインチューニング、スクリプトを使用したモデルのトレーニングなど、より具体的なことを学ぶことができます。🤗 Transformersのコアコンセプトについてもっと詳しく知りたい場合は、コンセプチュアルガイドを読んでみてください!
transformers/docs/source/ja/quicktour.md/0
{ "file_path": "transformers/docs/source/ja/quicktour.md", "repo_id": "transformers", "token_count": 13065 }
264
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Object detection [[open-in-colab]] オブジェクト検出は、画像内のインスタンス (人間、建物、車など) を検出するコンピューター ビジョン タスクです。物体検出モデルは画像を入力および出力として受け取ります 検出されたオブジェクトの境界ボックスと関連するラベルの座標。画像には複数のオブジェクトを含めることができます。 それぞれに独自の境界ボックスとラベルがあり (例: 車と建物を持つことができます)、各オブジェクトは 画像のさまざまな部分に存在する必要があります (たとえば、画像には複数の車が含まれている可能性があります)。 このタスクは、歩行者、道路標識、信号機などを検出するために自動運転で一般的に使用されます。 他のアプリケーションには、画像内のオブジェクトのカウント、画像検索などが含まれます。 このガイドでは、次の方法を学習します。 1. Finetune [DETR](https://huggingface.co/docs/transformers/model_doc/detr)、畳み込みアルゴリズムを組み合わせたモデル [CPPE-5](https://huggingface.co/datasets/cppe-5) 上のエンコーダー/デコーダー トランスフォーマーを備えたバックボーン データセット。 2. 微調整したモデルを推論に使用します。 <Tip> このチュートリアルで説明するタスクは、次のモデル アーキテクチャでサポートされています。 <!--This tip is automatically generated by `make fix-copies`, do not fill manually!--> [Conditional DETR](../model_doc/conditional_detr), [Deformable DETR](../model_doc/deformable_detr), [DETA](../model_doc/deta), [DETR](../model_doc/detr), [Table Transformer](../model_doc/table-transformer), [YOLOS](../model_doc/yolos) <!--End of the generated tip--> </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install -q datasets transformers evaluate timm albumentations ``` 🤗 データセットを使用して Hugging Face Hub からデータセットをロードし、🤗 トランスフォーマーを使用してモデルをトレーニングします。 データを増強するための`albumentations`。 `timm` は現在、DETR モデルの畳み込みバックボーンをロードするために必要です。 モデルをコミュニティと共有することをお勧めします。 Hugging Face アカウントにログインして、ハブにアップロードします。 プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load the CPPE-5 dataset [CPPE-5 データセット](https://huggingface.co/datasets/cppe-5) には、次の画像が含まれています。 新型コロナウイルス感染症のパンデミックにおける医療用個人保護具 (PPE) を識別する注釈。 データセットをロードすることから始めます。 ```py >>> from datasets import load_dataset >>> cppe5 = load_dataset("cppe-5") >>> cppe5 DatasetDict({ train: Dataset({ features: ['image_id', 'image', 'width', 'height', 'objects'], num_rows: 1000 }) test: Dataset({ features: ['image_id', 'image', 'width', 'height', 'objects'], num_rows: 29 }) }) ``` このデータセットには、1000 枚の画像を含むトレーニング セットと 29 枚の画像を含むテスト セットがすでに付属していることがわかります。 データに慣れるために、例がどのようなものかを調べてください。 ```py >>> cppe5["train"][0] {'image_id': 15, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x7F9EC9E77C10>, 'width': 943, 'height': 663, 'objects': {'id': [114, 115, 116, 117], 'area': [3796, 1596, 152768, 81002], 'bbox': [[302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], 'category': [4, 4, 0, 0]}} ``` データセット内の例には次のフィールドがあります。 - `image_id`: サンプルの画像ID - `image`: 画像を含む `PIL.Image.Image` オブジェクト - `width`: 画像の幅 - `height`: 画像の高さ - `objects`: 画像内のオブジェクトの境界ボックスのメタデータを含む辞書: - `id`: アノテーションID - `area`: 境界ボックスの領域 - `bbox`: オブジェクトの境界ボックス ([COCO 形式](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) ) - `category`: オブジェクトのカテゴリー。可能な値には、`Coverall 
(0)`、`Face_Shield (1)`、`Gloves (2)`、`Goggles (3)`、および `Mask (4)` が含まれます。 `bbox`フィールドが COCO 形式に従っていることに気づくかもしれません。これは DETR モデルが予期する形式です。 ただし、「オブジェクト」内のフィールドのグループ化は、DETR が必要とする注釈形式とは異なります。あなたはするであろう このデータをトレーニングに使用する前に、いくつかの前処理変換を適用する必要があります。 データをさらに深く理解するには、データセット内の例を視覚化します。 ```py >>> import numpy as np >>> import os >>> from PIL import Image, ImageDraw >>> image = cppe5["train"][0]["image"] >>> annotations = cppe5["train"][0]["objects"] >>> draw = ImageDraw.Draw(image) >>> categories = cppe5["train"].features["objects"].feature["category"].names >>> id2label = {index: x for index, x in enumerate(categories, start=0)} >>> label2id = {v: k for k, v in id2label.items()} >>> for i in range(len(annotations["id"])): ... box = annotations["bbox"][i] ... class_idx = annotations["category"][i] ... x, y, w, h = tuple(box) ... draw.rectangle((x, y, x + w, y + h), outline="red", width=1) ... draw.text((x, y), id2label[class_idx], fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://i.imgur.com/TdaqPJO.png" alt="CPPE-5 Image Example"/> </div> 関連付けられたラベルを使用して境界ボックスを視覚化するには、データセットのメタデータからラベルを取得します。 `category`フィールド。 また、ラベル ID をラベル クラスにマッピングする辞書 (`id2label`) やその逆 (`label2id`) を作成することもできます。 これらは、後でモデルをセットアップするときに使用できます。これらのマップを含めると、共有した場合に他の人がモデルを再利用できるようになります。 ハグフェイスハブに取り付けます。 データに慣れるための最後のステップとして、潜在的な問題がないかデータを調査します。データセットに関する一般的な問題の 1 つは、 オブジェクト検出は、画像の端を越えて「伸びる」境界ボックスです。このような「暴走」境界ボックスは、 トレーニング中にエラーが発生するため、この段階で対処する必要があります。このデータセットには、この問題に関する例がいくつかあります。 このガイドでは内容をわかりやすくするために、これらの画像をデータから削除します。 ```py >>> remove_idx = [590, 821, 822, 875, 876, 878, 879] >>> keep = [i for i in range(len(cppe5["train"])) if i not in remove_idx] >>> cppe5["train"] = cppe5["train"].select(keep) ``` ## Preprocess the data モデルを微調整するには、事前トレーニングされたモデルに使用されるアプローチと正確に一致するように、使用する予定のデータを前処理する必要があります。 [`AutoImageProcessor`] は、画像データを処理して `pixel_values`、`pixel_mask`、および DETR モデルをトレーニングできる「ラベル」。画像プロセッサには、心配する必要のないいくつかの属性があります。 - `image_mean = [0.485, 0.456, 0.406 ]` - `image_std = [0.229, 0.224, 0.225]` これらは、モデルの事前トレーニング中に画像を正規化するために使用される平均と標準偏差です。これらの価値観は非常に重要です 事前にトレーニングされた画像モデルを推論または微調整するときに複製します。 微調整するモデルと同じチェックポイントからイメージ プロセッサをインスタンス化します。 ```py >>> from transformers import AutoImageProcessor >>> checkpoint = "facebook/detr-resnet-50" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) ``` 画像を`image_processor`に渡す前に、2 つの前処理変換をデータセットに適用します。 - 画像の拡張 - DETR の期待に応えるための注釈の再フォーマット まず、モデルがトレーニング データにオーバーフィットしないようにするために、任意のデータ拡張ライブラリを使用して画像拡張を適用できます。ここでは[Albumentations](https://albumentations.ai/docs/)を使用します... このライブラリは、変換が画像に影響を与え、それに応じて境界ボックスを更新することを保証します。 🤗 データセット ライブラリのドキュメントには、詳細な [物体検出用に画像を拡張する方法に関するガイド](https://huggingface.co/docs/datasets/object_detection) が記載されています。 例としてまったく同じデータセットを使用しています。ここでも同じアプローチを適用し、各画像のサイズを (480, 480) に変更します。 水平に反転して明るくします。 ```py >>> import albumentations >>> import numpy as np >>> import torch >>> transform = albumentations.Compose( ... [ ... albumentations.Resize(480, 480), ... albumentations.HorizontalFlip(p=1.0), ... albumentations.RandomBrightnessContrast(p=1.0), ... ], ... bbox_params=albumentations.BboxParams(format="coco", label_fields=["category"]), ... ) ``` `image_processor` は、注釈が次の形式であることを期待します: `{'image_id': int, 'annotations': List[Dict]}`, ここで、各辞書は COCO オブジェクトの注釈です。 1 つの例として、注釈を再フォーマットする関数を追加してみましょう。 ```py >>> def formatted_anns(image_id, category, area, bbox): ... annotations = [] ... for i in range(0, len(category)): ... new_ann = { ... "image_id": image_id, ... "category_id": category[i], ... "isCrowd": 0, ... "area": area[i], ... 
"bbox": list(bbox[i]), ... } ... annotations.append(new_ann) ... return annotations ``` これで、画像と注釈の変換を組み合わせてサンプルのバッチで使用できるようになりました。 ```py >>> # transforming a batch >>> def transform_aug_ann(examples): ... image_ids = examples["image_id"] ... images, bboxes, area, categories = [], [], [], [] ... for image, objects in zip(examples["image"], examples["objects"]): ... image = np.array(image.convert("RGB"))[:, :, ::-1] ... out = transform(image=image, bboxes=objects["bbox"], category=objects["category"]) ... area.append(objects["area"]) ... images.append(out["image"]) ... bboxes.append(out["bboxes"]) ... categories.append(out["category"]) ... targets = [ ... {"image_id": id_, "annotations": formatted_anns(id_, cat_, ar_, box_)} ... for id_, cat_, ar_, box_ in zip(image_ids, categories, area, bboxes) ... ] ... return image_processor(images=images, annotations=targets, return_tensors="pt") ``` 🤗 Datasets [`~datasets.Dataset.with_transform`] メソッドを使用して、この前処理関数をデータセット全体に適用します。この方法が適用されるのは、 データセットの要素を読み込むときに、その場で変換します。 この時点で、データセットの例が変換後にどのようになるかを確認できます。テンソルが表示されるはずです `pixel_values`、テンソルと `pixel_mask`、および `labels` を使用します。 ```py >>> cppe5["train"] = cppe5["train"].with_transform(transform_aug_ann) >>> cppe5["train"][15] {'pixel_values': tensor([[[ 0.9132, 0.9132, 0.9132, ..., -1.9809, -1.9809, -1.9809], [ 0.9132, 0.9132, 0.9132, ..., -1.9809, -1.9809, -1.9809], [ 0.9132, 0.9132, 0.9132, ..., -1.9638, -1.9638, -1.9638], ..., [-1.5699, -1.5699, -1.5699, ..., -1.9980, -1.9980, -1.9980], [-1.5528, -1.5528, -1.5528, ..., -1.9980, -1.9809, -1.9809], [-1.5528, -1.5528, -1.5528, ..., -1.9980, -1.9809, -1.9809]], [[ 1.3081, 1.3081, 1.3081, ..., -1.8431, -1.8431, -1.8431], [ 1.3081, 1.3081, 1.3081, ..., -1.8431, -1.8431, -1.8431], [ 1.3081, 1.3081, 1.3081, ..., -1.8256, -1.8256, -1.8256], ..., [-1.3179, -1.3179, -1.3179, ..., -1.8606, -1.8606, -1.8606], [-1.3004, -1.3004, -1.3004, ..., -1.8606, -1.8431, -1.8431], [-1.3004, -1.3004, -1.3004, ..., -1.8606, -1.8431, -1.8431]], [[ 1.4200, 1.4200, 1.4200, ..., -1.6476, -1.6476, -1.6476], [ 1.4200, 1.4200, 1.4200, ..., -1.6476, -1.6476, -1.6476], [ 1.4200, 1.4200, 1.4200, ..., -1.6302, -1.6302, -1.6302], ..., [-1.0201, -1.0201, -1.0201, ..., -1.5604, -1.5604, -1.5604], [-1.0027, -1.0027, -1.0027, ..., -1.5604, -1.5430, -1.5430], [-1.0027, -1.0027, -1.0027, ..., -1.5604, -1.5430, -1.5430]]]), 'pixel_mask': tensor([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], ..., [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]]), 'labels': {'size': tensor([800, 800]), 'image_id': tensor([756]), 'class_labels': tensor([4]), 'boxes': tensor([[0.7340, 0.6986, 0.3414, 0.5944]]), 'area': tensor([519544.4375]), 'iscrowd': tensor([0]), 'orig_size': tensor([480, 480])}} ``` 個々の画像を正常に拡張し、それらの注釈を準備しました。ただし、前処理はそうではありません。 まだ完成しています。最後のステップでは、画像をバッチ処理するためのカスタム `collat​​e_fn` を作成します。 画像 (現在は `pixel_values`) をバッチ内の最大の画像にパディングし、対応する `pixel_mask` を作成します どのピクセルが実数 (1) で、どのピクセルがパディング (0) であるかを示します。 ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... return batch ``` ## Training the DETR model 前のセクションで重労働のほとんどを完了したので、モデルをトレーニングする準備が整いました。 このデータセット内の画像は、サイズを変更した後でも依然として非常に大きいです。これは、このモデルを微調整すると、 少なくとも 1 つの GPU が必要です。 トレーニングには次の手順が含まれます。 1. 
前処理と同じチェックポイントを使用して、[`AutoModelForObjectDetection`] でモデルを読み込みます。 2. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。 3. トレーニング引数をモデル、データセット、画像プロセッサ、データ照合器とともに [`Trainer`] に渡します。 4. [`~Trainer.train`] を呼び出してモデルを微調整します。 前処理に使用したのと同じチェックポイントからモデルをロードするときは、必ず`label2id`を渡してください。 および `id2label` マップは、以前にデータセットのメタデータから作成したものです。さらに、`ignore_mismatched_sizes=True`を指定して、既存の分類頭部を新しい分類頭部に置き換えます。 ```py >>> from transformers import AutoModelForObjectDetection >>> model = AutoModelForObjectDetection.from_pretrained( ... checkpoint, ... id2label=id2label, ... label2id=label2id, ... ignore_mismatched_sizes=True, ... ) ``` [`TrainingArguments`] で、`output_dir` を使用してモデルの保存場所を指定し、必要に応じてハイパーパラメーターを構成します。 画像列が削除されるため、未使用の列を削除しないことが重要です。画像列がないと、 `pixel_values` を作成できません。このため、`remove_unused_columns`を`False`に設定します。 ハブにプッシュしてモデルを共有したい場合は、`push_to_hub` を `True` に設定します (Hugging にサインインする必要があります) 顔に向かってモデルをアップロードします)。 ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="detr-resnet-50_finetuned_cppe5", ... per_device_train_batch_size=8, ... num_train_epochs=10, ... fp16=True, ... save_steps=200, ... logging_steps=50, ... learning_rate=1e-5, ... weight_decay=1e-4, ... save_total_limit=2, ... remove_unused_columns=False, ... push_to_hub=True, ... ) ``` 最後に、すべてをまとめて、[`~transformers.Trainer.train`] を呼び出します。 ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=collate_fn, ... train_dataset=cppe5["train"], ... tokenizer=image_processor, ... ) >>> trainer.train() ``` `training_args`で`push_to_hub`を`True`に設定した場合、トレーニング チェックポイントは ハグフェイスハブ。トレーニングが完了したら、[`~transformers.Trainer.push_to_hub`] メソッドを呼び出して、最終モデルもハブにプッシュします。 ```py >>> trainer.push_to_hub() ``` ## Evaluate 物体検出モデルは通常、一連の <a href="https://cocodataset.org/#detection-eval">COCO スタイルの指標</a>を使用して評価されます。 既存のメトリクス実装のいずれかを使用できますが、ここでは`torchvision`のメトリクス実装を使用して最終的なメトリクスを評価します。 ハブにプッシュしたモデル。 `torchvision`エバリュエーターを使用するには、グラウンド トゥルース COCO データセットを準備する必要があります。 COCO データセットを構築するための API データを特定の形式で保存する必要があるため、最初に画像と注釈をディスクに保存する必要があります。と同じように トレーニング用にデータを準備するとき、`cppe5["test"]` からの注釈をフォーマットする必要があります。ただし、画像 そのままでいるべきです。 評価ステップには少し作業が必要ですが、大きく 3 つのステップに分けることができます。 まず、`cppe5["test"]` セットを準備します。注釈をフォーマットし、データをディスクに保存します。 ```py >>> import json >>> # format annotations the same as for training, no need for data augmentation >>> def val_formatted_anns(image_id, objects): ... annotations = [] ... for i in range(0, len(objects["id"])): ... new_ann = { ... "id": objects["id"][i], ... "category_id": objects["category"][i], ... "iscrowd": 0, ... "image_id": image_id, ... "area": objects["area"][i], ... "bbox": objects["bbox"][i], ... } ... annotations.append(new_ann) ... return annotations >>> # Save images and annotations into the files torchvision.datasets.CocoDetection expects >>> def save_cppe5_annotation_file_images(cppe5): ... output_json = {} ... path_output_cppe5 = f"{os.getcwd()}/cppe5/" ... if not os.path.exists(path_output_cppe5): ... os.makedirs(path_output_cppe5) ... path_anno = os.path.join(path_output_cppe5, "cppe5_ann.json") ... categories_json = [{"supercategory": "none", "id": id, "name": id2label[id]} for id in id2label] ... output_json["images"] = [] ... output_json["annotations"] = [] ... for example in cppe5: ... ann = val_formatted_anns(example["image_id"], example["objects"]) ... output_json["images"].append( ... { ... "id": example["image_id"], ... "width": example["image"].width, ... "height": example["image"].height, ... 
"file_name": f"{example['image_id']}.png", ... } ... ) ... output_json["annotations"].extend(ann) ... output_json["categories"] = categories_json ... with open(path_anno, "w") as file: ... json.dump(output_json, file, ensure_ascii=False, indent=4) ... for im, img_id in zip(cppe5["image"], cppe5["image_id"]): ... path_img = os.path.join(path_output_cppe5, f"{img_id}.png") ... im.save(path_img) ... return path_output_cppe5, path_anno ``` 次に、`cocoevaluator`で利用できる`CocoDetection`クラスのインスタンスを用意します。 ```py >>> import torchvision >>> class CocoDetection(torchvision.datasets.CocoDetection): ... def __init__(self, img_folder, image_processor, ann_file): ... super().__init__(img_folder, ann_file) ... self.image_processor = image_processor ... def __getitem__(self, idx): ... # read in PIL image and target in COCO format ... img, target = super(CocoDetection, self).__getitem__(idx) ... # preprocess image and target: converting target to DETR format, ... # resizing + normalization of both image and target) ... image_id = self.ids[idx] ... target = {"image_id": image_id, "annotations": target} ... encoding = self.image_processor(images=img, annotations=target, return_tensors="pt") ... pixel_values = encoding["pixel_values"].squeeze() # remove batch dimension ... target = encoding["labels"][0] # remove batch dimension ... return {"pixel_values": pixel_values, "labels": target} >>> im_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> path_output_cppe5, path_anno = save_cppe5_annotation_file_images(cppe5["test"]) >>> test_ds_coco_format = CocoDetection(path_output_cppe5, im_processor, path_anno) ``` 最後に、メトリクスをロードして評価を実行します。 ```py >>> import evaluate >>> from tqdm import tqdm >>> model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> module = evaluate.load("ybelkada/cocoevaluate", coco=test_ds_coco_format.coco) >>> val_dataloader = torch.utils.data.DataLoader( ... test_ds_coco_format, batch_size=8, shuffle=False, num_workers=4, collate_fn=collate_fn ... ) >>> with torch.no_grad(): ... for idx, batch in enumerate(tqdm(val_dataloader)): ... pixel_values = batch["pixel_values"] ... pixel_mask = batch["pixel_mask"] ... labels = [ ... {k: v for k, v in t.items()} for t in batch["labels"] ... ] # these are in DETR format, resized + normalized ... # forward pass ... outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask) ... orig_target_sizes = torch.stack([target["orig_size"] for target in labels], dim=0) ... results = im_processor.post_process(outputs, orig_target_sizes) # convert outputs of model to Pascal VOC format (xmin, ymin, xmax, ymax) ... module.add(prediction=results, reference=labels) ... del batch >>> results = module.compute() >>> print(results) Accumulating evaluation results... DONE (t=0.08s). 
IoU metric: bbox Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.352 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.681 Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.292 Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.168 Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.208 Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.429 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.274 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.484 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.191 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.323 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.590 ``` これらの結果は、[`~transformers.TrainingArguments`] のハイパーパラメータを調整することでさらに改善できます。試してごらん! ## Inference DETR モデルを微調整して評価し、Hugging Face Hub にアップロードしたので、それを推論に使用できます。 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。パイプラインをインスタンス化する モデルを使用してオブジェクトを検出し、それに画像を渡します。 ```py >>> from transformers import pipeline >>> import requests >>> url = "https://i.imgur.com/2lnWoly.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> obj_detector = pipeline("object-detection", model="devonho/detr-resnet-50_finetuned_cppe5") >>> obj_detector(image) ``` 必要に応じて、パイプラインの結果を手動で複製することもできます。 ```py >>> image_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> with torch.no_grad(): ... inputs = image_processor(images=image, return_tensors="pt") ... outputs = model(**inputs) ... target_sizes = torch.tensor([image.size[::-1]]) ... results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected Coverall with confidence 0.566 at location [1215.32, 147.38, 4401.81, 3227.08] Detected Mask with confidence 0.584 at location [2449.06, 823.19, 3256.43, 1413.9] ``` 結果をプロットしてみましょう: ```py >>> draw = ImageDraw.Draw(image) >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... x, y, x2, y2 = tuple(box) ... draw.rectangle((x, y, x2, y2), outline="red", width=1) ... draw.text((x, y), model.config.id2label[label.item()], fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://i.imgur.com/4QZnf9A.png" alt="Object detection result on a new image"/> </div>
transformers/docs/source/ja/tasks/object_detection.md/0
{ "file_path": "transformers/docs/source/ja/tasks/object_detection.md", "repo_id": "transformers", "token_count": 12769 }
265
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 모델 학습 해부하기 [[model-training-anatomy]] 모델 훈련 속도와 메모리 활용의 효율성을 향상시키기 위해 적용할 수 있는 성능 최적화 기술을 이해하려면 GPU가 훈련 중에 어떻게 활용되는지, 그리고 수행되는 연산에 따라 연산 강도가 어떻게 변하는지에 익숙해져야 합니다. 먼저 GPU 활용과 모델 훈련 실행에 대한 예시를 살펴보겠습니다. 데모를 위해 몇몇 라이브러리를 설치해야 합니다: ```bash pip install transformers datasets accelerate nvidia-ml-py3 ``` `nvidia-ml-py3` 라이브러리는 Python 내부에서 모델의 메모리 사용량을 모니터링할 수 있게 해줍니다. 터미널의 `nvidia-smi` 명령어에 익숙할 수 있는데, 이 라이브러리는 Python에서 직접 동일한 정보에 접근할 수 있게 해줍니다. 그 다음, 100과 30000 사이의 무작위 토큰 ID와 분류기를 위한 이진 레이블인 더미 데이터를 생성합니다. 길이가 각각 512인 총 512개의 시퀀스를 가져와 PyTorch 형식의 [`~datasets.Dataset`]에 저장합니다. ```py >>> import numpy as np >>> from datasets import Dataset >>> seq_len, dataset_size = 512, 512 >>> dummy_data = { ... "input_ids": np.random.randint(100, 30000, (dataset_size, seq_len)), ... "labels": np.random.randint(0, 1, (dataset_size)), ... } >>> ds = Dataset.from_dict(dummy_data) >>> ds.set_format("pt") ``` GPU 활용 및 [`Trainer`]로 실행한 훈련 과정에 대한 요약 통계를 출력하기 위해 두 개의 도우미 함수를 정의하겠습니다: ```py >>> from pynvml import * >>> def print_gpu_utilization(): ... nvmlInit() ... handle = nvmlDeviceGetHandleByIndex(0) ... info = nvmlDeviceGetMemoryInfo(handle) ... print(f"GPU memory occupied: {info.used//1024**2} MB.") >>> def print_summary(result): ... print(f"Time: {result.metrics['train_runtime']:.2f}") ... print(f"Samples/second: {result.metrics['train_samples_per_second']:.2f}") ... print_gpu_utilization() ``` 시작할 때 GPU 메모리가 비어 있는지 확인해 봅시다: ```py >>> print_gpu_utilization() GPU memory occupied: 0 MB. ``` 좋습니다. 모델을 로드하기 전에는 예상대로 GPU 메모리가 점유되지 않았습니다. 그렇지 않다면 사용자의 기기에서 GPU 메모리를 사용하는 모든 프로세스를 중단해야 합니다. 그러나 사용자는 모든 여유 GPU 메모리를 사용할 수는 없습니다. 모델이 GPU에 로드될 때 커널도 로드되므로 1-2GB의 메모리를 차지할 수 있습니다. 얼마나 되는지 확인하기 위해 GPU에 작은 텐서를 로드하여 커널이 로드되도록 트리거합니다. ```py >>> import torch >>> torch.ones((1, 1)).to("cuda") >>> print_gpu_utilization() GPU memory occupied: 1343 MB. ``` 커널만으로도 GPU 메모리의 1.3GB를 차지합니다. 이제 모델이 얼마나 많은 공간을 사용하는지 확인해 보겠습니다. ## 모델 로드 [[load-model]] 우선, `google-bert/bert-large-uncased` 모델을 로드합니다. 모델의 가중치를 직접 GPU에 로드해서 가중치만이 얼마나 많은 공간을 차지하는지 확인할 수 있습니다. ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-large-uncased").to("cuda") >>> print_gpu_utilization() GPU memory occupied: 2631 MB. ``` 모델의 가중치만으로도 GPU 메모리를 1.3 GB 차지하는 것을 볼 수 있습니다. 정확한 숫자는 사용하는 GPU에 따라 다릅니다. 최신 GPU에서는 모델 사용 속도를 높이는 최적화된 방식으로 가중치가 로드되므로, 모델이 더 많은 공간을 차지할 수 있습니다. 이제 `nvidia-smi` CLI와 동일한 결과를 얻는지 빠르게 확인할 수 있습니다: ```bash nvidia-smi ``` ```bash Tue Jan 11 08:58:05 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.91.03 Driver Version: 460.91.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. 
| |===============================+======================+======================| | 0 Tesla V100-SXM2... On | 00000000:00:04.0 Off | 0 | | N/A 37C P0 39W / 300W | 2631MiB / 16160MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | 0 N/A N/A 3721 C ...nvs/codeparrot/bin/python 2629MiB | +-----------------------------------------------------------------------------+ ``` 이전과 동일한 숫자가 출력되고 16GB 메모리를 가진 V100 GPU를 사용하고 있다는 것도 볼 수 있습니다. 그러므로 이제 모델 훈련을 시작하여 GPU 메모리 사용량이 어떻게 달라지는지 볼 수 있습니다. 우선 몇몇 표준 훈련 인수를 설정합니다: ```py default_args = { "output_dir": "tmp", "evaluation_strategy": "steps", "num_train_epochs": 1, "log_level": "error", "report_to": "none", } ``` <Tip> 여러 실험을 실행할 계획이라면, 실험 간에 메모리를 제대로 비우기 위해서 Python 커널을 실험 사이마다 재시작해야 합니다. </Tip> ## 기본 훈련에서의 메모리 활용 [[memory-utilization-at-vanilla-training]] [`Trainer`]를 사용하여, GPU 성능 최적화 기술을 사용하지 않고 배치 크기가 4인 모델을 훈련시키겠습니다: ```py >>> from transformers import TrainingArguments, Trainer, logging >>> logging.set_verbosity_error() >>> training_args = TrainingArguments(per_device_train_batch_size=4, **default_args) >>> trainer = Trainer(model=model, args=training_args, train_dataset=ds) >>> result = trainer.train() >>> print_summary(result) ``` ``` Time: 57.82 Samples/second: 8.86 GPU memory occupied: 14949 MB. ``` 우리는 비교적 작은 배치 크기로도 전체 GPU 메모리를 거의 다 차지하는 것을 볼 수 있습니다. 그러나 배치 크기가 클수록 모델 수렴 속도가 빨라지고 최종 성능이 향상되는 경우가 많습니다. 그래서 이상적으로는 GPU 제한이 아닌 우리 모델의 요구사항에 맞게 배치 크기를 조정하려고 합니다. 흥미롭게도 우리는 모델의 크기보다 훨씬 더 많은 메모리를 사용합니다. 왜 이런 현상이 발생하는지 조금 더 잘 이해하기 위해 모델의 연산과 메모리 요구 사항을 살펴보겠습니다. ## 모델의 연산 해부하기 [[anatomy-of-models-operations]] 트랜스포머 아키텍처에는 연산 강도(compute-intensity)에 따라 그룹화된 3가지 주요 연산 그룹이 있습니다. 1. **텐서 축약(Tensor Contractions)** 선형 레이어와 멀티헤드 어텐션의 구성 요소는 모두 **행렬-행렬 곱셈(matrix-matrix multiplications)**을 일괄적으로 처리합니다. 이 연산은 트랜스포머 훈련에서 가장 연산 강도가 높은 부분입니다. 2. **통계 정규화(Statistical Normalizations)** 소프트맥스와 레이어 정규화는 텐서 축약보다 연산 강도가 낮습니다. 하나 이상의 **감소 연산(reduction operations)**을 포함하며, 그 결과는 map을 통해 적용됩니다. 3. **원소별 연산자(Element-wise Operators)** 그 외 연산자들, **편향(biases), 드롭아웃(dropout), 활성화 함수(activations), 잔차 연결(residual connections)**이 여기에 해당합니다. 이 연산들은 연산 강도가 가장 낮습니다. 이러한 지식은 성능 병목 현상을 분석할 때 도움이 될 수 있습니다. 이 내용은 [Data Movement Is All You Need: A Case Study on Optimizing Transformers 2020](https://arxiv.org/abs/2007.00072)을 참고하였습니다. ## 모델의 메모리 구조 [[anatomy-of-models-memory]] 모델을 훈련시키는 데는 단순히 GPU에 모델을 올리는 것보다 훨씬 더 많은 메모리를 사용한다는 것을 보았습니다. 이는 훈련 중 GPU 메모리를 사용하는 많은 구성 요소가 있기 때문입니다. GPU 메모리의 구성 요소는 다음과 같습니다: 1. 모델 가중치 2. 옵티마이저 상태 3. 그라디언트 4. 그라디언트 계산을 위해 저장된 순방향 활성화 5. 임시 버퍼 6. 기능별 메모리 AdamW를 사용하여 혼합 정밀도로 훈련된 일반적인 모델은 모델 파라미터당 18 바이트와 활성화 메모리가 필요합니다. 추론 단계에서는 옵티마이저와 그라디언트가 필요하지 않으므로 이들은 제외합니다. 따라서 혼합 정밀도 추론의 경우 모델 매개변수당 6 바이트와 활성화 메모리가 필요합니다. 자세히 살펴보겠습니다. **모델 가중치:** - fp32 훈련의 경우 매개 변수 수 * 4 바이트 - 혼합 정밀도 훈련의 경우 매개 변수 수 * 6 바이트 (메모리에 fp32와 fp16 두 가지 모델을 유지) **옵티마이저 상태:** - 일반 AdamW의 경우 매개 변수 수 * 8 바이트 (2가지 상태 유지) - [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)와 같은 8비트 AdamW 옵티마이저의 경우 매개 변수 수 * 2 바이트 - Momentum을 가진 SGD와 같은 옵티마이저의 경우 매개 변수 수 * 4 바이트 (하나의 상태만 유지) **그라디언트** - fp32 또는 혼합 정밀도 훈련의 경우 매개 변수 수 * 4 바이트 (그라디언트는 항상 fp32으로 유지됩니다.) **순방향 활성화** - 크기는 여러 요인에 따라 달라지며, 주요 요인은 시퀀스 길이, 은닉 상태의 크기 및 배치 크기입니다. 순방향 및 역방향 함수에서 전달 및 반환되는 입력과 출력이 있으며, 그라디언트 계산을 위해 저장된 순방향 활성화가 있습니다. 
**임시 메모리** 더불어 모든 종류의 임시 변수는 연산이 완료되면 곧바로 해제되지만, 그 순간에는 추가 메모리가 필요할 수 있고 OOM을 유발할 수 있습니다. 따라서 코딩할 때 이러한 임시 변수에 대해 전략적으로 생각하고 때로는 더 이상 필요 없는 임시 변수를 즉시 명시적으로 메모리에서 제거하는 것이 중요합니다. **기능별 메모리** 그런 다음, 소프트웨어에는 특별한 메모리 요구 사항이 있을 수 있습니다. 예를 들어, 빔 검색을 사용하여 텍스트를 생성할 때 소프트웨어는 입력과 출력 사본을 여러 개 유지해야 합니다. **`forward` vs `backward` 실행 속도** 합성곱과 선형 레이어의 경우 순방향에 비해 역방향에서는 2배의 플롭스가 필요하므로 일반적으로 2배 정도 느리게 변환됩니다(역방향의 경우 사이즈가 부자연스럽기 때문에, 때로는 더욱 느릴 수도 있습니다). 활성화는 일반적으로 대역폭이 제한되어 있으며, 일반적으로 순방향보다 역방향에서 더 많은 데이터를 읽어야 합니다. (예를 들어, 순방향 활성화 시 한 번 씩 읽고 쓰지만, 역방향 활성화에서는 순방향 gradOutput과 출력에 대해 총 두 번 읽고 gradInput에 대해 한 번 씁니다.) 보다시피, GPU 메모리를 절약하거나 작업 속도를 높일 수 있는 몇 가지 방법이 있습니다. 이제 GPU 활용과 계산 속도에 영향을 주는 것이 무엇인지를 이해했으므로, [Methods and tools for efficient training on a single GPU](perf_train_gpu_one) 문서 페이지를 참조하여 성능 최적화 기법에 대해 알아보세요.
transformers/docs/source/ko/model_memory_anatomy.md/0
{ "file_path": "transformers/docs/source/ko/model_memory_anatomy.md", "repo_id": "transformers", "token_count": 8877 }
266
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 이념과 목표 [[philosophy]] 🤗 Transformers는 다음과 같은 목적으로 만들어진 독자적인 라이브러리입니다: - 대규모 Transformers 모델을 사용하거나 연구하거나 확장하려는 기계 학습 연구원 및 교육자를 위한 것입니다. - 모델을 미세 조정하거나 제작용으로 사용하고자 하는 실전 개발자를 위한 것입니다. - 특정 기계 학습 작업을 해결하기 위해 사전훈련된 모델을 다운로드하고 사용하기만 하려는 엔지니어를 위한 것입니다. 이 라이브러리는 두 가지 주요 목표를 가지고 설계되었습니다: 1. 사용하기 쉽고 빠르게 만드는 것: - 학습해야 할 사용자 대상 추상화의 수를 제한했습니다. 실제로 거의 추상화가 없으며, 각 모델을 사용하기 위해 필요한 세 가지 표준 클래스인 [configuration](main_classes/configuration), [models](main_classes/model) 및 전처리 클래스인 ([tokenizer](main_classes/tokenizer)는 NLP용, [image processor](main_classes/image_processor)는 비전용, [feature extractor](main_classes/feature_extractor)는 오디오용, [processor](main_classes/processors)는 멀티모달 입력용)만 사용합니다. - 이러한 클래스는 공통적인 `from_pretrained()` 메서드를 사용하여 미리 훈련된 인스턴스에서 간단하고 통일된 방식으로 초기화할 수 있습니다. 이 메소드는 미리 훈련된 체크포인트에서 관련 클래스 인스턴스와 관련 데이터(구성의 하이퍼파라미터, 토크나이저의 어휘, 모델의 가중치)를 (필요한 경우) 다운로드하고 캐시하며 가져옵니다. 체크포인트는 [Hugging Face Hub](https://huggingface.co/models)에서 제공되거나 사용자 자체의 저장된 체크포인트에서 제공됩니다. - 이 세 가지 기본 클래스 위에 라이브러리는 [`pipeline`] API를 제공하여 주어진 작업에 대해 모델을 빠르게 추론하는 데 사용하고, [`Trainer`]를 제공하여 PyTorch 모델을 빠르게 훈련하거나 미세 조정할 수 있도록 합니다(모든 TensorFlow 모델은 `Keras.fit`과 호환됩니다). - 결과적으로, 이 라이브러리는 신경망을 구축하기 위한 모듈식 도구 상자가 아닙니다. 라이브러리를 확장하거나 구축하려면 일반적인 Python, PyTorch, TensorFlow, Keras 모듈을 사용하고 라이브러리의 기본 클래스를 상속하여 모델 로딩 및 저장과 같은 기능을 재사용하면 됩니다. 모델에 대한 코딩 철학에 대해 더 자세히 알고 싶다면 [Repeat Yourself](https://huggingface.co/blog/transformers-design-philosophy) 블로그 글을 확인해보세요. 2. 원래 모델과 가능한 한 근접한 성능을 제공하는 최신 모델을 제공하는 것: - 각 아키텍처에 대해 공식 저자가 제공한 결과를 재현하는 적어도 한 가지 예제를 제공합니다. - 코드는 원래 코드와 가능한 한 유사하게 유지되므로 PyTorch 코드는 TensorFlow 코드로 변환되어 *pytorchic*하지 않을 수 있고, 그 반대의 경우도 마찬가지입니다. 기타 목표 몇 가지: - 모델의 내부를 가능한 일관되게 노출시키기: - 전체 은닉 상태와 어텐션 가중치에 대한 액세스를 단일 API를 사용하여 제공합니다. - 전처리 클래스 및 기본 모델 API는 모델 간에 쉽게 전환할 수 있도록 표준화되어 있습니다. - 미세 조정 및 모델 탐색을 위한 유망한 도구들을 주관적으로 선택하기: - 미세 조정을 위해 어휘 및 임베딩에 새로운 토큰을 간단하고 일관된 방식으로 추가하는 방법을 제공합니다. - Transformer 헤드를 마스킹하고 가지치기하는 간단한 방법을 제공합니다. - PyTorch, TensorFlow 2.0 및 Flax 간에 쉽게 전환할 수 있도록 하여 하나의 프레임워크로 훈련하고 다른 프레임워크로 추론할 수 있게 합니다. ## 주요 개념 [[main-concepts]] 이 라이브러리는 각 모델에 대해 세 가지 유형의 클래스를 기반으로 구축되었습니다: - **모델 클래스**는 라이브러리에서 제공하는 사전 훈련된 가중치와 함께 작동하는 PyTorch 모델([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras 모델([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)), JAX/Flax 모델([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html))일 수 있습니다. - **구성 클래스**는 모델을 구축하는 데 필요한 하이퍼파라미터(예: 레이어 수 및 은닉 크기)를 저장합니다. 구성 클래스를 직접 인스턴스화할 필요는 없습니다. 특히, 수정 없이 고 사전 학습된 모델을 사용하는 경우 모델을 생성하면 모델의 일부인 구성을 자동으로 인스턴스화됩니다. - **전처리 클래스**는 원시 데이터를 모델이 수용하는 형식으로 변환합니다. [Tokenizer](main_classes/tokenizer)는 각 모델의 어휘를 저장하고, 문자열을 토큰 임베딩 인덱스 리스트로 인코딩하고 디코딩하기 위한 메소드를 제공합니다. 
[Image processors](main_classes/image_processor)는 비전 입력을 전처리하고, [feature extractors](main_classes/feature_extractor)는 오디오 입력을 전처리하며, [processor](main_classes/processors)는 멀티모달 입력을 처리합니다. 모든 이러한 클래스는 사전 훈련된 인스턴스에서 인스턴스화하고 로컬로 저장하며, 세 가지 메소드를 사용하여 Hub에서 공유할 수 있습니다: - `from_pretrained()` 메소드를 사용하면 라이브러리 자체에서 제공하는 사전 훈련된 버전(지원되는 모델은 [Model Hub](https://huggingface.co/models)에서 찾을 수 있음)이나 사용자가 로컬로 저장한 경우(또는 서버에 저장한 경우)의 모델, 구성 및 전처리 클래스를 인스턴스화할 수 있습니다. - `save_pretrained()` 메소드를 사용하면 모델, 구성 및 전처리 클래스를 로컬로 저장하여 `from_pretrained()`를 사용하여 다시 가져올 수 있습니다. - `push_to_hub()` 메소드를 사용하면 모델, 구성 및 전처리 클래스를 Hub에 공유하여 모두에게 쉽게 접근할 수 있습니다.
transformers/docs/source/ko/philosophy.md/0
{ "file_path": "transformers/docs/source/ko/philosophy.md", "repo_id": "transformers", "token_count": 5165 }
267
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # TensorFlow 모델을 위한 XLA 통합 [[xla-integration-for-tensorflow-models]] [[open-in-colab]] XLA(Accelerated Linear Algebra)는 TensorFlow 모델의 실행 시간을 가속화하기 위한 컴파일러입니다. [공식 문서](https://www.tensorflow.org/xla)에 따르면 다음과 같습니다: XLA(Accelerated Linear Algebra)는 선형 대수를 위한 도메인 특화 컴파일러로, TensorFlow 모델을 소스 코드 변경 없이 가속화할 수 있습니다. TensorFlow에서 XLA를 사용하는 것은 간단합니다. XLA는 `tensorflow` 라이브러리 내에 패키지로 제공되며, [`tf.function`](https://www.tensorflow.org/guide/intro_to_graphs)과 같은 그래프 생성 함수에서 `jit_compile` 인수를 사용하여 활성화할 수 있습니다. `fit()` 및 `predict()`와 같은 Keras 메소드를 사용하는 경우, `jit_compile` 인수를 `model.compile()`에 전달하여 XLA를 간단하게 활성화할 수 있습니다. 그러나 XLA는 이러한 메소드에 국한되지 않고 임의의 `tf.function`을 가속화하는 데에도 사용할 수 있습니다. 🤗 Transformers에서는 [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2), [T5](https://huggingface.co/docs/transformers/model_doc/t5), [OPT](https://huggingface.co/docs/transformers/model_doc/opt)와 같은 모델의 텍스트 생성, 그리고 [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)와 같은 모델의 음성 처리를 포함하여 여러 TensorFlow 메소드가 XLA와 호환되도록 다시 작성되었습니다. 정확한 속도 향상은 모델에 따라 다르지만, 🤗 Transformers 내의 TensorFlow 텍스트 생성 모델의 경우 최대 100배의 속도 향상을 확인했습니다. 이 문서에서는 이러한 모델에 대해 XLA를 사용하여 최대 성능을 얻는 방법을 설명합니다. 또한 XLA 통합의 벤치마크 및 디자인 철학에 대한 추가 자료 링크도 제공할 것입니다. ## XLA를 사용하여 TF 함수 실행하기 [[running-tf-functions-with-xla]] TensorFlow에서 다음과 같은 모델을 고려해 봅시다: ```py import tensorflow as tf model = tf.keras.Sequential( [tf.keras.layers.Dense(10, input_shape=(10,), activation="relu"), tf.keras.layers.Dense(5, activation="softmax")] ) ``` 위 모델은 차원이 `(10, )`인 입력을 받습니다. 다음과 같이 모델을 사용하여 순전파를 실행할 수 있습니다: ```py # 모델에 대한 임의의 입력을 생성합니다. batch_size = 16 input_vector_dim = 10 random_inputs = tf.random.normal((batch_size, input_vector_dim)) # 순전파를 실행합니다. _ = model(random_inputs) ``` XLA로 컴파일된 함수로 순전파를 실행하려면 다음과 같이 해야 합니다: ```py xla_fn = tf.function(model, jit_compile=True) _ = xla_fn(random_inputs) ``` `model`의 기본 `call()` 함수는 XLA 그래프를 컴파일하는 데 사용됩니다. 그러나 다른 모델 함수를 XLA로 컴파일하려면 다음과 같이 할 수도 있습니다: ```py my_xla_fn = tf.function(model.my_xla_fn, jit_compile=True) ``` ## 🤗 Transformers에서 XLA를 사용하여 TF 텍스트 생성 모델 실행하기 [[running-a-tf-text-generation-model-with-xla-from-transformers]] 🤗 Transformers에서 XLA로 가속화된 생성을 활성화하려면 최신 버전의 `transformers`가 설치되어 있어야 합니다. 다음과 같이 설치할 수 있습니다: ```bash pip install transformers --upgrade ``` 그리고 다음 코드를 실행할 수 있습니다: ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForCausalLM # 최소 버전의 Transformers가 설치되어 있지 않다면 오류가 발생합니다. 
from transformers.utils import check_min_version check_min_version("4.21.0") tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", padding_side="left", pad_token="</s>") model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") input_string = ["TensorFlow is"] # XLA 생성 함수를 만들기 위한 한 줄 xla_generate = tf.function(model.generate, jit_compile=True) tokenized_input = tokenizer(input_string, return_tensors="tf") generated_tokens = xla_generate(**tokenized_input, num_beams=2) decoded_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True) print(f"Generated -- {decoded_text}") # Generated -- TensorFlow is an open-source, open-source, distributed-source application # framework for the ``` 알 수 있듯이, `generate()`에서 XLA를 활성화하는 것은 단 한 줄의 코드입니다. 코드의 나머지 부분은 변경되지 않습니다. 그러나 위 코드 스니펫에서는 XLA에 특정한 몇 가지 주의할 점이 있습니다. XLA가 가져다줄 속도 향상을 실현하기 위해서는 이를 알고 있어야 합니다. 다음 섹션에서 이에 대해 논의합니다. ## 주의할 점 [[gotchas-to-be-aware-of]] XLA 활성화 함수(`xla_generate()`와 같은)를 처음 실행할 때 내부적으로 계산 그래프를 추론하려고 하며, 이는 시간이 소요됩니다. 이 과정은 [“추적(tracing)”](https://www.tensorflow.org/guide/intro_to_graphs#when_is_a_function_tracing)이라고 알려져 있습니다. 생성 시간이 빠르지 않다는 것을 알 수 있을 것입니다. `xla_generate()`(또는 다른 XLA 활성화 함수)의 연속 호출은 함수에 전달된 입력이 초기에 구축된 계산 그래프와 동일한 형태를 따른다면, 계산 그래프를 추론할 필요가 없습니다. 이는 입력 형태가 고정된 모달리티(예: 이미지)에는 문제가 되지 않지만, 가변 입력 형태 모달리티(예: 텍스트)를 사용할 때 주의해야 합니다. `xla_generate()`가 항상 동일한 입력 형태로 동작하도록 하려면, 토크나이저를 호출할 때 `padding` 인수를 지정할 수 있습니다. ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", padding_side="left", pad_token="</s>") model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") input_string = ["TensorFlow is"] xla_generate = tf.function(model.generate, jit_compile=True) # 여기서, padding 옵션이 있는 토크나이저를 호출합니다. tokenized_input = tokenizer(input_string, pad_to_multiple_of=8, padding=True, return_tensors="tf") generated_tokens = xla_generate(**tokenized_input, num_beams=2) decoded_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True) print(f"Generated -- {decoded_text}") ``` 이렇게 하면 `xla_generate()`에 대한 입력이 항상 추적된 형태로 전달되어 생성 시간이 가속화됩니다. 다음 코드로 이를 확인할 수 있습니다: ```py import time import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", padding_side="left", pad_token="</s>") model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") xla_generate = tf.function(model.generate, jit_compile=True) for input_string in ["TensorFlow is", "TensorFlow is a", "TFLite is a"]: tokenized_input = tokenizer(input_string, pad_to_multiple_of=8, padding=True, return_tensors="tf") start = time.time_ns() generated_tokens = xla_generate(**tokenized_input, num_beams=2) end = time.time_ns() print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n") ``` Tesla T4 GPU에서는 다음과 같은 출력을 예상할 수 있습니다: ```bash Execution time -- 30819.6 ms Execution time -- 79.0 ms Execution time -- 78.9 ms ``` `xla_generate()`의 첫 번째 호출은 추적 때문에 시간이 오래 걸리지만, 연속 호출은 몇 배나 빠릅니다. 생성 옵션에 대한 어떤 변경이든 다시 추적을 유발하므로 생성 시간이 느려질 수 있음을 명심하세요. 이 문서에서는 🤗 Transformers에서 제공하는 모든 텍스트 생성 옵션을 다루지 않았습니다. 고급 사용 사례에 대해 문서를 참조하시기 바랍니다. ## 추가 자료 [[additional-resources]] 여기에 🤗 Transformers와 XLA에 대해 더 자세히 알고 싶은 경우 도움이 될 수 있는 몇 가지 추가 자료를 제공합니다. 
* [이 Colab 노트북](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/91_tf_xla_generate.ipynb)은 XLA와 호환되는 인코더-디코더([T5](https://huggingface.co/docs/transformers/model_doc/t5)와 같은) 및 디코더 전용([GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)와 같은) 텍스트 생성 모델을 실험해 볼 수 있는 대화형 데모를 제공합니다. * [이 블로그 글](https://huggingface.co/blog/tf-xla-generate)은 TensorFlow에서 XLA에 대한 친절한 소개와 함께 XLA와 호환되는 모델의 비교 벤치마크에 대한 개요를 제공합니다. * [이 블로그 글](https://blog.tensorflow.org/2022/11/how-hugging-face-improved-text-generation-performance-with-xla.html)은 🤗 Transformers의 TensorFlow 모델에 XLA 지원을 추가하는 것에 대한 디자인 철학을 논의합니다. * XLA와 TensorFlow 그래프에 대해 더 자세히 알고 싶은 경우 추천하는 글: * [XLA: 기계 학습을 위한 최적화 컴파일러](https://www.tensorflow.org/xla) * [그래프 및 tf.function 소개](https://www.tensorflow.org/guide/intro_to_graphs) * [tf.function으로 성능 향상하기](https://www.tensorflow.org/guide/function)
transformers/docs/source/ko/tf_xla.md/0
{ "file_path": "transformers/docs/source/ko/tf_xla.md", "repo_id": "transformers", "token_count": 6235 }
268
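One practical way to check the retracing behaviour described in the `tf_xla.md` file above is `tf.function`'s `experimental_get_tracing_count()`. The sketch below assumes TensorFlow plus the TF weights of `openai-community/gpt2`, mirroring the snippets in the document; exact trace counts can vary, but with padded (stable) input shapes the count should stop growing after the first call or two.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained(
    "openai-community/gpt2", padding_side="left", pad_token="</s>"
)
model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")

xla_generate = tf.function(model.generate, jit_compile=True)

for text in ["TensorFlow is", "TensorFlow is a", "TFLite is a"]:
    # Padding to a multiple of 8 keeps the input shape identical across calls,
    # so the XLA graph traced on the first call can be reused afterwards.
    inputs = tokenizer([text], pad_to_multiple_of=8, padding=True, return_tensors="tf")
    xla_generate(**inputs, num_beams=2)
    print("traces so far:", xla_generate.experimental_get_tracing_count())
```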
<!--版权2023年HuggingFace团队保留所有权利。 根据Apache许可证第2.0版(“许可证”)许可;除非符合许可证,否则您不得使用此文件。您可以在以下网址获取许可证的副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,否则按“按原样”分发的软件,无论是明示还是暗示的,都没有任何担保或条件。请参阅许可证以了解特定语言下的权限和限制。 ⚠️ 请注意,本文件虽然使用Markdown编写,但包含了特定的语法,适用于我们的doc-builder(类似于MDX),可能无法在您的Markdown查看器中正常渲染。 --> # 🤗 加速分布式训练 随着模型变得越来越大,并行性已经成为在有限硬件上训练更大模型和加速训练速度的策略,增加了数个数量级。在Hugging Face,我们创建了[🤗 加速](https://huggingface.co/docs/accelerate)库,以帮助用户在任何类型的分布式设置上轻松训练🤗 Transformers模型,无论是在一台机器上的多个GPU还是在多个机器上的多个GPU。在本教程中,了解如何自定义您的原生PyTorch训练循环,以启用分布式环境中的训练。 ## 设置 通过安装🤗 加速开始: ```bash pip install accelerate ``` 然后导入并创建[`~accelerate.Accelerator`]对象。[`~accelerate.Accelerator`]将自动检测您的分布式设置类型,并初始化所有必要的训练组件。您不需要显式地将模型放在设备上。 ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## 准备加速 下一步是将所有相关的训练对象传递给[`~accelerate.Accelerator.prepare`]方法。这包括您的训练和评估DataLoader、一个模型和一个优化器: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## 反向传播 最后一步是用🤗 加速的[`~accelerate.Accelerator.backward`]方法替换训练循环中的典型`loss.backward()`: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` 如您在下面的代码中所见,您只需要添加四行额外的代码到您的训练循环中即可启用分布式训练! ```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## 训练 在添加了相关代码行后,可以在脚本或笔记本(如Colaboratory)中启动训练。 ### 用脚本训练 如果您从脚本中运行训练,请运行以下命令以创建和保存配置文件: ```bash accelerate config ``` 然后使用以下命令启动训练: ```bash accelerate launch train.py ``` ### 用笔记本训练 🤗 加速还可以在笔记本中运行,如果您计划使用Colaboratory的TPU,则可在其中运行。将负责训练的所有代码包装在一个函数中,并将其传递给[`~accelerate.notebook_launcher`]: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` 有关🤗 加速及其丰富功能的更多信息,请参阅[文档](https://huggingface.co/docs/accelerate)。
transformers/docs/source/zh/accelerate.md/0
{ "file_path": "transformers/docs/source/zh/accelerate.md", "repo_id": "transformers", "token_count": 2552 }
269
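To complement the `accelerate.md` walkthrough above, here is a hedged end-to-end sketch that assembles the same four-line change into a runnable toy loop. The checkpoint name and the two-sentence dataset are placeholders, and `torch.optim.AdamW` is deliberately used instead of the deprecated `transformers.AdamW` shown in the original diff.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_scheduler

accelerator = Accelerator()  # detects the distributed setup automatically

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "google-bert/bert-base-uncased", num_labels=2
)

# A toy dataset standing in for a real tokenized corpus.
texts = ["I love this.", "I hate this."] * 8
labels = torch.tensor([1, 0] * 8)
enc = tokenizer(texts, padding=True, return_tensors="pt")
dataset = TensorDataset(enc["input_ids"], enc["attention_mask"], labels)
train_dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5)
num_epochs = 1
num_training_steps = num_epochs * len(train_dataloader)
lr_scheduler = get_scheduler(
    "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
)

# The Accelerate-specific lines: prepare(...) and accelerator.backward(loss).
train_dataloader, model, optimizer = accelerator.prepare(train_dataloader, model, optimizer)

model.train()
for epoch in range(num_epochs):
    for input_ids, attention_mask, batch_labels in train_dataloader:
        outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=batch_labels)
        accelerator.backward(outputs.loss)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
```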
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 用于生成的工具 此页面列出了所有由 [`~generation.GenerationMixin.generate`]。 ## 生成输出 [`~generation.GenerationMixin.generate`] 的输出是 [`~utils.ModelOutput`] 的一个子类的实例。这个输出是一种包含 [`~generation.GenerationMixin.generate`] 返回的所有信息数据结构,但也可以作为元组或字典使用。 这里是一个例子: ```python from transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2") inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt") generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True) ``` `generation_output` 的对象是 [`~generation.GenerateDecoderOnlyOutput`] 的一个实例,从该类的文档中我们可以看到,这意味着它具有以下属性: - `sequences`: 生成的tokens序列 - `scores`(可选): 每个生成步骤的语言建模头的预测分数 - `hidden_states`(可选): 每个生成步骤模型的hidden states - `attentions`(可选): 每个生成步骤模型的注意力权重 在这里,由于我们传递了 `output_scores=True`,我们具有 `scores` 属性。但我们没有 `hidden_states` 和 `attentions`,因为没有传递 `output_hidden_states=True` 或 `output_attentions=True`。 您可以像通常一样访问每个属性,如果该属性未被模型返回,则将获得 `None`。例如,在这里 `generation_output.scores` 是语言建模头的所有生成预测分数,而 `generation_output.attentions` 为 `None`。 当我们将 `generation_output` 对象用作元组时,它只保留非 `None` 值的属性。例如,在这里它有两个元素,`loss` 然后是 `logits`,所以 ```python generation_output[:2] ``` 将返回元组`(generation_output.sequences, generation_output.scores)`。 当我们将`generation_output`对象用作字典时,它只保留非`None`的属性。例如,它有两个键,分别是`sequences`和`scores`。 我们在此记录所有输出类型。 ### PyTorch [[autodoc]] generation.GenerateDecoderOnlyOutput [[autodoc]] generation.GenerateEncoderDecoderOutput [[autodoc]] generation.GenerateBeamDecoderOnlyOutput [[autodoc]] generation.GenerateBeamEncoderDecoderOutput ### TensorFlow [[autodoc]] generation.TFGreedySearchEncoderDecoderOutput [[autodoc]] generation.TFGreedySearchDecoderOnlyOutput [[autodoc]] generation.TFSampleEncoderDecoderOutput [[autodoc]] generation.TFSampleDecoderOnlyOutput [[autodoc]] generation.TFBeamSearchEncoderDecoderOutput [[autodoc]] generation.TFBeamSearchDecoderOnlyOutput [[autodoc]] generation.TFBeamSampleEncoderDecoderOutput [[autodoc]] generation.TFBeamSampleDecoderOnlyOutput [[autodoc]] generation.TFContrastiveSearchEncoderDecoderOutput [[autodoc]] generation.TFContrastiveSearchDecoderOnlyOutput ### FLAX [[autodoc]] generation.FlaxSampleOutput [[autodoc]] generation.FlaxGreedySearchOutput [[autodoc]] generation.FlaxBeamSearchOutput ## LogitsProcessor [`LogitsProcessor`] 可以用于修改语言模型头的预测分数以进行生成 ### PyTorch [[autodoc]] AlternatingCodebooksLogitsProcessor - __call__ [[autodoc]] ClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] EncoderNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] EncoderRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] EpsilonLogitsWarper - __call__ [[autodoc]] EtaLogitsWarper - __call__ [[autodoc]] ExponentialDecayLengthPenalty - __call__ [[autodoc]] 
ForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] ForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] ForceTokensLogitsProcessor - __call__ [[autodoc]] HammingDiversityLogitsProcessor - __call__ [[autodoc]] InfNanRemoveLogitsProcessor - __call__ [[autodoc]] LogitNormalization - __call__ [[autodoc]] LogitsProcessor - __call__ [[autodoc]] LogitsProcessorList - __call__ [[autodoc]] LogitsWarper - __call__ [[autodoc]] MinLengthLogitsProcessor - __call__ [[autodoc]] MinNewTokensLengthLogitsProcessor - __call__ [[autodoc]] NoBadWordsLogitsProcessor - __call__ [[autodoc]] NoRepeatNGramLogitsProcessor - __call__ [[autodoc]] PrefixConstrainedLogitsProcessor - __call__ [[autodoc]] RepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] SequenceBiasLogitsProcessor - __call__ [[autodoc]] SuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] SuppressTokensLogitsProcessor - __call__ [[autodoc]] TemperatureLogitsWarper - __call__ [[autodoc]] TopKLogitsWarper - __call__ [[autodoc]] TopPLogitsWarper - __call__ [[autodoc]] TypicalLogitsWarper - __call__ [[autodoc]] UnbatchedClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] WhisperTimeStampLogitsProcessor - __call__ ### TensorFlow [[autodoc]] TFForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] TFForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] TFForceTokensLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessorList - __call__ [[autodoc]] TFLogitsWarper - __call__ [[autodoc]] TFMinLengthLogitsProcessor - __call__ [[autodoc]] TFNoBadWordsLogitsProcessor - __call__ [[autodoc]] TFNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] TFRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensLogitsProcessor - __call__ [[autodoc]] TFTemperatureLogitsWarper - __call__ [[autodoc]] TFTopKLogitsWarper - __call__ [[autodoc]] TFTopPLogitsWarper - __call__ ### FLAX [[autodoc]] FlaxForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForceTokensLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessorList - __call__ [[autodoc]] FlaxLogitsWarper - __call__ [[autodoc]] FlaxMinLengthLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensLogitsProcessor - __call__ [[autodoc]] FlaxTemperatureLogitsWarper - __call__ [[autodoc]] FlaxTopKLogitsWarper - __call__ [[autodoc]] FlaxTopPLogitsWarper - __call__ [[autodoc]] FlaxWhisperTimeStampLogitsProcessor - __call__ ## StoppingCriteria 可以使用[`StoppingCriteria`]来更改停止生成的时间(除了EOS token以外的方法)。请注意,这仅适用于我们的PyTorch实现。 [[autodoc]] StoppingCriteria - __call__ [[autodoc]] StoppingCriteriaList - __call__ [[autodoc]] MaxLengthCriteria - __call__ [[autodoc]] MaxTimeCriteria - __call__ ## Constraints 可以使用[`Constraint`]来强制生成结果包含输出中的特定tokens或序列。请注意,这仅适用于我们的PyTorch实现。 [[autodoc]] Constraint [[autodoc]] PhrasalConstraint [[autodoc]] DisjunctiveConstraint [[autodoc]] ConstraintListState ## BeamSearch [[autodoc]] BeamScorer - process - finalize [[autodoc]] BeamSearchScorer - process - finalize [[autodoc]] ConstrainedBeamSearchScorer - process - finalize ## Streamers [[autodoc]] TextStreamer [[autodoc]] TextIteratorStreamer
transformers/docs/source/zh/internal/generation_utils.md/0
{ "file_path": "transformers/docs/source/zh/internal/generation_utils.md", "repo_id": "transformers", "token_count": 3487 }
270
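As a concrete, hedged illustration of the processor and stopping-criteria classes catalogued in the `generation_utils.md` file above, the sketch below builds a `LogitsProcessorList` and a `StoppingCriteriaList` by hand and passes them to `generate()`. It assumes a PyTorch install; `openai-community/gpt2` and the specific penalty, length and time values are arbitrary examples.

```python
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    RepetitionPenaltyLogitsProcessor,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")

# Logits processors rewrite the next-token scores at every generation step.
logits_processor = LogitsProcessorList(
    [
        MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id),
        RepetitionPenaltyLogitsProcessor(penalty=1.2),
    ]
)

# Stopping criteria decide when generation halts; here a wall-clock budget.
stopping_criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=5.0)])

outputs = model.generate(
    **inputs,
    logits_processor=logits_processor,
    stopping_criteria=stopping_criteria,
    max_new_tokens=40,
    do_sample=False,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```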
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Logging 🤗 Transformers拥有一个集中式的日志系统,因此您可以轻松设置库输出的日志详细程度。 当前库的默认日志详细程度为`WARNING`。 要更改日志详细程度,只需使用其中一个直接的setter。例如,以下是如何将日志详细程度更改为INFO级别的方法: ```python import transformers transformers.logging.set_verbosity_info() ``` 您还可以使用环境变量`TRANSFORMERS_VERBOSITY`来覆盖默认的日志详细程度。您可以将其设置为以下级别之一:`debug`、`info`、`warning`、`error`、`critical`。例如: ```bash TRANSFORMERS_VERBOSITY=error ./myprogram.py ``` 此外,通过将环境变量`TRANSFORMERS_NO_ADVISORY_WARNINGS`设置为`true`(如*1*),可以禁用一些`warnings`。这将禁用[`logger.warning_advice`]记录的任何警告。例如: ```bash TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py ``` 以下是如何在您自己的模块或脚本中使用与库相同的logger的示例: ```python from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger("transformers") logger.info("INFO") logger.warning("WARN") ``` 此日志模块的所有方法都在下面进行了记录,主要的方法包括 [`logging.get_verbosity`] 用于获取logger当前输出日志详细程度的级别和 [`logging.set_verbosity`] 用于将详细程度设置为您选择的级别。按照顺序(从最不详细到最详细),这些级别(及其相应的整数值)为: - `transformers.logging.CRITICAL` 或 `transformers.logging.FATAL`(整数值,50):仅报告最关键的errors。 - `transformers.logging.ERROR`(整数值,40):仅报告errors。 - `transformers.logging.WARNING` 或 `transformers.logging.WARN`(整数值,30):仅报告error和warnings。这是库使用的默认级别。 - `transformers.logging.INFO`(整数值,20):报告error、warnings和基本信息。 - `transformers.logging.DEBUG`(整数值,10):报告所有信息。 默认情况下,将在模型下载期间显示`tqdm`进度条。[`logging.disable_progress_bar`] 和 [`logging.enable_progress_bar`] 可用于禁止或启用此行为。 ## `logging` vs `warnings` Python有两个经常一起使用的日志系统:如上所述的`logging`,和对特定buckets中的警告进行进一步分类的`warnings`,例如,`FutureWarning`用于输出已经被弃用的功能或路径,`DeprecationWarning`用于指示即将被弃用的内容。 我们在`transformers`库中同时使用这两个系统。我们利用并调整了`logging`的`captureWarning`方法,以便通过上面的详细程度setters来管理这些警告消息。 对于库的开发人员,这意味着什么呢?我们应该遵循以下启发法则: - 库的开发人员和依赖于`transformers`的库应优先使用`warnings` - `logging`应该用于在日常项目中经常使用它的用户 以下是`captureWarnings`方法的参考。 [[autodoc]] logging.captureWarnings ## Base setters [[autodoc]] logging.set_verbosity_error [[autodoc]] logging.set_verbosity_warning [[autodoc]] logging.set_verbosity_info [[autodoc]] logging.set_verbosity_debug ## Other functions [[autodoc]] logging.get_verbosity [[autodoc]] logging.set_verbosity [[autodoc]] logging.get_logger [[autodoc]] logging.enable_default_handler [[autodoc]] logging.disable_default_handler [[autodoc]] logging.enable_explicit_format [[autodoc]] logging.reset_format [[autodoc]] logging.enable_progress_bar [[autodoc]] logging.disable_progress_bar
transformers/docs/source/zh/main_classes/logging.md/0
{ "file_path": "transformers/docs/source/zh/main_classes/logging.md", "repo_id": "transformers", "token_count": 2154 }
271
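A short, hedged companion snippet to the `logging.md` file above, showing the getters, setters, per-module logger and progress-bar toggles side by side; it only assumes that `transformers` is installed and that the default verbosity has not already been overridden via `TRANSFORMERS_VERBOSITY`.

```python
from transformers.utils import logging

# Integer levels: DEBUG=10 < INFO=20 < WARNING=30 < ERROR=40 < CRITICAL=50.
print(logging.get_verbosity())  # 30 (WARNING) by default

logging.set_verbosity_info()    # same effect as logging.set_verbosity(logging.INFO)
print(logging.get_verbosity())  # 20

# A module-level logger that respects the verbosity configured above.
logger = logging.get_logger("transformers")
logger.info("this INFO message is now visible")
logger.warning("warnings are always visible at this level")

# Toggle the tqdm bars shown during downloads.
logging.disable_progress_bar()
logging.enable_progress_bar()
```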
<!--- Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Automatic Speech Recognition - Flax Examples

## Sequence to Sequence

The script [`run_flax_speech_recognition_seq2seq.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py) can be used to fine-tune any [Flax Speech Sequence-to-Sequence Model](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.FlaxAutoModelForSpeechSeq2Seq) for automatic speech recognition on one of the [official speech recognition datasets](https://huggingface.co/datasets?task_ids=task_ids:automatic-speech-recognition) or a custom dataset. This includes the Whisper model from OpenAI or a warm-started Speech-Encoder-Decoder Model; an example of the former is included below.

### Whisper Model

We can load all components of the Whisper model directly from the pretrained checkpoint, including the pretrained model weights, feature extractor and tokenizer. We simply have to specify the id of the fine-tuning dataset and the necessary training hyperparameters. The following example shows how to fine-tune the [Whisper small](https://huggingface.co/openai/whisper-small) checkpoint on the Hindi subset of the [Common Voice 13](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) dataset. Note that before running this script you must accept the dataset's [terms of use](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) and register your Hugging Face Hub token on your device by running `huggingface-cli login`.

```bash
python run_flax_speech_recognition_seq2seq.py \
	--model_name_or_path="openai/whisper-small" \
	--dataset_name="mozilla-foundation/common_voice_13_0" \
	--dataset_config_name="hi" \
	--language="hindi" \
	--train_split_name="train+validation" \
	--eval_split_name="test" \
	--output_dir="./whisper-small-hi-flax" \
	--per_device_train_batch_size="16" \
	--per_device_eval_batch_size="16" \
	--num_train_epochs="10" \
	--learning_rate="1e-4" \
	--warmup_steps="500" \
	--logging_steps="25" \
	--generation_max_length="40" \
	--preprocessing_num_workers="32" \
	--dataloader_num_workers="32" \
	--max_duration_in_seconds="30" \
	--text_column_name="sentence" \
	--overwrite_output_dir \
	--do_train \
	--do_eval \
	--predict_with_generate \
	--push_to_hub \
	--use_auth_token
```

On a TPU v4-8, training should take approximately 25 minutes, with a final cross-entropy loss of 0.02 and a word error rate of **34%**. See the checkpoint [sanchit-gandhi/whisper-small-hi-flax](https://huggingface.co/sanchit-gandhi/whisper-small-hi-flax) for an example training run.
transformers/examples/flax/speech-recognition/README.md/0
{ "file_path": "transformers/examples/flax/speech-recognition/README.md", "repo_id": "transformers", "token_count": 1039 }
272
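As a hedged follow-up to the README above, the sketch below shows one way to load a checkpoint produced by that command for inference, using either the local `--output_dir` or the linked Hub repo. The one-second silent array is only there to keep the snippet self-contained; real use would pass 16 kHz speech.

```python
import numpy as np
from transformers import FlaxWhisperForConditionalGeneration, WhisperProcessor

# Local --output_dir from the command above, or a Hub id such as
# "sanchit-gandhi/whisper-small-hi-flax" if the run was pushed to the Hub.
checkpoint = "./whisper-small-hi-flax"

processor = WhisperProcessor.from_pretrained(checkpoint)
model = FlaxWhisperForConditionalGeneration.from_pretrained(checkpoint)

# One second of silence at 16 kHz stands in for a real recording.
audio = np.zeros(16_000, dtype=np.float32)
inputs = processor(audio, sampling_rate=16_000, return_tensors="np")

outputs = model.generate(inputs.input_features, max_length=40)
transcription = processor.batch_decode(outputs.sequences, skip_special_tokens=True)
print(transcription)
```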
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for question-answering.""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import transformers from transformers import ( AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, HfArgumentParser, SquadDataset, Trainer, TrainingArguments, ) from transformers import SquadDataTrainingArguments as DataTrainingArguments from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Prepare Question-Answering task # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets is_language_sensitive = hasattr(model.config, "lang2id") train_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir ) if training_args.do_train else None ) eval_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, mode="dev", is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir, ) if training_args.do_eval else None ) # Data collator data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/legacy/question-answering/run_squad_trainer.py/0
{ "file_path": "transformers/examples/legacy/question-answering/run_squad_trainer.py", "repo_id": "transformers", "token_count": 2569 }
273
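A hedged usage note for the legacy `run_squad_trainer.py` script above: the directory written to `--output_dir` can be loaded straight into a `question-answering` pipeline. The `./squad-output` path below is a placeholder for whatever output directory was used.

```python
from transformers import pipeline

# Load the model and tokenizer that the script saved to --output_dir.
qa = pipeline("question-answering", model="./squad-output", tokenizer="./squad-output")

result = qa(
    question="What dataset is the model fine-tuned on?",
    context="The legacy run_squad_trainer.py script fine-tunes extractive question answering models on SQuAD.",
)
print(result["answer"], result["score"])
```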
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset BERT_BASE_CASED = "google-bert/bert-base-cased" PEGASUS_XSUM = "google/pegasus-xsum" ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestAll(TestCasePlus): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) @slow def test_seq2seq_dataset_truncation(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) max_src_len = 4 max_tgt_len = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated src_lang, tgt_lang = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. train_dataset = Seq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, # ignored src_lang=src_lang, tgt_lang=tgt_lang, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(batch, dict) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def test_legacy_dataset_truncation(self, tok): tokenizer = AutoTokenizer.from_pretrained(tok) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) trunc_target = 4 train_dataset = LegacySeq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def test_pack_dataset(self): tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) orig_examples = tmp_dir.joinpath("train.source").open().readlines() save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(tokenizer, tmp_dir, 128, save_dir) orig_paths = {x.name for x in tmp_dir.iterdir()} new_paths = {x.name for x in save_dir.iterdir()} packed_examples = save_dir.joinpath("train.source").open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(packed_examples) < len(orig_examples) assert len(packed_examples) == 1 assert len(packed_examples[0]) == sum(len(x) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq") def test_dynamic_batch_size(self): if not FAIRSEQ_AVAILABLE: return ds, max_tokens, tokenizer = self._get_dataset(max_len=64) required_batch_size_multiple = 64 batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple) batch_sizes = [len(x) for x in batch_sampler] assert len(set(batch_sizes)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(batch_sizes) == len(ds) # no dropped or added examples data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2) failures = [] num_src_per_batch = [] for batch in data_loader: src_shape = batch["input_ids"].shape bs = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple num_src_tokens = np.product(batch["input_ids"].shape) num_src_per_batch.append(num_src_tokens) if num_src_tokens > (max_tokens * 1.1): failures.append(num_src_tokens) assert 
num_src_per_batch[0] == max(num_src_per_batch) if failures: raise AssertionError(f"too many tokens in {len(failures)} batches") def test_sortish_sampler_reduces_padding(self): ds, _, tokenizer = self._get_dataset(max_len=512) bs = 2 sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False) naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2) sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler) pad = tokenizer.pad_token_id def count_pad_tokens(data_loader, k="input_ids"): return [batch[k].eq(pad).sum().item() for batch in data_loader] assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels")) assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl)) assert len(sortish_dl) == len(naive_dl) def _get_dataset(self, n_obs=1000, max_len=128): if os.getenv("USE_REAL_DATA", False): data_dir = "examples/seq2seq/wmt_en_ro" max_tokens = max_len * 2 * 64 if not Path(data_dir).joinpath("train.len").exists(): save_len_file(MARIAN_TINY, data_dir) else: data_dir = "examples/seq2seq/test_data/wmt_en_ro" max_tokens = max_len * 4 save_len_file(MARIAN_TINY, data_dir) tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY) ds = Seq2SeqDataset( tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, ) return ds, max_tokens, tokenizer def test_distributed_sortish_sampler_splits_indices_between_procs(self): ds, max_tokens, tokenizer = self._get_dataset() ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False)) ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False)) assert ids1.intersection(ids2) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) def test_dataset_kwargs(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False) if tok_name == MBART_TINY: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", ) kwargs = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, ) kwargs = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
transformers/examples/legacy/seq2seq/old_test_datasets.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/old_test_datasets.py", "repo_id": "transformers", "token_count": 5152 }
274
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from dataclasses import dataclass, field from typing import Optional from seq2seq_trainer import arg_to_scheduler from transformers import TrainingArguments logger = logging.getLogger(__name__) @dataclass class Seq2SeqTrainingArguments(TrainingArguments): """ Parameters: label_smoothing (:obj:`float`, `optional`, defaults to 0): The label smoothing epsilon to apply (if not zero). sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to SortishSampler or not. It sorts the inputs according to lengths in-order to minimizing the padding size. predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to use generate to calculate generative metrics (ROUGE, BLEU). """ label_smoothing: Optional[float] = field( default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSampler or not."}) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"}) encoder_layerdrop: Optional[float] = field( default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) decoder_layerdrop: Optional[float] = field( default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."}) attention_dropout: Optional[float] = field( default=None, metadata={"help": "Attention dropout probability. Goes into model.config."} ) lr_scheduler: Optional[str] = field( default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}, )
transformers/examples/legacy/seq2seq/seq2seq_training_args.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/seq2seq_training_args.py", "repo_id": "transformers", "token_count": 888 }
275
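A hedged sketch of how a dataclass like the `Seq2SeqTrainingArguments` above is typically consumed: `HfArgumentParser` turns each field into a command-line flag. The local import assumes the snippet is run from the same `examples/legacy/seq2seq` directory where `seq2seq_training_args.py` and its `seq2seq_trainer` dependency live.

```python
from transformers import HfArgumentParser

from seq2seq_training_args import Seq2SeqTrainingArguments  # local legacy module

parser = HfArgumentParser(Seq2SeqTrainingArguments)
# Bool fields with a False default become simple flags; floats take a value.
(training_args,) = parser.parse_args_into_dataclasses(
    ["--output_dir", "out", "--sortish_sampler", "--label_smoothing", "0.1"]
)
print(training_args.sortish_sampler, training_args.label_smoothing)
```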
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library's seq2seq models for question answering using the 🤗 Seq2SeqTrainer. """ # You can also adapt this script on your own question answering task. Pointers for this are left as comments. import logging import os import sys import warnings from dataclasses import dataclass, field from typing import List, Optional, Tuple import datasets import evaluate import numpy as np from datasets import load_dataset from trainer_seq2seq_qa import QuestionAnsweringSeq2SeqTrainer import transformers from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, Seq2SeqTrainingArguments, set_seed, ) from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." 
) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) context_column: Optional[str] = field( default="context", metadata={"help": "The name of the column in the datasets containing the contexts (for question answering)."}, ) question_column: Optional[str] = field( default="question", metadata={"help": "The name of the column in the datasets containing the questions (for question answering)."}, ) answer_column: Optional[str] = field( default="answers", metadata={"help": "The name of the column in the datasets containing the answers (for question answering)."}, ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=384, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_answer_length: int = field( default=30, metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) }, ) val_max_answer_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_answer_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when" " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." 
) }, ) version_2_with_negative: bool = field( default=False, metadata={"help": "If true, some of the examples do not have an answer."} ) null_score_diff_threshold: float = field( default=0.0, metadata={ "help": ( "The threshold used to select the null answer: if the best answer has a score that is less than " "the score of the null answer minus this threshold, the null answer is selected for this example. " "Only useful when `version_2_with_negative=True`." ) }, ) doc_stride: int = field( default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, ) n_best_size: int = field( default=20, metadata={"help": "The total number of n-best predictions to generate when looking for an answer."}, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." }, ) def __post_init__(self): if ( self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None ): raise ValueError("Need either a dataset name or a training/validation file/test_file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.test_file is not None: extension = self.test_file.split(".")[-1] assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." if self.val_max_answer_length is None: self.val_max_answer_length = self.max_answer_length question_answering_column_name_mapping = { "squad_v2": ("question", "context", "answer"), } def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_seq2seq_qa", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # Preprocessing the datasets. # We need to generate and tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names elif training_args.do_predict: column_names = raw_datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Get the column names for input/target. dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name, None) if data_args.question_column is None: question_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: question_column = data_args.question_column if question_column not in column_names: raise ValueError( f"--question_column' value '{data_args.question_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.context_column is None: context_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: context_column = data_args.context_column if context_column not in column_names: raise ValueError( f"--context_column' value '{data_args.context_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.answer_column is None: answer_column = dataset_columns[2] if dataset_columns is not None else column_names[2] else: answer_column = data_args.answer_column if answer_column not in column_names: raise ValueError( f"--answer_column' value '{data_args.answer_column}' needs to be one of: {', '.join(column_names)}" ) # Temporarily set max_answer_length for training. max_answer_length = data_args.max_answer_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " f"`{model.__class__.__name__}`. 
This will lead to loss being calculated twice and will take up more memory" ) if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_squad_batch( examples, question_column: str, context_column: str, answer_column: str, ) -> Tuple[List[str], List[str]]: questions = examples[question_column] contexts = examples[context_column] answers = examples[answer_column] def generate_input(_question, _context): return " ".join(["question:", _question.lstrip(), "context:", _context.lstrip()]) inputs = [generate_input(question, context) for question, context in zip(questions, contexts)] targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers] return inputs, targets def preprocess_function(examples): inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column) model_inputs = tokenizer(inputs, max_length=max_seq_length, padding=padding, truncation=True) # Tokenize targets with text_target=... labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs # Validation preprocessing def preprocess_validation_function(examples): inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column) model_inputs = tokenizer( inputs, max_length=max_seq_length, padding=padding, truncation=True, return_overflowing_tokens=True, return_offsets_mapping=True, ) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = model_inputs.pop("overflow_to_sample_mapping") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. model_inputs["example_id"] = [] # Augment the overflowing tokens to the labels labels_out = [] for i in range(len(model_inputs["input_ids"])): # One example can give several spans, this is the index of the example containing this span of text. 
sample_index = sample_mapping[i] model_inputs["example_id"].append(examples["id"][sample_index]) labels_out.append(labels["input_ids"][sample_index]) model_inputs["labels"] = labels_out return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # We will select sample from whole data if argument is specified max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create train feature from dataset with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if data_args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # We will select sample from whole data max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Validation Feature Creation with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_examples.map( preprocess_validation_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if data_args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Predict Feature Creation with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_examples.map( preprocess_validation_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) metric = evaluate.load( "squad_v2" 
if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir ) def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Post-processing: def post_processing_function( examples: datasets.Dataset, features: datasets.Dataset, outputs: EvalLoopOutput, stage="eval" ): # Decode the predicted tokens. preds = outputs.predictions if isinstance(preds, tuple): preds = preds[0] # Replace -100s used for padding as we can't decode them preds = np.where(preds != -100, preds, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} feature_per_example = {example_id_to_index[feature["example_id"]]: i for i, feature in enumerate(features)} predictions = {} # Let's loop over all the examples! for example_index, example in enumerate(examples): # This is the index of the feature associated to the current example. feature_index = feature_per_example[example_index] predictions[example["id"]] = decoded_preds[feature_index] # Format the result to the format the metric expects. if data_args.version_2_with_negative: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] references = [{"id": ex["id"], "answers": ex[answer_column]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) # Initialize our Trainer trainer = QuestionAnsweringSeq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, eval_examples=eval_examples if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, post_process_function=post_processing_function, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} max_length = ( training_args.generation_max_length if training_args.generation_max_length is not None else data_args.val_max_answer_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval") max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") results = 
trainer.predict(predict_dataset, predict_examples) metrics = results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if training_args.push_to_hub: kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name trainer.push_to_hub(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
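# ---------------------------------------------------------------------------
# Example invocation (illustrative only; the dataset, batch size and output
# directory below are placeholders, not values prescribed by this script):
#
#   python run_seq2seq_qa.py \
#     --model_name_or_path google-t5/t5-small \
#     --dataset_name squad_v2 \
#     --version_2_with_negative \
#     --do_train \
#     --do_eval \
#     --per_device_train_batch_size 4 \
#     --predict_with_generate \
#     --output_dir /tmp/seq2seq_squad
# ---------------------------------------------------------------------------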
transformers/examples/pytorch/question-answering/run_seq2seq_qa.py/0
{ "file_path": "transformers/examples/pytorch/question-answering/run_seq2seq_qa.py", "repo_id": "transformers", "token_count": 13528 }
276
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

## Summarization

This directory contains examples for finetuning and evaluating transformers on summarization tasks.
Please tag @patil-suraj with any issues/unexpected behaviors, or send a PR!
For deprecated `bertabs` instructions, see [`bertabs/README.md`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/bertabs/README.md).
For the old `finetune_trainer.py` and related utils, see [`examples/legacy/seq2seq`](https://github.com/huggingface/transformers/blob/main/examples/legacy/seq2seq).

### Supported Architectures

- `BartForConditionalGeneration`
- `FSMTForConditionalGeneration` (translation only)
- `MBartForConditionalGeneration`
- `MarianMTModel`
- `PegasusForConditionalGeneration`
- `T5ForConditionalGeneration`
- `MT5ForConditionalGeneration`

`run_summarization.py` is a lightweight example of how to download and preprocess a dataset from the [🤗 Datasets](https://github.com/huggingface/datasets) library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it.

For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets#json-files
and you will also find examples of these below.

## With Trainer

Here is an example on a summarization task:

```bash
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```

Only the T5 models `google-t5/t5-small`, `google-t5/t5-base`, `google-t5/t5-large`, `google-t5/t5-3b` and `google-t5/t5-11b` require an additional argument: `--source_prefix "summarize: "`.

We used the CNN/DailyMail dataset in this example as `google-t5/t5-small` was trained on it, so one can get good scores even when fine-tuning on a very small sample.

Extreme Summarization (XSum) is another commonly used dataset for the task of summarization. To use it, replace `--dataset_name cnn_dailymail --dataset_config "3.0.0"` with `--dataset_name xsum`.

And here is how you would use it on your own files, after adjusting the values for the arguments `--train_file`, `--validation_file`, `--text_column` and `--summary_column` to match your setup:

```bash
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --train_file path_to_csv_or_jsonlines_file \
    --validation_file path_to_csv_or_jsonlines_file \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --overwrite_output_dir \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --predict_with_generate
```

The task of summarization supports custom CSV and JSONLINES formats.
#### Custom CSV Files If it's a csv file the training and validation files should have a column for the inputs texts and a column for the summaries. If the csv file has just two columns as in the following example: ```csv text,summary "I'm sitting here in a boring room. It's just another rainy Sunday afternoon. I'm wasting my time I got nothing to do. I'm hanging around I'm waiting for you. But nothing ever happens. And I wonder","I'm sitting in a room where I'm waiting for something to happen" "I see trees so green, red roses too. I see them bloom for me and you. And I think to myself what a wonderful world. I see skies so blue and clouds so white. The bright blessed day, the dark sacred night. And I think to myself what a wonderful world.","I'm a gardener and I'm a big fan of flowers." "Christmas time is here. Happiness and cheer. Fun for all that children call. Their favorite time of the year. Snowflakes in the air. Carols everywhere. Olden times and ancient rhymes. Of love and dreams to share","It's that time of year again." ``` The first column is assumed to be for `text` and the second is for summary. If the csv file has multiple columns, you can then specify the names of the columns to use: ```bash --text_column text_column_name \ --summary_column summary_column_name \ ``` For example if the columns were: ```csv id,date,text,summary ``` and you wanted to select only `text` and `summary`, then you'd pass these additional arguments: ```bash --text_column text \ --summary_column summary \ ``` #### Custom JSONLINES Files The second supported format is jsonlines. Here is an example of a jsonlines custom data file. ```json {"text": "I'm sitting here in a boring room. It's just another rainy Sunday afternoon. I'm wasting my time I got nothing to do. I'm hanging around I'm waiting for you. But nothing ever happens. And I wonder", "summary": "I'm sitting in a room where I'm waiting for something to happen"} {"text": "I see trees so green, red roses too. I see them bloom for me and you. And I think to myself what a wonderful world. I see skies so blue and clouds so white. The bright blessed day, the dark sacred night. And I think to myself what a wonderful world.", "summary": "I'm a gardener and I'm a big fan of flowers."} {"text": "Christmas time is here. Happiness and cheer. Fun for all that children call. Their favorite time of the year. Snowflakes in the air. Carols everywhere. Olden times and ancient rhymes. Of love and dreams to share", "summary": "It's that time of year again."} ``` Same as with the CSV files, by default the first value will be used as the text record and the second as the summary record. Therefore you can use any key names for the entries, in this example `text` and `summary` were used. And as with the CSV files, you can specify which values to select from the file, by explicitly specifying the corresponding key names. In our example this again would be: ```bash --text_column text \ --summary_column summary \ ``` ## With Accelerate Based on the script [`run_summarization_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py). Like `run_summarization.py`, this script allows you to fine-tune any of the models supported on a summarization task, the main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like. 
It offers fewer options than the `Trainer`-based script (though you can easily change the options for the optimizer or the dataloaders directly in the script) but can still run in a distributed setup, on TPUs, and supports mixed precision through the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library.

You can use the script normally after installing 🤗 Accelerate:

```bash
pip install git+https://github.com/huggingface/accelerate
```

then

```bash
python run_summarization_no_trainer.py \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir ~/tmp/tst-summarization
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training. Finally, you can launch training with

```bash
accelerate launch run_summarization_no_trainer.py \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir ~/tmp/tst-summarization
```

This command is the same and will work for:

- a CPU-only setup
- a setup with one GPU
- a distributed training with several GPUs (single or multi node)
- a training on TPUs

Note that this library is in alpha release so your feedback is more than welcome if you encounter any problem using it.
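If you use custom CSV or JSONLINES files (see the dedicated section above), a quick way to sanity-check them before launching either script is to load them directly with 🤗 Datasets. A minimal sketch — the file names here are placeholders for your own files:

```python
from datasets import load_dataset

data_files = {"train": "train.json", "validation": "val.json"}  # placeholders
raw_datasets = load_dataset("json", data_files=data_files)

# Each record should expose the columns you pass via --text_column / --summary_column
print(raw_datasets["train"].column_names)
print(raw_datasets["train"][0])
```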
transformers/examples/pytorch/summarization/README.md/0
{ "file_path": "transformers/examples/pytorch/summarization/README.md", "repo_id": "transformers", "token_count": 2636 }
277
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Token classification

## PyTorch version

Fine-tuning the library models for token classification tasks such as Named Entity Recognition (NER), Parts-of-speech tagging (POS) or phrase extraction (CHUNKS). The main script `run_ner.py` leverages the 🤗 Datasets library and the Trainer API. You can easily customize it to your needs if you need extra processing on your datasets.

It will either run on a dataset hosted on our [hub](https://huggingface.co/datasets) or with your own text files for training and validation; you might just need to add some tweaks in the data preprocessing.

The following example fine-tunes BERT on CoNLL-2003:

```bash
python run_ner.py \
  --model_name_or_path google-bert/bert-base-uncased \
  --dataset_name conll2003 \
  --output_dir /tmp/test-ner \
  --do_train \
  --do_eval
```

or you can just run the bash script `run.sh`.

To run on your own training and validation files, use the following command:

```bash
python run_ner.py \
  --model_name_or_path google-bert/bert-base-uncased \
  --train_file path_to_train_file \
  --validation_file path_to_validation_file \
  --output_dir /tmp/test-ner \
  --do_train \
  --do_eval
```

**Note:** This script only works with models that have a fast tokenizer (backed by the 🤗 Tokenizers library) as it uses special features of those tokenizers. You can check if your favorite model has a fast tokenizer in [this table](https://huggingface.co/transformers/index.html#supported-frameworks); if it doesn't, you can still use the old version of the script.

> If your model classification head dimensions do not fit the number of labels in the dataset, you can specify `--ignore_mismatched_sizes` to adapt it.

## Old version of the script

You can find the old version of the PyTorch script [here](https://github.com/huggingface/transformers/blob/main/examples/legacy/token-classification/run_ner.py).

## Pytorch version, no Trainer

Based on the script [run_ner_no_trainer.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py).

Like `run_ner.py`, this script allows you to fine-tune any of the models on the [hub](https://huggingface.co/models) on a token classification task (NER, POS or CHUNKS), or on your own data in a csv or a JSON file. The main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like.

It offers fewer options than the `Trainer`-based script (though you can easily change the options for the optimizer or the dataloaders directly in the script) but can still run in a distributed setup, on TPUs, and supports mixed precision through the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library.
You can use the script normally after installing 🤗 Accelerate:

```bash
pip install git+https://github.com/huggingface/accelerate
```

then

```bash
export TASK_NAME=ner

python run_ner_no_trainer.py \
  --model_name_or_path google-bert/bert-base-cased \
  --dataset_name conll2003 \
  --task_name $TASK_NAME \
  --max_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$TASK_NAME/
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training. Finally, you can launch training with

```bash
export TASK_NAME=ner

accelerate launch run_ner_no_trainer.py \
  --model_name_or_path google-bert/bert-base-cased \
  --dataset_name conll2003 \
  --task_name $TASK_NAME \
  --max_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$TASK_NAME/
```

This command is the same and will work for:

- a CPU-only setup
- a setup with one GPU
- a distributed training with several GPUs (single or multi node)
- a training on TPUs

Note that this library is in alpha release so your feedback is more than welcome if you encounter any problem using it.
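The fast-tokenizer requirement mentioned above exists because the script aligns word-level labels with sub-word tokens via `word_ids()`, which only fast tokenizers expose. A minimal sketch of that alignment — the checkpoint and the toy sentence are purely illustrative, not something the script prescribes:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased", use_fast=True)

words = ["HuggingFace", "is", "based", "in", "NYC"]
labels = ["B-ORG", "O", "O", "O", "B-LOC"]

encoding = tokenizer(words, is_split_into_words=True)

aligned_labels = []
for word_id in encoding.word_ids():
    if word_id is None:
        # special tokens such as [CLS]/[SEP] get the ignore index used by the loss
        aligned_labels.append(-100)
    else:
        # every sub-word piece inherits the label of its source word
        aligned_labels.append(labels[word_id])

print(list(zip(encoding.tokens(), aligned_labels)))
```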
transformers/examples/pytorch/token-classification/README.md/0
{ "file_path": "transformers/examples/pytorch/token-classification/README.md", "repo_id": "transformers", "token_count": 1442 }
278
# Patience-based Early Exit

Patience-based Early Exit (PABEE) is a plug-and-play inference method for pretrained language models.
We have already implemented it on BERT and ALBERT. Basically, you can make your LM faster and more robust with PABEE. It can even improve the performance of ALBERT on GLUE. The only sacrifice is that the batch size can only be 1.
Learn more in the paper ["BERT Loses Patience: Fast and Robust Inference with Early Exit"](https://arxiv.org/abs/2006.04152) and the official [GitHub repo](https://github.com/JetRunner/PABEE).

![PABEE](https://github.com/JetRunner/PABEE/raw/master/bert-loses-patience.png)

## Training

You can fine-tune a pretrained language model (choosing from BERT and ALBERT, e.g. `google-bert/bert-base-uncased` or `albert/albert-base-v2`) and train the internal classifiers by:

```bash
export GLUE_DIR=/path/to/glue_data
export TASK_NAME=MRPC

python ./run_glue_with_pabee.py \
  --model_type albert \
  --model_name_or_path albert/albert-base-v2 \
  --task_name $TASK_NAME \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir "$GLUE_DIR/$TASK_NAME" \
  --max_seq_length 128 \
  --per_gpu_train_batch_size 32 \
  --per_gpu_eval_batch_size 32 \
  --learning_rate 2e-5 \
  --save_steps 50 \
  --logging_steps 50 \
  --num_train_epochs 5 \
  --output_dir /path/to/save/ \
  --evaluate_during_training
```

## Inference

You can run inference with different patience settings by:

```bash
export GLUE_DIR=/path/to/glue_data
export TASK_NAME=MRPC

python ./run_glue_with_pabee.py \
  --model_type albert \
  --model_name_or_path /path/to/save/ \
  --task_name $TASK_NAME \
  --do_eval \
  --do_lower_case \
  --data_dir "$GLUE_DIR/$TASK_NAME" \
  --max_seq_length 128 \
  --per_gpu_eval_batch_size 1 \
  --learning_rate 2e-5 \
  --logging_steps 50 \
  --num_train_epochs 15 \
  --output_dir /path/to/save/ \
  --eval_all_checkpoints \
  --patience 3,4,5,6,7,8
```

where `patience` can be a list of patience settings, separated by a comma. It will help determine which patience works best.

When evaluating on a regression task (STS-B), you may add `--regression_threshold 0.1` to define the regression threshold.

## Results

On the GLUE dev set:

| Model | \#Param | Speed | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST\-2 | STS\-B |
|--------------|---------|--------|-------|-------|-------|-------|-------|-------|--------|--------|
| ALBERT\-base | 12M | | 58\.9 | 84\.6 | 89\.5 | 91\.7 | 89\.6 | 78\.6 | 92\.8 | 89\.5 |
| \+PABEE | 12M | 1\.57x | 61\.2 | 85\.1 | 90\.0 | 91\.8 | 89\.6 | 80\.1 | 93\.0 | 90\.1 |

| Model | \#Param | Speed\-up | MNLI | SST\-2 | STS\-B |
|---------------|---------|-----------|-------|--------|--------|
| BERT\-base | 108M | | 84\.5 | 92\.1 | 88\.9 |
| \+PABEE | 108M | 1\.62x | 83\.6 | 92\.0 | 88\.7 |
| ALBERT\-large | 18M | | 86\.4 | 94\.9 | 90\.4 |
| \+PABEE | 18M | 2\.42x | 86\.8 | 95\.2 | 90\.6 |

## Citation

If you find this resource useful, please consider citing the following paper:

```bibtex
@misc{zhou2020bert,
    title={BERT Loses Patience: Fast and Robust Inference with Early Exit},
    author={Wangchunshu Zhou and Canwen Xu and Tao Ge and Julian McAuley and Ke Xu and Furu Wei},
    year={2020},
    eprint={2006.04152},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```
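For intuition, the patience mechanism described above reduces to a simple stopping rule over the predictions of the internal (per-layer) classifiers. The following is a rough sketch of that rule only, not the actual implementation in `run_glue_with_pabee.py`:

```python
def patience_based_prediction(per_layer_logits, patience):
    """per_layer_logits: classifier logits from shallow to deep layers (e.g. a list of 1-D tensors)."""
    prev_label, run_length = None, 0
    for logits in per_layer_logits:
        label = int(logits.argmax(-1))
        # length of the current run of identical predictions, including this layer
        run_length = run_length + 1 if label == prev_label else 1
        prev_label = label
        if run_length > patience:  # prediction unchanged for `patience` consecutive layers -> exit early
            return label
    return prev_label  # no early exit: fall back to the deepest classifier
```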
transformers/examples/research_projects/bert-loses-patience/README.md/0
{ "file_path": "transformers/examples/research_projects/bert-loses-patience/README.md", "repo_id": "transformers", "token_count": 1329 }
279
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The distiller to distil the student. Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import math import os import time import psutil import torch from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups from lm_seqs_dataset import LmSeqsDataset from torch import nn from torch.optim import AdamW from torch.utils.data import BatchSampler, DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm from transformers import get_linear_schedule_with_warmup from utils import logger try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter class Distiller: def __init__( self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module ): logger.info("Initializing Distiller") self.params = params self.dump_path = params.dump_path self.multi_gpu = params.multi_gpu self.fp16 = params.fp16 self.student = student self.teacher = teacher self.student_config = student.config self.vocab_size = student.config.vocab_size if params.n_gpu <= 1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) if params.group_by_size: groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) else: sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) self.temperature = params.temperature assert self.temperature > 0.0 self.alpha_ce = params.alpha_ce self.alpha_mlm = params.alpha_mlm self.alpha_clm = params.alpha_clm self.alpha_mse = params.alpha_mse self.alpha_cos = params.alpha_cos self.mlm = params.mlm if self.mlm: logger.info("Using MLM loss for LM step.") self.mlm_mask_prop = params.mlm_mask_prop assert 0.0 <= self.mlm_mask_prop <= 1.0 assert params.word_mask + params.word_keep + params.word_rand == 1.0 self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) self.pred_probs = self.pred_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else self.pred_probs self.token_probs = token_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else token_probs if self.fp16: self.pred_probs = self.pred_probs.half() self.token_probs = self.token_probs.half() else: logger.info("Using CLM loss for LM step.") self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.n_sequences_epoch = 0 self.total_loss_epoch = 0 self.last_loss = 0 self.last_loss_ce = 0 self.last_loss_mlm = 0 self.last_loss_clm = 0 if self.alpha_mse > 0.0: self.last_loss_mse = 0 if self.alpha_cos > 0.0: self.last_loss_cos = 0 self.last_log = 0 self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") self.lm_loss_fct 
= nn.CrossEntropyLoss(ignore_index=-100) if self.alpha_mse > 0.0: self.mse_loss_fct = nn.MSELoss(reduction="sum") if self.alpha_cos > 0.0: self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction="mean") logger.info("--- Initializing model optimizer") assert params.gradient_accumulation_steps >= 1 self.num_steps_epoch = len(self.dataloader) num_train_optimization_steps = ( int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": params.weight_decay, }, { "params": [ p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": 0.0, }, ] logger.info( "------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]) ) logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) self.optimizer = AdamW( optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98) ) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) self.scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps ) if self.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") self.student, self.optimizer = amp.initialize( self.student, self.optimizer, opt_level=self.params.fp16_opt_level ) self.teacher = self.teacher.half() if self.multi_gpu: if self.fp16: from apex.parallel import DistributedDataParallel logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student) else: from torch.nn.parallel import DistributedDataParallel logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel( self.student, device_ids=[params.local_rank], output_device=params.local_rank, find_unused_parameters=True, ) self.is_master = params.is_master if self.is_master: logger.info("--- Initializing Tensorboard") self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, "log", "train")) self.tensorboard.add_text(tag="config/training", text_string=str(self.params), global_step=0) self.tensorboard.add_text(tag="config/student", text_string=str(self.student_config), global_step=0) def prepare_batch_mlm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the masked label for MLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. mlm_labels: `torch.tensor(bs, seq_length)` - The masked language modeling labels. There is a -100 where there is nothing to predict. 
""" token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] bs, max_seq_len = token_ids.size() mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) x_prob = self.token_probs[token_ids.flatten()] n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) pred_mask = torch.zeros( bs * max_seq_len, dtype=torch.bool, device=token_ids.device ) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility pred_mask[tgt_ids] = 1 pred_mask = pred_mask.view(bs, max_seq_len) pred_mask[token_ids == self.params.special_tok_ids["pad_token"]] = 0 # mask a number of words == 0 [8] (faster with fp16) if self.fp16: n1 = pred_mask.sum().item() if n1 > 8: pred_mask = pred_mask.view(-1) n2 = max(n1 % 8, 8 * (n1 // 8)) if n2 != n1: pred_mask[torch.nonzero(pred_mask).view(-1)[: n1 - n2]] = 0 pred_mask = pred_mask.view(bs, max_seq_len) assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() _token_ids_real = token_ids[pred_mask] _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids["mask_token"]) probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) _token_ids = ( _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long() ) token_ids = token_ids.masked_scatter(pred_mask, _token_ids) mlm_labels[~pred_mask] = -100 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, mlm_labels def prepare_batch_clm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the labels for CLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. clm_labels: `torch.tensor(bs, seq_length)` - The causal language modeling labels. There is a -100 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) clm_labels[~attn_mask] = -100 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, clm_labels def round_batch(self, x: torch.tensor, lengths: torch.tensor): """ For float16 only. Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. Input: ------ x: `torch.tensor(bs, seq_length)` - The token ids. lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. Output: ------- x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. 
lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. """ if not self.fp16 or len(lengths) < 8: return x, lengths # number of sentences == 0 [8] bs1 = len(lengths) bs2 = 8 * (bs1 // 8) assert bs2 > 0 and bs2 % 8 == 0 if bs1 != bs2: idx = torch.randperm(bs1)[:bs2] lengths = lengths[idx] slen = lengths.max().item() x = x[idx, :slen] else: idx = None # sequence length == 0 [8] ml1 = x.size(1) if ml1 % 8 != 0: pad = 8 - (ml1 % 8) ml2 = ml1 + pad if self.mlm: pad_id = self.params.special_tok_ids["pad_token"] else: pad_id = self.params.special_tok_ids["unk_token"] padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) x = torch.cat([x, padding_tensor], 1) assert x.size() == (bs2, ml2) assert x.size(0) % 8 == 0 assert x.size(1) % 8 == 0 return x, lengths def train(self): """ The real training loop. """ if self.is_master: logger.info("Starting training") self.last_log = time.time() self.student.train() self.teacher.eval() for _ in range(self.params.n_epoch): if self.is_master: logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}") if self.multi_gpu: torch.distributed.barrier() iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) for batch in iter_bar: if self.params.n_gpu > 0: batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch) if self.mlm: token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) else: token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) iter_bar.update() iter_bar.set_postfix( {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"} ) iter_bar.close() if self.is_master: logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}") self.end_epoch() if self.is_master: logger.info("Save very last checkpoint as `pytorch_model.bin`.") self.save_checkpoint(checkpoint_name="pytorch_model.bin") logger.info("Training is finished") def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): """ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), and possibly a parameter update (depending on the gradient accumulation). Input: ------ input_ids: `torch.tensor(bs, seq_length)` - The token ids. attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). 
""" if self.mlm: student_outputs = self.student( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) else: student_outputs = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) s_logits, s_hidden_states = student_outputs["logits"], student_outputs["hidden_states"] t_logits, t_hidden_states = teacher_outputs["logits"], teacher_outputs["hidden_states"] assert s_logits.size() == t_logits.size() # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 # https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 if self.params.restrict_ce_to_mask: mask = (lm_labels > -1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) else: mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = ( self.ce_loss_fct( nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), nn.functional.softmax(t_logits_slct / self.temperature, dim=-1), ) * (self.temperature) ** 2 ) loss = self.alpha_ce * loss_ce if self.alpha_mlm > 0.0: loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) loss += self.alpha_mlm * loss_mlm if self.alpha_clm > 0.0: shift_logits = s_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss += self.alpha_clm * loss_clm if self.alpha_mse > 0.0: loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct) / s_logits_slct.size( 0 ) # Reproducing batchmean reduction loss += self.alpha_mse * loss_mse if self.alpha_cos > 0.0: s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) assert s_hidden_states.size() == t_hidden_states.size() dim = s_hidden_states.size(-1) s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,) loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) loss += self.alpha_cos * loss_cos self.total_loss_epoch += loss.item() self.last_loss = loss.item() self.last_loss_ce = loss_ce.item() if self.alpha_mlm > 0.0: self.last_loss_mlm = loss_mlm.item() if self.alpha_clm > 0.0: self.last_loss_clm = loss_clm.item() if self.alpha_mse > 0.0: self.last_loss_mse = loss_mse.item() if 
self.alpha_cos > 0.0: self.last_loss_cos = loss_cos.item() self.optimize(loss) self.n_sequences_epoch += input_ids.size(0) def optimize(self, loss): """ Normalization on the loss (gradient accumulation or distributed training), followed by backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). Also update the metrics for tensorboard. """ # Check for NaN if (loss != loss).data.any(): logger.error("NaN detected") exit() if self.multi_gpu: loss = loss.mean() if self.params.gradient_accumulation_steps > 1: loss = loss / self.params.gradient_accumulation_steps if self.fp16: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.iter() if self.n_iter % self.params.gradient_accumulation_steps == 0: if self.fp16: nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) else: nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() def iter(self): """ Update global counts, write to tensorboard and save checkpoint. """ self.n_iter += 1 self.n_total_iter += 1 if self.n_total_iter % self.params.log_interval == 0: self.log_tensorboard() self.last_log = time.time() if self.n_total_iter % self.params.checkpoint_interval == 0: self.save_checkpoint() def log_tensorboard(self): """ Log into tensorboard. Only by the master process. """ if not self.is_master: return for param_name, param in self.student.named_parameters(): self.tensorboard.add_scalar( tag="parameter_mean/" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="parameter_std/" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter ) if param.grad is None: continue self.tensorboard.add_scalar( tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.n_total_iter, ) self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) self.tensorboard.add_scalar( tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter ) if self.alpha_mlm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter ) if self.alpha_clm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter ) if self.alpha_mse > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter ) if self.alpha_cos > 0.0: self.tensorboard.add_scalar( tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()["used"] / 1_000_000, global_step=self.n_total_iter, ) self.tensorboard.add_scalar( tag="global/speed", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter ) def end_epoch(self): """ Finally arrived at the end of epoch (full pass on dataset). 
Do some tensorboard logging and checkpoint saving. """ logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.") if self.is_master: self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth") self.tensorboard.add_scalar( tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch ) self.epoch += 1 self.n_sequences_epoch = 0 self.n_iter = 0 self.total_loss_epoch = 0 def save_checkpoint(self, checkpoint_name: str = "checkpoint.pth"): """ Save the current state. Only by the master process. """ if not self.is_master: return mdl_to_save = self.student.module if hasattr(self.student, "module") else self.student mdl_to_save.config.save_pretrained(self.dump_path) state_dict = mdl_to_save.state_dict() torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
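# ---------------------------------------------------------------------------
# For reference, the objective optimized in `step()` above, condensed
# (descriptive pseudo-code only -- nothing in this file calls it in this form):
#
#   loss = alpha_ce  * T**2 * KLDiv(log_softmax(s_logits / T), softmax(t_logits / T))
#        + alpha_mlm * CrossEntropy(s_logits, mlm_labels)      # or alpha_clm * CE on shifted logits/labels
#        + alpha_mse * MSE(s_logits, t_logits) / n_selected    # "batchmean"-style reduction
#        + alpha_cos * CosineEmbeddingLoss(s_hidden[-1], t_hidden[-1], target=1)
#
# where s_* / t_* are student / teacher outputs restricted to non-padded
# (or label-masked, if restrict_ce_to_mask) positions, and T is the temperature.
# ---------------------------------------------------------------------------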
transformers/examples/research_projects/distillation/distiller.py/0
{ "file_path": "transformers/examples/research_projects/distillation/distiller.py", "repo_id": "transformers", "token_count": 12494 }
280
<p align="center"> <img src="http://sayef.tech:8082/uploads/FSNER-LOGO-2.png" alt="FSNER LOGO"> </p> <p align="center"> Implemented by <a href="https://huggingface.co/sayef"> sayef </a>. </p> ## Overview The FSNER model was proposed in [Example-Based Named Entity Recognition](https://arxiv.org/abs/2008.10570) by Morteza Ziyadi, Yuting Sun, Abhishek Goswami, Jade Huang, Weizhu Chen. To identify entity spans in a new domain, it uses a train-free few-shot learning approach inspired by question-answering. ## Abstract ---- > We present a novel approach to named entity recognition (NER) in the presence of scarce data that we call example-based NER. Our train-free few-shot learning approach takes inspiration from question-answering to identify entity spans in a new and unseen domain. In comparison with the current state-of-the-art, the proposed method performs significantly better, especially when using a low number of support examples. ## Model Training Details ----- | identifier | epochs | datasets | | ---------- |:----------:| :-----:| | [sayef/fsner-bert-base-uncased](https://huggingface.co/sayef/fsner-bert-base-uncased) | 10 | ontonotes5, conll2003, wnut2017, and fin (Alvarado et al.). | ## Installation and Example Usage ------ You can use the FSNER model in 3 ways: 1. Install directly from PyPI: `pip install fsner` and import the model as shown in the code example below or 2. Install from source: `python setup.py install` and import the model as shown in the code example below or 3. Clone repo and change directory to `src` and import the model as shown in the code example below ```python from fsner import FSNERModel, FSNERTokenizerUtils model = FSNERModel("sayef/fsner-bert-base-uncased") tokenizer = FSNERTokenizerUtils("sayef/fsner-bert-base-uncased") # size of query and supports must be the same. If you want to find all the entitites in one particular query, just repeat the same query n times where n is equal to the number of supports (or entities). query = [ 'KWE 4000 can reach with a maximum speed from up to 450 P/min an accuracy from 50 mg', 'I would like to order a computer from eBay.', ] # each list in supports are the examples of one entity type # wrap entities around with [E] and [/E] in the examples supports = [ [ 'Horizontal flow wrapper [E] Pack 403 [/E] features the new retrofit-kit „paper-ON-form“', '[E] Paloma Pick-and-Place-Roboter [/E] arranges the bakery products for the downstream tray-forming equipment', 'Finally, the new [E] Kliklok ACE [/E] carton former forms cartons and trays without the use of glue', 'We set up our pilot plant with the right [E] FibreForm® [/E] configuration to make prototypes for your marketing tests and package validation', 'The [E] CAR-T5 [/E] is a reliable, purely mechanically driven cartoning machine for versatile application fields' ], [ "[E] Walmart [/E] is a leading e-commerce company", "I recently ordered a book from [E] Amazon [/E]", "I ordered this from [E] ShopClues [/E]", "[E] Flipkart [/E] started it's journey from zero" ] ] device = 'cpu' W_query = tokenizer.tokenize(query).to(device) W_supports = tokenizer.tokenize(supports).to(device) start_prob, end_prob = model(W_query, W_supports) output = tokenizer.extract_entity_from_scores(query, W_query, start_prob, end_prob, thresh=0.50) print(output) ```
transformers/examples/research_projects/fsner/README.md/0
{ "file_path": "transformers/examples/research_projects/fsner/README.md", "repo_id": "transformers", "token_count": 1174 }
281
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule): """ BigBirdForQuestionAnswering with CLS Head over the top for predicting category This way we can load its weights with FlaxBigBirdForQuestionAnswering """ config: BigBirdConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = True def setup(self): super().setup() self.cls = nn.Dense(5, dtype=self.dtype) def __call__(self, *args, **kwargs): outputs = super().__call__(*args, **kwargs) cls_out = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering): module_class = FlaxBigBirdForNaturalQuestionsModule def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels): def cross_entropy(logits, labels, reduction=None): """ Args: logits: bsz, seqlen, vocab_size labels: bsz, seqlen """ vocab_size = logits.shape[-1] labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4") logits = jax.nn.log_softmax(logits, axis=-1) loss = -jnp.sum(labels * logits, axis=-1) if reduction is not None: loss = reduction(loss) return loss cross_entropy = partial(cross_entropy, reduction=jnp.mean) start_loss = cross_entropy(start_logits, start_labels) end_loss = cross_entropy(end_logits, end_labels) pooled_loss = cross_entropy(pooled_logits, pooler_labels) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class Args: model_id: str = "google/bigbird-roberta-base" logging_steps: int = 3000 save_steps: int = 10500 block_size: int = 128 num_random_blocks: int = 3 batch_size_per_device: int = 1 max_epochs: int = 5 # tx_args lr: float = 3e-5 init_lr: float = 0.0 warmup_steps: int = 20000 weight_decay: float = 0.0095 save_dir: str = "bigbird-roberta-natural-questions" base_dir: str = "training-expt" tr_data_path: str = "data/nq-training.jsonl" val_data_path: str = "data/nq-validation.jsonl" def __post_init__(self): os.makedirs(self.base_dir, exist_ok=True) self.save_dir = os.path.join(self.base_dir, self.save_dir) self.batch_size = self.batch_size_per_device * jax.device_count() @dataclass class DataCollator: pad_id: int max_length: int = 4096 # no dynamic padding on TPUs def __call__(self, batch): batch = self.collate_fn(batch) batch = jax.tree_util.tree_map(shard, batch) return batch def collate_fn(self, features): input_ids, attention_mask = self.fetch_inputs(features["input_ids"]) batch = { "input_ids": jnp.array(input_ids, dtype=jnp.int32), "attention_mask": jnp.array(attention_mask, dtype=jnp.int32), "start_labels": jnp.array(features["start_token"], dtype=jnp.int32), "end_labels": jnp.array(features["end_token"], dtype=jnp.int32), "pooled_labels": jnp.array(features["category"], dtype=jnp.int32), } return batch def fetch_inputs(self, input_ids: list): inputs = [self._fetch_inputs(ids) for ids in input_ids] return zip(*inputs) def _fetch_inputs(self, input_ids: list): attention_mask = [1 for _ in 
range(len(input_ids))] while len(input_ids) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def get_batched_dataset(dataset, batch_size, seed=None): if seed is not None: dataset = dataset.shuffle(seed=seed) for i in range(len(dataset) // batch_size): batch = dataset[i * batch_size : (i + 1) * batch_size] yield dict(batch) @partial(jax.pmap, axis_name="batch") def train_step(state, drp_rng, **model_inputs): def loss_fn(params): start_labels = model_inputs.pop("start_labels") end_labels = model_inputs.pop("end_labels") pooled_labels = model_inputs.pop("pooled_labels") outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True) start_logits, end_logits, pooled_logits = outputs return state.loss_fn( start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, ) drp_rng, new_drp_rng = jax.random.split(drp_rng) grad_fn = jax.value_and_grad(loss_fn) loss, grads = grad_fn(state.params) metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") grads = jax.lax.pmean(grads, "batch") state = state.apply_gradients(grads=grads) return state, metrics, new_drp_rng @partial(jax.pmap, axis_name="batch") def val_step(state, **model_inputs): start_labels = model_inputs.pop("start_labels") end_labels = model_inputs.pop("end_labels") pooled_labels = model_inputs.pop("pooled_labels") outputs = state.apply_fn(**model_inputs, params=state.params, train=False) start_logits, end_logits, pooled_logits = outputs loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels) metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") return metrics class TrainState(train_state.TrainState): loss_fn: Callable = struct.field(pytree_node=False) @dataclass class Trainer: args: Args data_collator: Callable train_step_fn: Callable val_step_fn: Callable model_save_fn: Callable logger: wandb scheduler_fn: Callable = None def create_state(self, model, tx, num_train_steps, ckpt_dir=None): params = model.params state = TrainState.create( apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, ) if ckpt_dir is not None: params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state) tx_args = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } tx, lr = build_tx(**tx_args) state = train_state.TrainState( step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, ) self.args = args self.data_collator = data_collator self.scheduler_fn = lr model.params = params state = jax_utils.replicate(state) return state def train(self, state, tr_dataset, val_dataset): args = self.args total = len(tr_dataset) // args.batch_size rng = jax.random.PRNGKey(0) drp_rng = jax.random.split(rng, jax.device_count()) for epoch in range(args.max_epochs): running_loss = jnp.array(0, dtype=jnp.float32) tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch) i = 0 for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"): batch = self.data_collator(batch) state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: state_step = jax_utils.unreplicate(state.step) tr_loss = running_loss.item() / i lr = self.scheduler_fn(state_step - 1) eval_loss = self.evaluate(state, val_dataset) logging_dict = { "step": 
state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(logging_dict)) self.logger.log(logging_dict, commit=True) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state) def evaluate(self, state, dataset): dataloader = get_batched_dataset(dataset, self.args.batch_size) total = len(dataset) // self.args.batch_size running_loss = jnp.array(0, dtype=jnp.float32) i = 0 for batch in tqdm(dataloader, total=total, desc="Evaluating ... "): batch = self.data_collator(batch) metrics = self.val_step_fn(state, **batch) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def save_checkpoint(self, save_dir, state): state = jax_utils.unreplicate(state) print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ") self.model_save_fn(save_dir, params=state.params) with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args, os.path.join(save_dir, "args.joblib")) joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib")) with open(os.path.join(save_dir, "training_state.json"), "w") as f: json.dump({"step": state.step.item()}, f) print("DONE") def restore_checkpoint(save_dir, state): print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ") with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f: params = from_bytes(state.params, f.read()) with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f: opt_state = from_bytes(state.opt_state, f.read()) args = joblib.load(os.path.join(save_dir, "args.joblib")) data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib")) with open(os.path.join(save_dir, "training_state.json"), "r") as f: training_state = json.load(f) step = training_state["step"] print("DONE") return params, opt_state, step, args, data_collator def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps): decay_steps = num_train_steps - warmup_steps warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps) decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps) lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps]) return lr def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay): def weight_decay_mask(params): params = traverse_util.flatten_dict(params) mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(mask) lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps) tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask) return tx, lr
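# ---------------------------------------------------------------------------
# Rough wiring sketch (illustrative only -- the real entry point lives in a
# separate training script; `tokenizer`, `tr_dataset` and `val_dataset` are
# assumed to be prepared elsewhere, and none of the values below are
# prescribed by this file):
#
#   args = Args()
#   model = FlaxBigBirdForNaturalQuestions.from_pretrained(
#       args.model_id, block_size=args.block_size, num_random_blocks=args.num_random_blocks
#   )
#   num_train_steps = args.max_epochs * (len(tr_dataset) // args.batch_size)
#   tx, lr = build_tx(args.lr, args.init_lr, args.warmup_steps, num_train_steps, args.weight_decay)
#   trainer = Trainer(
#       args=args,
#       data_collator=DataCollator(pad_id=tokenizer.pad_token_id, max_length=4096),
#       train_step_fn=train_step,
#       val_step_fn=val_step,
#       model_save_fn=model.save_pretrained,
#       logger=wandb.init(project="bigbird-natural-questions"),
#       scheduler_fn=lr,
#   )
#   state = trainer.create_state(model, tx, num_train_steps)
#   trainer.train(state, tr_dataset, val_dataset)
# ---------------------------------------------------------------------------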
transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py", "repo_id": "transformers", "token_count": 5480 }
282
# Wav2Vec2 Contrastive Loss PreTraining examples

The following example showcases how to pretrain a wav2vec2 model using the JAX/Flax backend.
Pretraining Wav2Vec2 is rather complex, so it is highly recommended to read the
[official paper](https://arxiv.org/abs/2006.11477).

JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU.
Models written in JAX/Flax are **immutable** and updated in a purely functional
way which enables simple and efficient model parallelism.

`run_wav2vec2_pretrain_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then pretrain the wav2vec2 architectures above on it.

For custom datasets in `jsonlines` format please see: [the Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets#json-files) and you also will find examples of these below.

Let's start by creating a model repository to save the trained model and logs.
Here we call the model `"wav2vec2-base-robust"`, but you can change the model name as you like.

You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that you are logged in) or via the command line:

```bash
huggingface-cli repo create wav2vec2-base-robust
```

Next we clone the model repository to add the tokenizer and model files.

```bash
git clone https://huggingface.co/<your-username>/wav2vec2-base-robust
```

To ensure that all tensorboard traces will be uploaded correctly, we need to
track them. You can run the following command inside your model repo to do so.

```bash
cd wav2vec2-base-robust
git lfs track "*tfevents*"
```

Great, we have set up our model repository. During training, we will automatically
push the training logs and model weights to the repo.

Next, let's add a symbolic link to the `run_wav2vec2_pretrain_flax.py` script.

```bash
export MODEL_DIR="./wav2vec2-base-robust"
ln -s ~/transformers/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py ./
```

### Create the model configuration

Let's first create the model configuration and store it in the model repository.
Note that many training parameters can be set in the model configuration, including
the configuration of the masking distribution (`mask_time_length`, `mask_time_prob`),
dropout (`attention_dropout`, ...), the trade-off between the contrastive loss and
the diversity loss, etc...
Most likely you will need to change these parameters depending on your use case.
Again, we highly recommend reading the [official paper](https://arxiv.org/abs/2006.11477)
to better understand which parameters can be set for pretraining.

For this example, we will be using a `"base"`-sized model of Wav2Vec2 with robust
layer norm and keep most of the default settings.

```python
model_dir="./wav2vec2-base-robust"

from transformers import Wav2Vec2Config
config = Wav2Vec2Config.from_pretrained(
    "facebook/wav2vec2-base",
    mask_time_length=10,
    mask_time_prob=0.05,
    diversity_loss_weight=0.1,
    num_negatives=100,
    do_stable_layer_norm=True,
    feat_extract_norm="layer",
)
config.save_pretrained(model_dir)
```

### Create a feature extractor configuration

Before we can start the training, we need to define
a feature extractor that takes care of normalization, etc...

Here we can also re-use the feature extractor of [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) while making sure that padding is allowed.
```python
model_dir="./wav2vec2-base-robust"

from transformers import Wav2Vec2FeatureExtractor
config = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base", return_attention_mask=True)
config.save_pretrained(model_dir)
```

### Train the model

Finally, we can run the example script to train the model:

```bash
./run_wav2vec2_pretrain_flax.py \
    --output_dir=${MODEL_DIR} \
    --num_train_epochs="5" \
    --per_device_train_batch_size="32" \
    --per_device_eval_batch_size="32" \
    --learning_rate="5e-4" \
    --weight_decay="0.01" \
    --warmup_steps="2000" \
    --model_name_or_path=${MODEL_DIR} \
    --dataset_name="librispeech_asr" \
    --dataset_config_name="clean" \
    --train_split_name="train.100" \
    --preprocessing_num_workers="4" \
    --max_duration_in_seconds="10.0" \
    --adam_beta1="0.9" \
    --adam_beta2="0.98" \
    --pad_to_multiple_of="16384" \
    --push_to_hub
```

Note that this script is not fully tested yet, so we cannot ensure that the above script leads to satisfying results.
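For the custom `jsonlines` datasets mentioned above, each line should be a single JSON object describing one audio sample. The exact column name the script expects depends on its data arguments, so treat the field name below as a placeholder; this is only a rough, untested sketch of how such files can be loaded with 🤗 Datasets:

```python
# Illustrative only: each line of train.jsonl is assumed to look like
# {"file": "/path/to/clip_0001.wav"}; the column name actually expected by
# run_wav2vec2_pretrain_flax.py may differ.
from datasets import load_dataset

raw_datasets = load_dataset(
    "json",
    data_files={"train": "train.jsonl", "validation": "validation.jsonl"},
)
print(raw_datasets["train"][0])
```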
transformers/examples/research_projects/jax-projects/wav2vec2/README.md/0
{ "file_path": "transformers/examples/research_projects/jax-projects/wav2vec2/README.md", "repo_id": "transformers", "token_count": 1507 }
283
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class ResizeShortestEdge: def __init__(self, short_edge_length, max_size=sys.maxsize): """ Args: short_edge_length (list[min, max]) max_size (int): maximum allowed longest edge length. """ self.interp_method = "bilinear" self.max_size = max_size self.short_edge_length = short_edge_length def __call__(self, imgs): img_augs = [] for img in imgs: h, w = img.shape[:2] # later: provide list and randomly choose index for resize size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) if size == 0: return img scale = size * 1.0 / min(h, w) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size if max(newh, neww) > self.max_size: scale = self.max_size * 1.0 / max(newh, neww) newh = newh * scale neww = neww * scale neww = int(neww + 0.5) newh = int(newh + 0.5) if img.dtype == np.uint8: pil_image = Image.fromarray(img) pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR) img = np.asarray(pil_image) else: img = img.permute(2, 0, 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw img = nn.functional.interpolate( img, (newh, neww), mode=self.interp_method, align_corners=False ).squeeze(0) img_augs.append(img) return img_augs class Preprocess: def __init__(self, cfg): self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST) self.input_format = cfg.INPUT.FORMAT self.size_divisibility = cfg.SIZE_DIVISIBILITY self.pad_value = cfg.PAD_VALUE self.max_image_size = cfg.INPUT.MAX_SIZE_TEST self.device = cfg.MODEL.DEVICE self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std def pad(self, images): max_size = tuple(max(s) for s in zip(*[img.shape for img in images])) image_sizes = [im.shape[-2:] for im in images] images = [ nn.functional.pad( im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, ) for size, im in zip(image_sizes, images) ] return torch.stack(images), torch.tensor(image_sizes) def __call__(self, images, single_image=False): with torch.no_grad(): if not isinstance(images, list): images = [images] if single_image: assert len(images) == 1 for i in range(len(images)): if isinstance(images[i], torch.Tensor): images.insert(i, images.pop(i).to(self.device).float()) elif not isinstance(images[i], torch.Tensor): images.insert( i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format)) .to(self.device) .float(), ) # resize smallest edge raw_sizes = torch.tensor([im.shape[:2] for im in images]) images = 
self.aug(images) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic images = [self.normalizer(x) for x in images] # now pad them to do the following operations images, sizes = self.pad(images) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad scales_yx = torch.true_divide(raw_sizes, sizes) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _scale_box(boxes, scale_yx): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _clip_box(tensor, box_size: Tuple[int, int]): assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size tensor[:, 0].clamp_(min=0, max=w) tensor[:, 1].clamp_(min=0, max=h) tensor[:, 2].clamp_(min=0, max=w) tensor[:, 3].clamp_(min=0, max=h)
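# ---------------------------------------------------------------------------
# Illustrative only (not part of the original file): a rough usage sketch.
# `cfg` is assumed to be the Detectron2-style config object used elsewhere in
# this research project, and "demo.jpg" is a placeholder image path.
#
#   preprocess = Preprocess(cfg)
#   images, sizes, scales_yx = preprocess("demo.jpg", single_image=True)
#   # `images` is the normalized, padded image tensor, `sizes` the resized
#   # (pre-padding) height/width, and `scales_yx` the ratio between the raw
#   # and resized sizes that can be used to rescale predicted boxes.
# ---------------------------------------------------------------------------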
transformers/examples/research_projects/lxmert/processing_image.py/0
{ "file_path": "transformers/examples/research_projects/lxmert/processing_image.py", "repo_id": "transformers", "token_count": 2793 }
284
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked BERT model configuration. It replicates the class `~transformers.BertConfig`
and adapts it to the specificities of MaskedBert (`pruning_method`, `mask_init` and `mask_scale`)."""

import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """
    A class replicating the `~transformers.BertConfig` with additional parameters for pruning/masking configuration.
    """

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
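# ---------------------------------------------------------------------------
# Illustrative only (not part of the original file): a minimal usage sketch of
# the configuration class defined above.
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   print(config.pruning_method, config.hidden_size)
# ---------------------------------------------------------------------------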
transformers/examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py/0
{ "file_path": "transformers/examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py", "repo_id": "transformers", "token_count": 995 }
285