Dataset columns:
index: int64 (0 to 731k)
package: string (length 2 to 98)
name: string (length 1 to 76)
docstring: string (length 0 to 281k)
code: string (length 4 to 1.07M)
signature: string (length 2 to 42.8k)
10,044
sentence_transformers.SentenceTransformer
encode_multi_process
Runs encode() on multiple GPUs. The sentences are chunked into smaller packages and sent to individual processes, which encode them on the different GPUs. This method is only suitable for encoding large sets of sentences.
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param prompt_name: The name of the prompt to use for encoding. Must be a key in the `prompts` dictionary, which is either set in the constructor or loaded from the model configuration. For example, if `prompt_name` is ``"query"`` and `prompts` is ``{"query": "query: {}", ...}``, then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?". If `prompt` is also set, this argument is ignored.
:param prompt: The prompt to use for encoding. For example, if the prompt is ``"query: {}"``, then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?". If `prompt` is set, `prompt_name` is ignored.
:param batch_size: Batch size used for encoding sentences
:param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined automatically.
:param normalize_embeddings: Whether to normalize returned vectors to have length 1. In that case, the faster dot-product (util.dot_score) can be used instead of cosine similarity.
:return: 2D numpy array with shape [num_inputs, output_dimension]
def encode_multi_process(
    self,
    sentences: List[str],
    pool: Dict[str, object],
    prompt_name: Optional[str] = None,
    prompt: Optional[str] = None,
    batch_size: int = 32,
    chunk_size: int = None,
    normalize_embeddings: bool = False,
):
    """
    Runs encode() on multiple GPUs. The sentences are chunked into smaller packages
    and sent to individual processes, which encode them on the different GPUs. This
    method is only suitable for encoding large sets of sentences.

    :param sentences: List of sentences
    :param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
    :param prompt_name: The name of the prompt to use for encoding. Must be a key in the
        `prompts` dictionary, which is either set in the constructor or loaded from the
        model configuration. For example, if `prompt_name` is ``"query"`` and `prompts` is
        ``{"query": "query: {}", ...}``, then the sentence "What is the capital of France?"
        will be encoded as "query: What is the capital of France?". If `prompt` is also
        set, this argument is ignored.
    :param prompt: The prompt to use for encoding. For example, if the prompt is
        ``"query: {}"``, then the sentence "What is the capital of France?" will be
        encoded as "query: What is the capital of France?". If `prompt` is set,
        `prompt_name` is ignored.
    :param batch_size: Batch size used for encoding sentences
    :param chunk_size: Sentences are chunked and sent to the individual processes.
        If None, a sensible size is determined automatically.
    :param normalize_embeddings: Whether to normalize returned vectors to have length 1.
        In that case, the faster dot-product (util.dot_score) can be used instead of
        cosine similarity.
    :return: 2D numpy array with shape [num_inputs, output_dimension]
    """
    if chunk_size is None:
        chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)

    logger.debug(f"Chunk data into {math.ceil(len(sentences) / chunk_size)} packages of size {chunk_size}")

    input_queue = pool["input"]
    last_chunk_id = 0
    chunk = []

    for sentence in sentences:
        chunk.append(sentence)
        if len(chunk) >= chunk_size:
            input_queue.put([last_chunk_id, batch_size, chunk, prompt_name, prompt, normalize_embeddings])
            last_chunk_id += 1
            chunk = []

    if len(chunk) > 0:
        input_queue.put([last_chunk_id, batch_size, chunk, prompt_name, prompt, normalize_embeddings])
        last_chunk_id += 1

    output_queue = pool["output"]
    results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
    embeddings = np.concatenate([result[1] for result in results_list])
    return embeddings
(self, sentences: List[str], pool: Dict[str, object], prompt_name: Optional[str] = None, prompt: Optional[str] = None, batch_size: int = 32, chunk_size: Optional[int] = None, normalize_embeddings: bool = False)
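A minimal usage sketch for encode_multi_process; the checkpoint name (the public all-MiniLM-L6-v2) and the toy corpus are illustrative:

from sentence_transformers import SentenceTransformer

if __name__ == "__main__":  # multiprocessing needs a main guard on spawn-based platforms
    model = SentenceTransformer("all-MiniLM-L6-v2")
    sentences = ["First sentence", "Second sentence"] * 5000

    pool = model.start_multi_process_pool()  # one worker per available GPU, or CPU workers otherwise
    embeddings = model.encode_multi_process(sentences, pool, batch_size=32)
    model.stop_multi_process_pool(pool)      # always shut the pool down when done

    print(embeddings.shape)  # (10000, 384) for this checkpoint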
10,045
torch.nn.modules.module
eval
Set the module in evaluation mode. This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. This is equivalent to :meth:`self.train(False) <torch.nn.Module.train>`. See :ref:`locally-disable-grad-doc` for a comparison between `.eval()` and several similar mechanisms that may be confused with it. Returns: Module: self
def eval(self: T) -> T:
    r"""Set the module in evaluation mode.

    This has an effect only on certain modules. See the documentation of
    particular modules for details of their behavior in training/evaluation
    mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc.

    This is equivalent to :meth:`self.train(False) <torch.nn.Module.train>`.

    See :ref:`locally-disable-grad-doc` for a comparison between `.eval()` and
    several similar mechanisms that may be confused with it.

    Returns:
        Module: self
    """
    return self.train(False)
(self: ~T) -> ~T
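A short example of the entry above: eval() disables Dropout, but autograd stays on, so pair it with torch.no_grad() for inference.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
model.eval()                    # evaluation mode: Dropout becomes a no-op; returns self
with torch.no_grad():           # eval() does not disable gradient tracking on its own
    out = model(torch.randn(1, 4))
model.train()                   # switch back before resuming training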
10,046
sentence_transformers.SentenceTransformer
evaluate
Evaluate the model with the given evaluator. :param evaluator: the evaluator to run :param output_path: path to which the evaluator may write its results
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
    """
    Evaluate the model with the given evaluator.

    :param evaluator: the evaluator to run
    :param output_path: path to which the evaluator may write its results
    """
    if output_path is not None:
        os.makedirs(output_path, exist_ok=True)
    return evaluator(self, output_path)
(self, evaluator: sentence_transformers.evaluation.SentenceEvaluator.SentenceEvaluator, output_path: Optional[str] = None)
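A usage sketch for evaluate(), again assuming the public all-MiniLM-L6-v2 checkpoint; the sentence pairs and gold scores are toy values:

from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator

model = SentenceTransformer("all-MiniLM-L6-v2")
evaluator = EmbeddingSimilarityEvaluator(
    sentences1=["A man is eating food.", "A plane is taking off.", "A woman plays violin."],
    sentences2=["A man is eating a meal.", "An airplane departs.", "A dog runs in a park."],
    scores=[0.95, 0.90, 0.10],  # gold similarities in [0, 1]
)
score = model.evaluate(evaluator, output_path="eval_results/")  # writes result files to the path
print(score)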
10,047
torch.nn.modules.container
extend
null
def extend(self, sequential) -> 'Sequential':
    for layer in sequential:
        self.append(layer)
    return self
(self, sequential) -> torch.nn.modules.container.Sequential
10,048
torch.nn.modules.module
extra_repr
Set the extra representation of the module. To print customized extra information, you should re-implement this method in your own modules. Both single-line and multi-line strings are acceptable.
def extra_repr(self) -> str:
    r"""Set the extra representation of the module.

    To print customized extra information, you should re-implement
    this method in your own modules. Both single-line and multi-line
    strings are acceptable.
    """
    return ''
(self) -> str
10,049
sentence_transformers.SentenceTransformer
fit
Train the model with the given training objectives. Each training objective is sampled in turn for one batch. We sample only as many batches from each objective as there are in the smallest one to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disk.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch equals the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
:param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from zero up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using the evaluator after that many training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to the evaluator) is stored at output_path
:param max_grad_norm: Used for gradient norm clipping
:param use_amp: Use Automatic Mixed Precision (AMP). Only for PyTorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation. It must accept the following three parameters in this order: `score`, `epoch`, `steps`
:param show_progress_bar: If True, output a tqdm progress bar
:param checkpoint_path: Folder to save checkpoints during training
:param checkpoint_save_steps: Save a checkpoint every this many steps
:param checkpoint_save_total_limit: Total number of checkpoints to store
def fit(
    self,
    train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
    evaluator: SentenceEvaluator = None,
    epochs: int = 1,
    steps_per_epoch=None,
    scheduler: str = "WarmupLinear",
    warmup_steps: int = 10000,
    optimizer_class: Type[Optimizer] = torch.optim.AdamW,
    optimizer_params: Dict[str, object] = {"lr": 2e-5},
    weight_decay: float = 0.01,
    evaluation_steps: int = 0,
    output_path: str = None,
    save_best_model: bool = True,
    max_grad_norm: float = 1,
    use_amp: bool = False,
    callback: Callable[[float, int, int], None] = None,
    show_progress_bar: bool = True,
    checkpoint_path: str = None,
    checkpoint_save_steps: int = 500,
    checkpoint_save_total_limit: int = 0,
):
    """
    Train the model with the given training objectives.

    Each training objective is sampled in turn for one batch. We sample only as many
    batches from each objective as there are in the smallest one to make sure of equal
    training with each dataset.

    :param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
    :param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance
        during training on held-out dev data. It is used to determine the best model that is saved to disk.
    :param epochs: Number of epochs for training
    :param steps_per_epoch: Number of training steps per epoch. If set to None (default),
        one epoch equals the DataLoader size from train_objectives.
    :param scheduler: Learning rate scheduler. Available schedulers:
        constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
    :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default),
        the learning rate is increased from zero up to the maximal learning rate. After these
        many training steps, the learning rate is decreased linearly back to zero.
    :param optimizer_class: Optimizer
    :param optimizer_params: Optimizer parameters
    :param weight_decay: Weight decay for model parameters
    :param evaluation_steps: If > 0, evaluate the model using the evaluator after that many training steps
    :param output_path: Storage path for the model and evaluation files
    :param save_best_model: If true, the best model (according to the evaluator) is stored at output_path
    :param max_grad_norm: Used for gradient norm clipping
    :param use_amp: Use Automatic Mixed Precision (AMP). Only for PyTorch >= 1.6.0
    :param callback: Callback function that is invoked after each evaluation. It must accept
        the following three parameters in this order: `score`, `epoch`, `steps`
    :param show_progress_bar: If True, output a tqdm progress bar
    :param checkpoint_path: Folder to save checkpoints during training
    :param checkpoint_save_steps: Save a checkpoint every this many steps
    :param checkpoint_save_total_limit: Total number of checkpoints to store
    """
    ##Add info to model card
    # info_loss_functions = "\n".join(["- {} with {} training examples".format(str(loss), len(dataloader)) for dataloader, loss in train_objectives])
    info_loss_functions = []
    for dataloader, loss in train_objectives:
        info_loss_functions.extend(ModelCardTemplate.get_train_objective_info(dataloader, loss))
    info_loss_functions = "\n\n".join([text for text in info_loss_functions])

    info_fit_parameters = json.dumps(
        {
            "evaluator": fullname(evaluator),
            "epochs": epochs,
            "steps_per_epoch": steps_per_epoch,
            "scheduler": scheduler,
            "warmup_steps": warmup_steps,
            "optimizer_class": str(optimizer_class),
            "optimizer_params": optimizer_params,
            "weight_decay": weight_decay,
            "evaluation_steps": evaluation_steps,
            "max_grad_norm": max_grad_norm,
        },
        indent=4,
        sort_keys=True,
    )
    self._model_card_text = None
    self._model_card_vars["{TRAINING_SECTION}"] = ModelCardTemplate.__TRAINING_SECTION__.replace(
        "{LOSS_FUNCTIONS}", info_loss_functions
    ).replace("{FIT_PARAMETERS}", info_fit_parameters)

    if use_amp:
        if is_torch_npu_available():
            scaler = torch.npu.amp.GradScaler()
        else:
            scaler = torch.cuda.amp.GradScaler()

    self.to(self.device)

    dataloaders = [dataloader for dataloader, _ in train_objectives]
    # Use smart batching
    for dataloader in dataloaders:
        dataloader.collate_fn = self.smart_batching_collate

    loss_models = [loss for _, loss in train_objectives]
    for loss_model in loss_models:
        loss_model.to(self.device)

    self.best_score = -9999999

    if steps_per_epoch is None or steps_per_epoch == 0:
        steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])

    num_train_steps = int(steps_per_epoch * epochs)

    # Prepare optimizers
    optimizers = []
    schedulers = []
    for loss_model in loss_models:
        param_optimizer = list(loss_model.named_parameters())

        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]

        optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
        scheduler_obj = self._get_scheduler(
            optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps
        )

        optimizers.append(optimizer)
        schedulers.append(scheduler_obj)

    global_step = 0
    data_iterators = [iter(dataloader) for dataloader in dataloaders]

    num_train_objectives = len(train_objectives)

    skip_scheduler = False
    for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
        training_steps = 0

        for loss_model in loss_models:
            loss_model.zero_grad()
            loss_model.train()

        for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
            for train_idx in range(num_train_objectives):
                loss_model = loss_models[train_idx]
                optimizer = optimizers[train_idx]
                scheduler = schedulers[train_idx]
                data_iterator = data_iterators[train_idx]

                try:
                    data = next(data_iterator)
                except StopIteration:
                    data_iterator = iter(dataloaders[train_idx])
                    data_iterators[train_idx] = data_iterator
                    data = next(data_iterator)

                features, labels = data
                labels = labels.to(self.device)
                features = list(map(lambda batch: batch_to_device(batch, self.device), features))

                if use_amp:
                    with torch.autocast(device_type=self.device.type):
                        loss_value = loss_model(features, labels)

                    scale_before_step = scaler.get_scale()
                    scaler.scale(loss_value).backward()
                    scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
                    scaler.step(optimizer)
                    scaler.update()

                    skip_scheduler = scaler.get_scale() != scale_before_step
                else:
                    loss_value = loss_model(features, labels)
                    loss_value.backward()
                    torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
                    optimizer.step()

                optimizer.zero_grad()

                if not skip_scheduler:
                    scheduler.step()

            training_steps += 1
            global_step += 1

            if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
                self._eval_during_training(
                    evaluator, output_path, save_best_model, epoch, training_steps, callback
                )

                for loss_model in loss_models:
                    loss_model.zero_grad()
                    loss_model.train()

            if (
                checkpoint_path is not None
                and checkpoint_save_steps is not None
                and checkpoint_save_steps > 0
                and global_step % checkpoint_save_steps == 0
            ):
                self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)

        self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)

    if evaluator is None and output_path is not None:  # No evaluator, but output path: save final model version
        self.save(output_path)

    if checkpoint_path is not None:
        self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
(self, train_objectives: Iterable[Tuple[torch.utils.data.dataloader.DataLoader, torch.nn.modules.module.Module]], evaluator: Optional[sentence_transformers.evaluation.SentenceEvaluator.SentenceEvaluator] = None, epochs: int = 1, steps_per_epoch=None, scheduler: str = 'WarmupLinear', warmup_steps: int = 10000, optimizer_class: Type[torch.optim.optimizer.Optimizer] = <class 'torch.optim.adamw.AdamW'>, optimizer_params: Dict[str, object] = {'lr': 2e-05}, weight_decay: float = 0.01, evaluation_steps: int = 0, output_path: Optional[str] = None, save_best_model: bool = True, max_grad_norm: float = 1, use_amp: bool = False, callback: Optional[Callable[[float, int, int], NoneType]] = None, show_progress_bar: bool = True, checkpoint_path: Optional[str] = None, checkpoint_save_steps: int = 500, checkpoint_save_total_limit: int = 0)
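A minimal fit() sketch with a single training objective; the checkpoint name and the two toy training examples are illustrative:

from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer("all-MiniLM-L6-v2")
train_examples = [
    InputExample(texts=["Anchor one", "Positive one"], label=0.9),
    InputExample(texts=["Anchor two", "Negative two"], label=0.1),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.CosineSimilarityLoss(model)  # regression loss on the labeled similarity

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    warmup_steps=10,
    output_path="output/toy-model",
)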
10,050
torch.nn.modules.module
float
Casts all floating point parameters and buffers to ``float`` datatype. .. note:: This method modifies the module in-place. Returns: Module: self
def float(self: T) -> T:
    r"""Casts all floating point parameters and buffers to ``float`` datatype.

    .. note::
        This method modifies the module in-place.

    Returns:
        Module: self
    """
    return self._apply(lambda t: t.float() if t.is_floating_point() else t)
(self: ~T) -> ~T
10,051
torch.nn.modules.container
forward
null
def forward(self, input):
    for module in self:
        input = module(input)
    return input
(self, input)
10,052
torch.nn.modules.module
get_buffer
Return the buffer given by ``target`` if it exists, otherwise throw an error. See the docstring for ``get_submodule`` for a more detailed explanation of this method's functionality as well as how to correctly specify ``target``. Args: target: The fully-qualified string name of the buffer to look for. (See ``get_submodule`` for how to specify a fully-qualified string.) Returns: torch.Tensor: The buffer referenced by ``target`` Raises: AttributeError: If the target string references an invalid path or resolves to something that is not a buffer
def get_buffer(self, target: str) -> "Tensor":
    """Return the buffer given by ``target`` if it exists, otherwise throw an error.

    See the docstring for ``get_submodule`` for a more detailed explanation of this
    method's functionality as well as how to correctly specify ``target``.

    Args:
        target: The fully-qualified string name of the buffer to look for.
            (See ``get_submodule`` for how to specify a fully-qualified string.)

    Returns:
        torch.Tensor: The buffer referenced by ``target``

    Raises:
        AttributeError: If the target string references an invalid path or
            resolves to something that is not a buffer
    """
    module_path, _, buffer_name = target.rpartition(".")

    mod: torch.nn.Module = self.get_submodule(module_path)

    if not hasattr(mod, buffer_name):
        raise AttributeError(mod._get_name() + " has no attribute `" + buffer_name + "`")

    buffer: torch.Tensor = getattr(mod, buffer_name)

    if buffer_name not in mod._buffers:
        raise AttributeError("`" + buffer_name + "` is not a buffer")

    return buffer
(self, target: str) -> torch.Tensor
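For instance, BatchNorm's running statistics are buffers and can be fetched by their fully-qualified name:

import torch.nn as nn

net = nn.Sequential(nn.BatchNorm1d(8))
running_mean = net.get_buffer("0.running_mean")  # submodule "0", buffer "running_mean"
print(running_mean.shape)                        # torch.Size([8])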
10,053
torch.nn.modules.module
get_extra_state
Return any extra state to include in the module's state_dict. Implement this and a corresponding :func:`set_extra_state` for your module if you need to store extra state. This function is called when building the module's `state_dict()`. Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes. Returns: object: Any extra state to store in the module's state_dict
def get_extra_state(self) -> Any:
    """Return any extra state to include in the module's state_dict.

    Implement this and a corresponding :func:`set_extra_state` for your module
    if you need to store extra state. This function is called when building the
    module's `state_dict()`.

    Note that extra state should be picklable to ensure working serialization
    of the state_dict. We only provide backwards compatibility guarantees
    for serializing Tensors; other objects may break backwards compatibility if
    their serialized pickled form changes.

    Returns:
        object: Any extra state to store in the module's state_dict
    """
    raise RuntimeError(
        "Reached a code path in Module.get_extra_state() that should never be called. "
        "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml "
        "to report this bug.")
(self) -> Any
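A sketch of the intended override pattern: a module that round-trips a plain Python counter through state_dict() via get_extra_state/set_extra_state (the Counter class is hypothetical):

import torch.nn as nn

class Counter(nn.Module):
    def __init__(self):
        super().__init__()
        self.calls = 0                    # plain attribute, not a parameter or buffer

    def get_extra_state(self):
        return {"calls": self.calls}      # must be picklable

    def set_extra_state(self, state):
        self.calls = state["calls"]

src, dst = Counter(), Counter()
src.calls = 7
dst.load_state_dict(src.state_dict())     # extra state travels inside the state_dict
print(dst.calls)                          # 7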
10,054
sentence_transformers.SentenceTransformer
get_max_seq_length
Returns the maximal sequence length the model accepts as input. Longer inputs will be truncated.
def get_max_seq_length(self):
    """
    Returns the maximal sequence length the model accepts as input. Longer inputs will be truncated.
    """
    if hasattr(self._first_module(), "max_seq_length"):
        return self._first_module().max_seq_length

    return None
(self)
10,055
torch.nn.modules.module
get_parameter
Return the parameter given by ``target`` if it exists, otherwise throw an error. See the docstring for ``get_submodule`` for a more detailed explanation of this method's functionality as well as how to correctly specify ``target``. Args: target: The fully-qualified string name of the Parameter to look for. (See ``get_submodule`` for how to specify a fully-qualified string.) Returns: torch.nn.Parameter: The Parameter referenced by ``target`` Raises: AttributeError: If the target string references an invalid path or resolves to something that is not an ``nn.Parameter``
def get_parameter(self, target: str) -> "Parameter":
    """Return the parameter given by ``target`` if it exists, otherwise throw an error.

    See the docstring for ``get_submodule`` for a more detailed explanation of this
    method's functionality as well as how to correctly specify ``target``.

    Args:
        target: The fully-qualified string name of the Parameter to look for.
            (See ``get_submodule`` for how to specify a fully-qualified string.)

    Returns:
        torch.nn.Parameter: The Parameter referenced by ``target``

    Raises:
        AttributeError: If the target string references an invalid path or
            resolves to something that is not an ``nn.Parameter``
    """
    module_path, _, param_name = target.rpartition(".")

    mod: torch.nn.Module = self.get_submodule(module_path)

    if not hasattr(mod, param_name):
        raise AttributeError(mod._get_name() + " has no attribute `" + param_name + "`")

    param: torch.nn.Parameter = getattr(mod, param_name)

    if not isinstance(param, torch.nn.Parameter):
        raise AttributeError("`" + param_name + "` is not an nn.Parameter")

    return param
(self, target: str) -> torch.nn.parameter.Parameter
10,056
sentence_transformers.SentenceTransformer
get_sentence_embedding_dimension
:return: The number of dimensions in the output of `encode`. If it's not known, it's `None`.
def get_sentence_embedding_dimension(self) -> Optional[int]:
    """
    :return: The number of dimensions in the output of `encode`. If it's not known, it's `None`.
    """
    output_dim = None
    for mod in reversed(self._modules.values()):
        sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
        if callable(sent_embedding_dim_method):
            output_dim = sent_embedding_dim_method()
            break
    if self.truncate_dim is not None:
        # The user requested truncation. If they set it to a dim greater than output_dim,
        # no truncation will actually happen. So return output_dim instead of self.truncate_dim.
        return min(output_dim or np.inf, self.truncate_dim)
    return output_dim
(self) -> Optional[int]
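A quick check of this getter and get_max_seq_length (above), assuming the public all-MiniLM-L6-v2 checkpoint:

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
print(model.get_sentence_embedding_dimension())  # 384 for this checkpoint
print(model.get_max_seq_length())                # 256 for this checkpoint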
10,057
sentence_transformers.SentenceTransformer
get_sentence_features
null
def get_sentence_features(self, *features):
    return self._first_module().get_sentence_features(*features)
(self, *features)
10,058
torch.nn.modules.module
get_submodule
Return the submodule given by ``target`` if it exists, otherwise throw an error. For example, let's say you have an ``nn.Module`` ``A`` that looks like this: .. code-block:: text A( (net_b): Module( (net_c): Module( (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2)) ) (linear): Linear(in_features=100, out_features=200, bias=True) ) ) (The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested submodule ``net_b``, which itself has two submodules ``net_c`` and ``linear``. ``net_c`` then has a submodule ``conv``.) To check whether or not we have the ``linear`` submodule, we would call ``get_submodule("net_b.linear")``. To check whether we have the ``conv`` submodule, we would call ``get_submodule("net_b.net_c.conv")``. The runtime of ``get_submodule`` is bounded by the degree of module nesting in ``target``. A query against ``named_modules`` achieves the same result, but it is O(N) in the number of transitive modules. So, for a simple check to see if some submodule exists, ``get_submodule`` should always be used. Args: target: The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.) Returns: torch.nn.Module: The submodule referenced by ``target`` Raises: AttributeError: If the target string references an invalid path or resolves to something that is not an ``nn.Module``
def get_submodule(self, target: str) -> "Module":
    """Return the submodule given by ``target`` if it exists, otherwise throw an error.

    For example, let's say you have an ``nn.Module`` ``A`` that looks like this:

    .. code-block:: text

        A(
            (net_b): Module(
                (net_c): Module(
                    (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
                )
                (linear): Linear(in_features=100, out_features=200, bias=True)
            )
        )

    (The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested submodule
    ``net_b``, which itself has two submodules ``net_c`` and ``linear``.
    ``net_c`` then has a submodule ``conv``.)

    To check whether or not we have the ``linear`` submodule, we would call
    ``get_submodule("net_b.linear")``. To check whether we have the ``conv``
    submodule, we would call ``get_submodule("net_b.net_c.conv")``.

    The runtime of ``get_submodule`` is bounded by the degree of module nesting
    in ``target``. A query against ``named_modules`` achieves the same result,
    but it is O(N) in the number of transitive modules. So, for a simple check
    to see if some submodule exists, ``get_submodule`` should always be used.

    Args:
        target: The fully-qualified string name of the submodule to look for.
            (See above example for how to specify a fully-qualified string.)

    Returns:
        torch.nn.Module: The submodule referenced by ``target``

    Raises:
        AttributeError: If the target string references an invalid path or
            resolves to something that is not an ``nn.Module``
    """
    if target == "":
        return self

    atoms: List[str] = target.split(".")
    mod: torch.nn.Module = self

    for item in atoms:
        if not hasattr(mod, item):
            raise AttributeError(mod._get_name() + " has no attribute `" + item + "`")

        mod = getattr(mod, item)

        if not isinstance(mod, torch.nn.Module):
            raise AttributeError("`" + item + "` is not an nn.Module")

    return mod
(self, target: str) -> torch.nn.modules.module.Module
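Mirroring the docstring's diagram on a smaller scale, fetching a nested submodule by its dotted path:

import torch.nn as nn

net = nn.Sequential(nn.Sequential(nn.Conv2d(16, 33, 3, stride=2)))
conv = net.get_submodule("0.0")   # outer child "0", then its child "0"
print(type(conv).__name__)        # Conv2d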
10,059
torch.nn.modules.module
half
Casts all floating point parameters and buffers to ``half`` datatype. .. note:: This method modifies the module in-place. Returns: Module: self
def half(self: T) -> T:
    r"""Casts all floating point parameters and buffers to ``half`` datatype.

    .. note::
        This method modifies the module in-place.

    Returns:
        Module: self
    """
    return self._apply(lambda t: t.half() if t.is_floating_point() else t)
(self: ~T) -> ~T
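half() and its counterpart float() (above) cast in place and return the module, so they chain:

import torch.nn as nn

model = nn.Linear(4, 2)
print(model.half().weight.dtype)   # torch.float16
print(model.float().weight.dtype)  # torch.float32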
10,060
torch.nn.modules.container
insert
null
def insert(self, index: int, module: Module) -> 'Sequential':
    if not isinstance(module, Module):
        raise AssertionError(
            f'module should be of type: {Module}')
    n = len(self._modules)
    if not (-n <= index <= n):
        raise IndexError(
            f'Index out of range: {index}')
    if index < 0:
        index += n
    for i in range(n, index, -1):
        self._modules[str(i)] = self._modules[str(i - 1)]
    self._modules[str(index)] = module
    return self
(self, index: int, module: torch.nn.modules.module.Module) -> torch.nn.modules.container.Sequential
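insert() behaves like list.insert on an nn.Sequential, shifting later indices up:

import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
net.insert(1, nn.ReLU())  # now Linear -> ReLU -> Linear
print(net)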
10,061
torch.nn.modules.module
ipu
Move all model parameters and buffers to the IPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on IPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self
def ipu(self: T, device: Optional[Union[int, device]] = None) -> T:
    r"""Move all model parameters and buffers to the IPU.

    This also makes associated parameters and buffers different objects. So
    it should be called before constructing optimizer if the module will
    live on IPU while being optimized.

    .. note::
        This method modifies the module in-place.

    Arguments:
        device (int, optional): if specified, all parameters will be
            copied to that device

    Returns:
        Module: self
    """
    return self._apply(lambda t: t.ipu(device))
(self: ~T, device: Union[int, torch.device, NoneType] = None) -> ~T
10,062
sentence_transformers.SentenceTransformer
load
null
@staticmethod
def load(input_path):
    return SentenceTransformer(input_path)
(input_path)
10,063
torch.nn.modules.module
load_state_dict
Copy parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. .. warning:: If :attr:`assign` is ``True`` the optimizer must be created after the call to :attr:`load_state_dict` unless :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``. Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` assign (bool, optional): When ``False``, the properties of the tensors in the current module are preserved while when ``True``, the properties of the Tensors in the state dict are preserved. The only exception is the ``requires_grad`` field of :class:`~torch.nn.Parameter`s for which the value from the module is preserved. Default: ``False`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys Note: If a parameter or buffer is registered as ``None`` and its corresponding key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a ``RuntimeError``.
def load_state_dict(self, state_dict: Mapping[str, Any],
                    strict: bool = True, assign: bool = False):
    r"""Copy parameters and buffers from :attr:`state_dict` into this module and its descendants.

    If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match
    the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function.

    .. warning::
        If :attr:`assign` is ``True`` the optimizer must be created after
        the call to :attr:`load_state_dict` unless
        :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``.

    Args:
        state_dict (dict): a dict containing parameters and persistent buffers.
        strict (bool, optional): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
        assign (bool, optional): When ``False``, the properties of the tensors
            in the current module are preserved while when ``True``, the
            properties of the Tensors in the state dict are preserved. The only
            exception is the ``requires_grad`` field of
            :class:`~torch.nn.Parameter`s for which the value from the module
            is preserved. Default: ``False``

    Returns:
        ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
            * **missing_keys** is a list of str containing the missing keys
            * **unexpected_keys** is a list of str containing the unexpected keys

    Note:
        If a parameter or buffer is registered as ``None`` and its corresponding key
        exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a
        ``RuntimeError``.
    """
    if not isinstance(state_dict, Mapping):
        raise TypeError(f"Expected state_dict to be dict-like, got {type(state_dict)}.")

    missing_keys: List[str] = []
    unexpected_keys: List[str] = []
    error_msgs: List[str] = []

    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = OrderedDict(state_dict)
    if metadata is not None:
        # mypy isn't aware that "_metadata" exists in state_dict
        state_dict._metadata = metadata  # type: ignore[attr-defined]

    def load(module, local_state_dict, prefix=''):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        if assign:
            local_metadata['assign_to_params_buffers'] = assign
        module._load_from_state_dict(
            local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for name, child in module._modules.items():
            if child is not None:
                child_prefix = prefix + name + '.'
                child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)}
                load(child, child_state_dict, child_prefix)  # noqa: F821

        # Note that the hook can modify missing_keys and unexpected_keys.
        incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys)
        for hook in module._load_state_dict_post_hooks.values():
            out = hook(module, incompatible_keys)
            assert out is None, (
                "Hooks registered with ``register_load_state_dict_post_hook`` are not"
                "expected to return new values, if incompatible_keys need to be modified,"
                "it should be done inplace."
            )

    load(self, state_dict)
    del load

    if strict:
        if len(unexpected_keys) > 0:
            error_msgs.insert(
                0, 'Unexpected key(s) in state_dict: {}. '.format(
                    ', '.join(f'"{k}"' for k in unexpected_keys)))
        if len(missing_keys) > 0:
            error_msgs.insert(
                0, 'Missing key(s) in state_dict: {}. '.format(
                    ', '.join(f'"{k}"' for k in missing_keys)))

    if len(error_msgs) > 0:
        raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                           self.__class__.__name__, "\n\t".join(error_msgs)))
    return _IncompatibleKeys(missing_keys, unexpected_keys)
(self, state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)
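A small demonstration of the strictness semantics: with strict=False, missing keys are reported instead of raising.

import torch.nn as nn

model = nn.Linear(4, 2)
partial = {"weight": model.state_dict()["weight"]}     # deliberately omit "bias"
result = model.load_state_dict(partial, strict=False)  # strict=True would raise here
print(result.missing_keys)     # ['bias']
print(result.unexpected_keys)  # []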
10,064
torch.nn.modules.module
modules
Return an iterator over all modules in the network. Yields: Module: a module in the network Note: Duplicate modules are returned only once. In the following example, ``l`` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.modules()): ... print(idx, '->', m) 0 -> Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) ) 1 -> Linear(in_features=2, out_features=2, bias=True)
def modules(self) -> Iterator['Module']:
    r"""Return an iterator over all modules in the network.

    Yields:
        Module: a module in the network

    Note:
        Duplicate modules are returned only once. In the following
        example, ``l`` will be returned only once.

    Example::

        >>> l = nn.Linear(2, 2)
        >>> net = nn.Sequential(l, l)
        >>> for idx, m in enumerate(net.modules()):
        ...     print(idx, '->', m)

        0 -> Sequential(
          (0): Linear(in_features=2, out_features=2, bias=True)
          (1): Linear(in_features=2, out_features=2, bias=True)
        )
        1 -> Linear(in_features=2, out_features=2, bias=True)
    """
    for _, module in self.named_modules():
        yield module
(self) -> Iterator[torch.nn.modules.module.Module]
10,065
torch.nn.modules.module
named_buffers
Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself. Args: prefix (str): prefix to prepend to all buffer names. recurse (bool, optional): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True. remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True. Yields: (str, torch.Tensor): Tuple containing the name and buffer Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for name, buf in self.named_buffers(): >>> if name in ['running_var']: >>> print(buf.size())
def named_buffers(self, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) -> Iterator[Tuple[str, Tensor]]:
    r"""Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

    Args:
        prefix (str): prefix to prepend to all buffer names.
        recurse (bool, optional): if True, then yields buffers of this module
            and all submodules. Otherwise, yields only buffers that
            are direct members of this module. Defaults to True.
        remove_duplicate (bool, optional): whether to remove the duplicated
            buffers in the result. Defaults to True.

    Yields:
        (str, torch.Tensor): Tuple containing the name and buffer

    Example::

        >>> # xdoctest: +SKIP("undefined vars")
        >>> for name, buf in self.named_buffers():
        >>>     if name in ['running_var']:
        >>>         print(buf.size())

    """
    gen = self._named_members(
        lambda module: module._buffers.items(),
        prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
    yield from gen
(self, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) -> Iterator[Tuple[str, torch.Tensor]]
10,066
torch.nn.modules.module
named_children
Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself. Yields: (str, Module): Tuple containing a name and child module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for name, module in model.named_children(): >>> if name in ['conv4', 'conv5']: >>> print(module)
def named_children(self) -> Iterator[Tuple[str, 'Module']]:
    r"""Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

    Yields:
        (str, Module): Tuple containing a name and child module

    Example::

        >>> # xdoctest: +SKIP("undefined vars")
        >>> for name, module in model.named_children():
        >>>     if name in ['conv4', 'conv5']:
        >>>         print(module)

    """
    memo = set()
    for name, module in self._modules.items():
        if module is not None and module not in memo:
            memo.add(module)
            yield name, module
(self) -> Iterator[Tuple[str, torch.nn.modules.module.Module]]
10,067
torch.nn.modules.module
named_modules
Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself. Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not Yields: (str, Module): Tuple of name and module Note: Duplicate modules are returned only once. In the following example, ``l`` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.named_modules()): ... print(idx, '->', m) 0 -> ('', Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) )) 1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = '', remove_duplicate: bool = True):
    r"""Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

    Args:
        memo: a memo to store the set of modules already added to the result
        prefix: a prefix that will be added to the name of the module
        remove_duplicate: whether to remove the duplicated module instances
            in the result or not

    Yields:
        (str, Module): Tuple of name and module

    Note:
        Duplicate modules are returned only once. In the following
        example, ``l`` will be returned only once.

    Example::

        >>> l = nn.Linear(2, 2)
        >>> net = nn.Sequential(l, l)
        >>> for idx, m in enumerate(net.named_modules()):
        ...     print(idx, '->', m)

        0 -> ('', Sequential(
          (0): Linear(in_features=2, out_features=2, bias=True)
          (1): Linear(in_features=2, out_features=2, bias=True)
        ))
        1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
    """
    if memo is None:
        memo = set()
    if self not in memo:
        if remove_duplicate:
            memo.add(self)
        yield prefix, self
        for name, module in self._modules.items():
            if module is None:
                continue
            submodule_prefix = prefix + ('.' if prefix else '') + name
            yield from module.named_modules(memo, submodule_prefix, remove_duplicate)
(self, memo: Optional[Set[torch.nn.modules.module.Module]] = None, prefix: str = '', remove_duplicate: bool = True)
10,068
torch.nn.modules.module
named_parameters
Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself. Args: prefix (str): prefix to prepend to all parameter names. recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. remove_duplicate (bool, optional): whether to remove the duplicated parameters in the result. Defaults to True. Yields: (str, Parameter): Tuple containing the name and parameter Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for name, param in self.named_parameters(): >>> if name in ['bias']: >>> print(param.size())
def named_parameters(
    self,
    prefix: str = '',
    recurse: bool = True,
    remove_duplicate: bool = True
) -> Iterator[Tuple[str, Parameter]]:
    r"""Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

    Args:
        prefix (str): prefix to prepend to all parameter names.
        recurse (bool): if True, then yields parameters of this module
            and all submodules. Otherwise, yields only parameters that
            are direct members of this module.
        remove_duplicate (bool, optional): whether to remove the duplicated
            parameters in the result. Defaults to True.

    Yields:
        (str, Parameter): Tuple containing the name and parameter

    Example::

        >>> # xdoctest: +SKIP("undefined vars")
        >>> for name, param in self.named_parameters():
        >>>     if name in ['bias']:
        >>>         print(param.size())

    """
    gen = self._named_members(
        lambda module: module._parameters.items(),
        prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
    yield from gen
(self, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) -> Iterator[Tuple[str, torch.nn.parameter.Parameter]]
10,069
torch.nn.modules.module
parameters
Return an iterator over module parameters. This is typically passed to an optimizer. Args: recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. Yields: Parameter: module parameter Example:: >>> # xdoctest: +SKIP("undefined vars") >>> for param in model.parameters(): >>> print(type(param), param.size()) <class 'torch.Tensor'> (20L,) <class 'torch.Tensor'> (20L, 1L, 5L, 5L)
def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
    r"""Return an iterator over module parameters.

    This is typically passed to an optimizer.

    Args:
        recurse (bool): if True, then yields parameters of this module
            and all submodules. Otherwise, yields only parameters that
            are direct members of this module.

    Yields:
        Parameter: module parameter

    Example::

        >>> # xdoctest: +SKIP("undefined vars")
        >>> for param in model.parameters():
        >>>     print(type(param), param.size())
        <class 'torch.Tensor'> (20L,)
        <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

    """
    for name, param in self.named_parameters(recurse=recurse):
        yield param
(self, recurse: bool = True) -> Iterator[torch.nn.parameter.Parameter]
10,070
torch.nn.modules.container
pop
null
def pop(self, key: Union[int, slice]) -> Module:
    v = self[key]
    del self[key]
    return v
(self, key: Union[int, slice]) -> torch.nn.modules.module.Module
10,071
sentence_transformers.SentenceTransformer
push_to_hub
Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository.
:param repo_id: Repository name for your model in the Hub, including the user or organization.
:param token: An authentication token (See https://huggingface.co/settings/token)
:param private: Set to true to host a private model
:param safe_serialization: If true, save the model using safetensors. If false, save the model in the traditional PyTorch way
:param commit_message: Message to commit while pushing.
:param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded
:param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible
:param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card
:param train_datasets: Datasets used to train the model. If set, the datasets will be added to the model card in the Hub.
:return: The url of the commit of your model in the repository on the Hugging Face Hub.
def push_to_hub(
    self,
    repo_id: str,
    token: Optional[str] = None,
    private: Optional[bool] = None,
    safe_serialization: bool = True,
    commit_message: str = "Add new SentenceTransformer model.",
    local_model_path: Optional[str] = None,
    exist_ok: bool = False,
    replace_model_card: bool = False,
    train_datasets: Optional[List[str]] = None,
) -> str:
    """
    Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository.

    :param repo_id: Repository name for your model in the Hub, including the user or organization.
    :param token: An authentication token (See https://huggingface.co/settings/token)
    :param private: Set to true to host a private model
    :param safe_serialization: If true, save the model using safetensors.
        If false, save the model in the traditional PyTorch way
    :param commit_message: Message to commit while pushing.
    :param local_model_path: Path of the model locally. If set, this file path will be uploaded.
        Otherwise, the current model will be uploaded
    :param exist_ok: If true, saving to an existing repository is OK.
        If false, saving only to a new repository is possible
    :param replace_model_card: If true, replace an existing model card in the hub
        with the automatically created model card
    :param train_datasets: Datasets used to train the model. If set, the datasets
        will be added to the model card in the Hub.
    :return: The url of the commit of your model in the repository on the Hugging Face Hub.
    """
    api = HfApi(token=token)
    repo_url = api.create_repo(
        repo_id=repo_id,
        private=private,
        repo_type=None,
        exist_ok=exist_ok,
    )
    repo_id = repo_url.repo_id  # Update the repo_id in case the old repo_id didn't contain a user or organization
    if local_model_path:
        folder_url = api.upload_folder(
            repo_id=repo_id, folder_path=local_model_path, commit_message=commit_message
        )
    else:
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_model_card = replace_model_card or not os.path.exists(os.path.join(tmp_dir, "README.md"))
            self.save(
                tmp_dir,
                model_name=repo_url.repo_id,
                create_model_card=create_model_card,
                train_datasets=train_datasets,
                safe_serialization=safe_serialization,
            )
            folder_url = api.upload_folder(repo_id=repo_id, folder_path=tmp_dir, commit_message=commit_message)

    refs = api.list_repo_refs(repo_id=repo_id)
    for branch in refs.branches:
        if branch.name == "main":
            return f"https://huggingface.co/{repo_id}/commit/{branch.target_commit}"
    # This isn't expected to ever be reached.
    return folder_url
(self, repo_id: str, token: Optional[str] = None, private: Optional[bool] = None, safe_serialization: bool = True, commit_message: str = 'Add new SentenceTransformer model.', local_model_path: Optional[str] = None, exist_ok: bool = False, replace_model_card: bool = False, train_datasets: Optional[List[str]] = None) -> str
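A hedged usage sketch: "my-user/my-model" is a placeholder repo id, and a Hub token with write access must be available (via the token argument or a prior huggingface-cli login):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
url = model.push_to_hub(
    "my-user/my-model",          # placeholder: your user/organization and repo name
    private=True,
    commit_message="Initial upload",
)
print(url)  # link to the commit on the Hugging Face Hub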
10,072
torch.nn.modules.module
register_backward_hook
Register a backward hook on the module. This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
def register_backward_hook(
    self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]
) -> RemovableHandle:
    r"""Register a backward hook on the module.

    This function is deprecated in favor of
    :meth:`~torch.nn.Module.register_full_backward_hook` and the behavior of
    this function will change in future versions.

    Returns:
        :class:`torch.utils.hooks.RemovableHandle`:
            a handle that can be used to remove the added hook by calling
            ``handle.remove()``
    """
    if self._is_full_backward_hook is True:
        raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a "
                           "single Module. Please use only one of them.")

    self._is_full_backward_hook = False

    handle = hooks.RemovableHandle(self._backward_hooks)
    self._backward_hooks[handle.id] = hook
    return handle
(self, hook: Callable[[torch.nn.modules.module.Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, Tuple[torch.Tensor, ...], torch.Tensor]]) -> torch.utils.hooks.RemovableHandle
10,073
torch.nn.modules.module
register_buffer
Add a buffer to the module. This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm's ``running_mean`` is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting :attr:`persistent` to ``False``. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's :attr:`state_dict`. Buffers can be accessed as attributes using given names. Args: name (str): name of the buffer. The buffer can be accessed from this module using the given name tensor (Tensor or None): buffer to be registered. If ``None``, then operations that run on buffers, such as :attr:`cuda`, are ignored. If ``None``, the buffer is **not** included in the module's :attr:`state_dict`. persistent (bool): whether the buffer is part of this module's :attr:`state_dict`. Example:: >>> # xdoctest: +SKIP("undefined vars") >>> self.register_buffer('running_mean', torch.zeros(num_features))
def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool = True) -> None:
    r"""Add a buffer to the module.

    This is typically used to register a buffer that should not be
    considered a model parameter. For example, BatchNorm's ``running_mean``
    is not a parameter, but is part of the module's state. Buffers, by
    default, are persistent and will be saved alongside parameters. This
    behavior can be changed by setting :attr:`persistent` to ``False``. The
    only difference between a persistent buffer and a non-persistent buffer
    is that the latter will not be a part of this module's
    :attr:`state_dict`.

    Buffers can be accessed as attributes using given names.

    Args:
        name (str): name of the buffer. The buffer can be accessed
            from this module using the given name
        tensor (Tensor or None): buffer to be registered. If ``None``, then
            operations that run on buffers, such as :attr:`cuda`, are ignored.
            If ``None``, the buffer is **not** included in the module's
            :attr:`state_dict`.
        persistent (bool): whether the buffer is part of this module's
            :attr:`state_dict`.

    Example::

        >>> # xdoctest: +SKIP("undefined vars")
        >>> self.register_buffer('running_mean', torch.zeros(num_features))

    """
    if persistent is False and isinstance(self, torch.jit.ScriptModule):
        raise RuntimeError("ScriptModule does not support non-persistent buffers")

    if '_buffers' not in self.__dict__:
        raise AttributeError(
            "cannot assign buffer before Module.__init__() call")
    elif not isinstance(name, str):
        raise TypeError(f"buffer name should be a string. Got {torch.typename(name)}")
    elif '.' in name:
        raise KeyError("buffer name can't contain \".\"")
    elif name == '':
        raise KeyError("buffer name can't be empty string \"\"")
    elif hasattr(self, name) and name not in self._buffers:
        raise KeyError(f"attribute '{name}' already exists")
    elif tensor is not None and not isinstance(tensor, torch.Tensor):
        raise TypeError(f"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' "
                        "(torch Tensor or None required)")
    else:
        for hook in _global_buffer_registration_hooks.values():
            output = hook(self, name, tensor)
            if output is not None:
                tensor = output
        self._buffers[name] = tensor
        if persistent:
            self._non_persistent_buffers_set.discard(name)
        else:
            self._non_persistent_buffers_set.add(name)
(self, name: str, tensor: Optional[torch.Tensor], persistent: bool = True) -> NoneType
10,074
torch.nn.modules.module
register_forward_hook
Register a forward hook on the module. The hook will be called every time after :func:`forward` has computed an output. If ``with_kwargs`` is ``False`` or not specified, the input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the ``forward``. The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after :func:`forward` is called. The hook should have the following signature:: hook(module, args, output) -> None or modified output If ``with_kwargs`` is ``True``, the forward hook will be passed the ``kwargs`` given to the forward function and be expected to return the output possibly modified. The hook should have the following signature:: hook(module, args, kwargs, output) -> None or modified output Args: hook (Callable): The user defined hook to be registered. prepend (bool): If ``True``, the provided ``hook`` will be fired before all existing ``forward`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``forward`` hooks on this :class:`torch.nn.modules.Module`. Note that global ``forward`` hooks registered with :func:`register_module_forward_hook` will fire before all hooks registered by this method. Default: ``False`` with_kwargs (bool): If ``True``, the ``hook`` will be passed the kwargs given to the forward function. Default: ``False`` always_call (bool): If ``True`` the ``hook`` will be run regardless of whether an exception is raised while calling the Module. Default: ``False`` Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
def register_forward_hook(
    self,
    hook: Union[
        Callable[[T, Tuple[Any, ...], Any], Optional[Any]],
        Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]],
    ],
    *,
    prepend: bool = False,
    with_kwargs: bool = False,
    always_call: bool = False,
) -> RemovableHandle:
    r"""Register a forward hook on the module.

    The hook will be called every time after :func:`forward` has computed an output.

    If ``with_kwargs`` is ``False`` or not specified, the input contains only
    the positional arguments given to the module. Keyword arguments won't be
    passed to the hooks and only to the ``forward``. The hook can modify the
    output. It can modify the input inplace but it will not have effect on
    forward since this is called after :func:`forward` is called. The hook
    should have the following signature::

        hook(module, args, output) -> None or modified output

    If ``with_kwargs`` is ``True``, the forward hook will be passed the
    ``kwargs`` given to the forward function and be expected to return the
    output possibly modified. The hook should have the following signature::

        hook(module, args, kwargs, output) -> None or modified output

    Args:
        hook (Callable): The user defined hook to be registered.
        prepend (bool): If ``True``, the provided ``hook`` will be fired
            before all existing ``forward`` hooks on this
            :class:`torch.nn.modules.Module`. Otherwise, the provided
            ``hook`` will be fired after all existing ``forward`` hooks on
            this :class:`torch.nn.modules.Module`. Note that global
            ``forward`` hooks registered with
            :func:`register_module_forward_hook` will fire before all hooks
            registered by this method.
            Default: ``False``
        with_kwargs (bool): If ``True``, the ``hook`` will be passed the
            kwargs given to the forward function.
            Default: ``False``
        always_call (bool): If ``True`` the ``hook`` will be run regardless of
            whether an exception is raised while calling the Module.
            Default: ``False``

    Returns:
        :class:`torch.utils.hooks.RemovableHandle`:
            a handle that can be used to remove the added hook by calling
            ``handle.remove()``
    """
    handle = hooks.RemovableHandle(
        self._forward_hooks,
        extra_dict=[self._forward_hooks_with_kwargs, self._forward_hooks_always_called],
    )
    self._forward_hooks[handle.id] = hook
    if with_kwargs:
        self._forward_hooks_with_kwargs[handle.id] = True
    if always_call:
        self._forward_hooks_always_called[handle.id] = True
    if prepend:
        self._forward_hooks.move_to_end(handle.id, last=False)  # type: ignore[attr-defined]
    return handle
(self, hook: Union[Callable[[~T, Tuple[Any, ...], Any], Optional[Any]], Callable[[~T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) -> torch.utils.hooks.RemovableHandle
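A minimal usage sketch for the record above (the module, shapes, and hook name are illustrative, not part of the source):

import torch
import torch.nn as nn

linear = nn.Linear(4, 2)

# Returning a value from a forward hook replaces the module's output.
def double_output(module, args, output):
    return output * 2

handle = linear.register_forward_hook(double_output)
y = linear(torch.randn(3, 4))  # y is the doubled output
handle.remove()                # the hook no longer fires once removed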
10,075
torch.nn.modules.module
register_forward_pre_hook
Register a forward pre-hook on the module. The hook will be called every time before :func:`forward` is invoked. If ``with_kwargs`` is false or not specified, the input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the ``forward``. The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple). The hook should have the following signature:: hook(module, args) -> None or modified input If ``with_kwargs`` is true, the forward pre-hook will be passed the kwargs given to the forward function. And if the hook modifies the input, both the args and kwargs should be returned. The hook should have the following signature:: hook(module, args, kwargs) -> None or a tuple of modified input and kwargs Args: hook (Callable): The user defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``forward_pre`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``forward_pre`` hooks on this :class:`torch.nn.modules.Module`. Note that global ``forward_pre`` hooks registered with :func:`register_module_forward_pre_hook` will fire before all hooks registered by this method. Default: ``False`` with_kwargs (bool): If true, the ``hook`` will be passed the kwargs given to the forward function. Default: ``False`` Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
def register_forward_pre_hook( self, hook: Union[ Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]], ], *, prepend: bool = False, with_kwargs: bool = False, ) -> RemovableHandle: r"""Register a forward pre-hook on the module. The hook will be called every time before :func:`forward` is invoked. If ``with_kwargs`` is false or not specified, the input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the ``forward``. The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple). The hook should have the following signature:: hook(module, args) -> None or modified input If ``with_kwargs`` is true, the forward pre-hook will be passed the kwargs given to the forward function. And if the hook modifies the input, both the args and kwargs should be returned. The hook should have the following signature:: hook(module, args, kwargs) -> None or a tuple of modified input and kwargs Args: hook (Callable): The user defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``forward_pre`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``forward_pre`` hooks on this :class:`torch.nn.modules.Module`. Note that global ``forward_pre`` hooks registered with :func:`register_module_forward_pre_hook` will fire before all hooks registered by this method. Default: ``False`` with_kwargs (bool): If true, the ``hook`` will be passed the kwargs given to the forward function. Default: ``False`` Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = hooks.RemovableHandle( self._forward_pre_hooks, extra_dict=self._forward_pre_hooks_with_kwargs ) self._forward_pre_hooks[handle.id] = hook if with_kwargs: self._forward_pre_hooks_with_kwargs[handle.id] = True if prepend: self._forward_pre_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] return handle
(self, hook: Union[Callable[[~T, Tuple[Any, ...]], Optional[Any]], Callable[[~T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) -> torch.utils.hooks.RemovableHandle
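A sketch of a forward pre-hook that rewrites the positional input before forward() runs (module and input here are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

linear = nn.Linear(4, 2)

# A single returned value is wrapped into a tuple of positional args.
def normalize_input(module, args):
    (x,) = args
    return F.normalize(x, dim=-1)

handle = linear.register_forward_pre_hook(normalize_input)
y = linear(100 * torch.randn(3, 4))  # forward() sees unit-norm rows
handle.remove()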
10,076
torch.nn.modules.module
register_full_backward_hook
Register a backward hook on the module. The hook will be called every time the gradients with respect to a module are computed, i.e. the hook will execute if and only if the gradients with respect to module outputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function. .. warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error. Args: hook (Callable): The user-defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``backward`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``backward`` hooks on this :class:`torch.nn.modules.Module`. Note that global ``backward`` hooks registered with :func:`register_module_full_backward_hook` will fire before all hooks registered by this method. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
def register_full_backward_hook( self, hook: Callable[["Module", _grad_t, _grad_t], Union[None, _grad_t]], prepend: bool = False, ) -> RemovableHandle: r"""Register a backward hook on the module. The hook will be called every time the gradients with respect to a module are computed, i.e. the hook will execute if and only if the gradients with respect to module outputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function. .. warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error. Args: hook (Callable): The user-defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``backward`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``backward`` hooks on this :class:`torch.nn.modules.Module`. Note that global ``backward`` hooks registered with :func:`register_module_full_backward_hook` will fire before all hooks registered by this method. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ if self._is_full_backward_hook is False: raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a " "single Module. Please use only one of them.") self._is_full_backward_hook = True handle = hooks.RemovableHandle(self._backward_hooks) self._backward_hooks[handle.id] = hook if prepend: self._backward_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] return handle
(self, hook: Callable[[torch.nn.modules.module.Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) -> torch.utils.hooks.RemovableHandle
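A sketch of inspecting gradients with a full backward hook (the module and loss are illustrative):

import torch
import torch.nn as nn

linear = nn.Linear(4, 2)

# Read-only hook: prints gradient norms and returns None, so grads are unchanged.
def log_grads(module, grad_input, grad_output):
    print("grad_output norms:", [g.norm().item() for g in grad_output if g is not None])

handle = linear.register_full_backward_hook(log_grads)
x = torch.randn(3, 4, requires_grad=True)
linear(x).sum().backward()  # the hook fires while gradients are computed
handle.remove()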
10,077
torch.nn.modules.module
register_full_backward_pre_hook
Register a backward pre-hook on the module. The hook will be called every time the gradients for the module are computed. The hook should have the following signature:: hook(module, grad_output) -> tuple[Tensor] or None The :attr:`grad_output` is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of :attr:`grad_output` in subsequent computations. Entries in :attr:`grad_output` will be ``None`` for all non-Tensor arguments. For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function. .. warning :: Modifying inputs inplace is not allowed when using backward hooks and will raise an error. Args: hook (Callable): The user-defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``backward_pre`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``backward_pre`` hooks on this :class:`torch.nn.modules.Module`. Note that global ``backward_pre`` hooks registered with :func:`register_module_full_backward_pre_hook` will fire before all hooks registered by this method. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
def register_full_backward_pre_hook( self, hook: Callable[["Module", _grad_t], Union[None, _grad_t]], prepend: bool = False, ) -> RemovableHandle: r"""Register a backward pre-hook on the module. The hook will be called every time the gradients for the module are computed. The hook should have the following signature:: hook(module, grad_output) -> tuple[Tensor] or None The :attr:`grad_output` is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of :attr:`grad_output` in subsequent computations. Entries in :attr:`grad_output` will be ``None`` for all non-Tensor arguments. For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function. .. warning :: Modifying inputs inplace is not allowed when using backward hooks and will raise an error. Args: hook (Callable): The user-defined hook to be registered. prepend (bool): If true, the provided ``hook`` will be fired before all existing ``backward_pre`` hooks on this :class:`torch.nn.modules.Module`. Otherwise, the provided ``hook`` will be fired after all existing ``backward_pre`` hooks on this :class:`torch.nn.modules.Module`. Note that global ``backward_pre`` hooks registered with :func:`register_module_full_backward_pre_hook` will fire before all hooks registered by this method. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = hooks.RemovableHandle(self._backward_pre_hooks) self._backward_pre_hooks[handle.id] = hook if prepend: self._backward_pre_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] return handle
(self, hook: Callable[[torch.nn.modules.module.Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) -> torch.utils.hooks.RemovableHandle
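As a sketch, a backward pre-hook can rewrite grad_output before the module's own backward uses it (the clamp range and module are illustrative):

import torch
import torch.nn as nn

linear = nn.Linear(4, 2)

# Clamp the incoming output gradient; the returned tuple replaces grad_output.
def clamp_grad_output(module, grad_output):
    return tuple(g.clamp(-1.0, 1.0) if g is not None else None for g in grad_output)

handle = linear.register_full_backward_pre_hook(clamp_grad_output)
x = torch.randn(3, 4, requires_grad=True)
(100 * linear(x)).sum().backward()  # weight grads are computed from the clamped values
handle.remove()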
10,078
torch.nn.modules.module
register_load_state_dict_post_hook
Register a post hook to be run after module's ``load_state_dict`` is called. It should have the following signature:: hook(module, incompatible_keys) -> None The ``module`` argument is the current module that this hook is registered on, and the ``incompatible_keys`` argument is a ``NamedTuple`` consisting of attributes ``missing_keys`` and ``unexpected_keys``. ``missing_keys`` is a ``list`` of ``str`` containing the missing keys and ``unexpected_keys`` is a ``list`` of ``str`` containing the unexpected keys. The given incompatible_keys can be modified inplace if needed. Note that the checks performed when calling :func:`load_state_dict` with ``strict=True`` are affected by modifications the hook makes to ``missing_keys`` or ``unexpected_keys``, as expected. Additions to either set of keys will result in an error being thrown when ``strict=True``, and clearing out both missing and unexpected keys will avoid an error. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
def register_load_state_dict_post_hook(self, hook): r"""Register a post hook to be run after module's ``load_state_dict`` is called. It should have the following signature:: hook(module, incompatible_keys) -> None The ``module`` argument is the current module that this hook is registered on, and the ``incompatible_keys`` argument is a ``NamedTuple`` consisting of attributes ``missing_keys`` and ``unexpected_keys``. ``missing_keys`` is a ``list`` of ``str`` containing the missing keys and ``unexpected_keys`` is a ``list`` of ``str`` containing the unexpected keys. The given incompatible_keys can be modified inplace if needed. Note that the checks performed when calling :func:`load_state_dict` with ``strict=True`` are affected by modifications the hook makes to ``missing_keys`` or ``unexpected_keys``, as expected. Additions to either set of keys will result in an error being thrown when ``strict=True``, and clearing out both missing and unexpected keys will avoid an error. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` """ handle = hooks.RemovableHandle(self._load_state_dict_post_hooks) self._load_state_dict_post_hooks[handle.id] = hook return handle
(self, hook)
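A sketch of using the post hook to relax the ``strict=True`` check for a key that is deliberately omitted (the module is illustrative):

import torch.nn as nn

model = nn.Linear(4, 2)

# Clearing a key from missing_keys makes load_state_dict(strict=True) accept its absence.
def ignore_missing_bias(module, incompatible_keys):
    if "bias" in incompatible_keys.missing_keys:
        incompatible_keys.missing_keys.remove("bias")

model.register_load_state_dict_post_hook(ignore_missing_bias)
state = {"weight": model.weight.detach().clone()}  # "bias" deliberately left out
model.load_state_dict(state, strict=True)          # passes thanks to the hook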
10,079
torch.nn.modules.module
register_module
Alias for :func:`add_module`.
def register_module(self, name: str, module: Optional['Module']) -> None: r"""Alias for :func:`add_module`.""" self.add_module(name, module)
(self, name: str, module: Optional[torch.nn.modules.module.Module]) -> NoneType
10,080
torch.nn.modules.module
register_parameter
Add a parameter to the module. The parameter can be accessed as an attribute using given name. Args: name (str): name of the parameter. The parameter can be accessed from this module using the given name param (Parameter or None): parameter to be added to the module. If ``None``, then operations that run on parameters, such as :attr:`cuda`, are ignored. If ``None``, the parameter is **not** included in the module's :attr:`state_dict`.
def register_parameter(self, name: str, param: Optional[Parameter]) -> None: r"""Add a parameter to the module. The parameter can be accessed as an attribute using given name. Args: name (str): name of the parameter. The parameter can be accessed from this module using the given name param (Parameter or None): parameter to be added to the module. If ``None``, then operations that run on parameters, such as :attr:`cuda`, are ignored. If ``None``, the parameter is **not** included in the module's :attr:`state_dict`. """ if '_parameters' not in self.__dict__: raise AttributeError( "cannot assign parameter before Module.__init__() call") elif not isinstance(name, str): raise TypeError(f"parameter name should be a string. Got {torch.typename(name)}") elif '.' in name: raise KeyError("parameter name can't contain \".\"") elif name == '': raise KeyError("parameter name can't be empty string \"\"") elif hasattr(self, name) and name not in self._parameters: raise KeyError(f"attribute '{name}' already exists") if param is None: self._parameters[name] = None elif not isinstance(param, Parameter): raise TypeError(f"cannot assign '{torch.typename(param)}' object to parameter '{name}' " "(torch.nn.Parameter or None required)" ) elif param.grad_fn: raise ValueError( f"Cannot assign non-leaf Tensor to parameter '{name}'. Model " f"parameters must be created explicitly. To express '{name}' " "as a function of another Tensor, compute the value in " "the forward() method.") else: for hook in _global_parameter_registration_hooks.values(): output = hook(self, name, param) if output is not None: param = output self._parameters[name] = param
(self, name: str, param: Optional[torch.nn.parameter.Parameter]) -> NoneType
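A brief sketch of registering a custom parameter so it participates in ``parameters()`` and ``state_dict()`` (the class and sizes are illustrative):

import torch
import torch.nn as nn

class ScaledLinear(nn.Module):
    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)
        # Registered parameters are tracked like ordinary module attributes.
        self.register_parameter("scale", nn.Parameter(torch.ones(out_features)))

    def forward(self, x):
        return self.linear(x) * self.scale

model = ScaledLinear(4, 2)
assert "scale" in dict(model.named_parameters())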
10,081
torch.nn.modules.module
register_state_dict_pre_hook
Register a pre-hook for the :meth:`~torch.nn.Module.state_dict` method. These hooks will be called with arguments: ``self``, ``prefix``, and ``keep_vars`` before calling ``state_dict`` on ``self``. The registered hooks can be used to perform pre-processing before the ``state_dict`` call is made.
def register_state_dict_pre_hook(self, hook): r"""Register a pre-hook for the :meth:`~torch.nn.Module.state_dict` method. These hooks will be called with arguments: ``self``, ``prefix``, and ``keep_vars`` before calling ``state_dict`` on ``self``. The registered hooks can be used to perform pre-processing before the ``state_dict`` call is made. """ handle = hooks.RemovableHandle(self._state_dict_pre_hooks) self._state_dict_pre_hooks[handle.id] = hook return handle
(self, hook)
10,082
torch.nn.modules.module
requires_grad_
Change if autograd should record operations on parameters in this module. This method sets the parameters' :attr:`requires_grad` attributes in-place. This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training). See :ref:`locally-disable-grad-doc` for a comparison between `.requires_grad_()` and several similar mechanisms that may be confused with it. Args: requires_grad (bool): whether autograd should record operations on parameters in this module. Default: ``True``. Returns: Module: self
def requires_grad_(self: T, requires_grad: bool = True) -> T: r"""Change if autograd should record operations on parameters in this module. This method sets the parameters' :attr:`requires_grad` attributes in-place. This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training). See :ref:`locally-disable-grad-doc` for a comparison between `.requires_grad_()` and several similar mechanisms that may be confused with it. Args: requires_grad (bool): whether autograd should record operations on parameters in this module. Default: ``True``. Returns: Module: self """ for p in self.parameters(): p.requires_grad_(requires_grad) return self
(self: ~T, requires_grad: bool = True) -> ~T
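A common use is freezing a backbone for fine-tuning; a sketch (the architecture is illustrative):

import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))

model.requires_grad_(False)     # freeze everything in-place
model[-1].requires_grad_(True)  # unfreeze only the final layer

trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # ['2.weight', '2.bias']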
10,083
sentence_transformers.SentenceTransformer
save
Saves all elements for this sequential sentence embedder into different sub-folders :param path: Path on disc :param model_name: Optional model name :param create_model_card: If True, create a README.md with basic information about this model :param train_datasets: Optional list with the names of the datasets used to train the model :param safe_serialization: If true, save the model using safetensors. If false, save the model the traditional PyTorch way
def save( self, path: str, model_name: Optional[str] = None, create_model_card: bool = True, train_datasets: Optional[List[str]] = None, safe_serialization: bool = True, ): """ Saves all elements for this sequential sentence embedder into different sub-folders :param path: Path on disc :param model_name: Optional model name :param create_model_card: If True, create a README.md with basic information about this model :param train_datasets: Optional list with the names of the datasets used to train the model :param safe_serialization: If true, save the model using safetensors. If false, save the model the traditional PyTorch way """ if path is None: return os.makedirs(path, exist_ok=True) logger.info("Save model to {}".format(path)) modules_config = [] # Save some model info if "__version__" not in self._model_config: self._model_config["__version__"] = { "sentence_transformers": __version__, "transformers": transformers.__version__, "pytorch": torch.__version__, } with open(os.path.join(path, "config_sentence_transformers.json"), "w") as fOut: config = self._model_config.copy() config["prompts"] = self.prompts config["default_prompt_name"] = self.default_prompt_name json.dump(config, fOut, indent=2) # Save modules for idx, name in enumerate(self._modules): module = self._modules[name] if idx == 0 and isinstance(module, Transformer): # Save transformer model in the main folder model_path = path + "/" else: model_path = os.path.join(path, str(idx) + "_" + type(module).__name__) os.makedirs(model_path, exist_ok=True) if isinstance(module, Transformer): module.save(model_path, safe_serialization=safe_serialization) else: module.save(model_path) modules_config.append( {"idx": idx, "name": name, "path": os.path.basename(model_path), "type": type(module).__module__} ) with open(os.path.join(path, "modules.json"), "w") as fOut: json.dump(modules_config, fOut, indent=2) # Create model card if create_model_card: self._create_model_card(path, model_name, train_datasets)
(self, path: str, model_name: Optional[str] = None, create_model_card: bool = True, train_datasets: Optional[List[str]] = None, safe_serialization: bool = True)
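A usage sketch, assuming a checkpoint name and output path that are purely illustrative:

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")  # example checkpoint
model.save(
    "output/my-model",       # the first Transformer module lands in the top-level folder
    model_name="my-model",   # used in the generated README.md model card
    safe_serialization=True, # store weights as safetensors
)
reloaded = SentenceTransformer("output/my-model")  # the saved folder loads like any model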
10,084
sentence_transformers.SentenceTransformer
save_to_hub
DEPRECATED, use `push_to_hub` instead. Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository. :param repo_id: Repository name for your model in the Hub, including the user or organization. :param token: An authentication token (See https://huggingface.co/settings/token) :param private: Set to true, for hosting a private model :param safe_serialization: If true, save the model using safetensors. If false, save the model the traditional PyTorch way :param commit_message: Message to commit while pushing. :param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded :param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible :param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card :param train_datasets: Datasets used to train the model. If set, the datasets will be added to the model card in the Hub. :param organization: Deprecated. Organization in which you want to push your model or tokenizer (you must be a member of this organization). :return: The url of the commit of your model in the repository on the Hugging Face Hub.
def save_to_hub( self, repo_id: str, organization: Optional[str] = None, token: Optional[str] = None, private: Optional[bool] = None, safe_serialization: bool = True, commit_message: str = "Add new SentenceTransformer model.", local_model_path: Optional[str] = None, exist_ok: bool = False, replace_model_card: bool = False, train_datasets: Optional[List[str]] = None, ) -> str: """ DEPRECATED, use `push_to_hub` instead. Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository. :param repo_id: Repository name for your model in the Hub, including the user or organization. :param token: An authentication token (See https://huggingface.co/settings/token) :param private: Set to true, for hosting a private model :param safe_serialization: If true, save the model using safetensors. If false, save the model the traditional PyTorch way :param commit_message: Message to commit while pushing. :param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded :param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible :param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card :param train_datasets: Datasets used to train the model. If set, the datasets will be added to the model card in the Hub. :param organization: Deprecated. Organization in which you want to push your model or tokenizer (you must be a member of this organization). :return: The url of the commit of your model in the repository on the Hugging Face Hub. """ logger.warning( "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. " "Please use `push_to_hub` instead for future model uploads." ) if organization: if "/" not in repo_id: repo_id = f"{organization}/{repo_id}" elif repo_id.split("/")[0] != organization: raise ValueError("Providing an `organization` to `save_to_hub` is deprecated, please only use `repo_id`.") return self.push_to_hub( repo_id=repo_id, token=token, private=private, safe_serialization=safe_serialization, commit_message=commit_message, local_model_path=local_model_path, exist_ok=exist_ok, replace_model_card=replace_model_card, train_datasets=train_datasets, )
(self, repo_id: str, organization: Optional[str] = None, token: Optional[str] = None, private: Optional[bool] = None, safe_serialization: bool = True, commit_message: str = 'Add new SentenceTransformer model.', local_model_path: Optional[str] = None, exist_ok: bool = False, replace_model_card: bool = False, train_datasets: Optional[List[str]] = None) -> str
10,085
torch.nn.modules.module
set_extra_state
Set extra state contained in the loaded `state_dict`. This function is called from :func:`load_state_dict` to handle any extra state found within the `state_dict`. Implement this function and a corresponding :func:`get_extra_state` for your module if you need to store extra state within its `state_dict`. Args: state (dict): Extra state from the `state_dict`
def set_extra_state(self, state: Any) -> None: """Set extra state contained in the loaded `state_dict`. This function is called from :func:`load_state_dict` to handle any extra state found within the `state_dict`. Implement this function and a corresponding :func:`get_extra_state` for your module if you need to store extra state within its `state_dict`. Args: state (dict): Extra state from the `state_dict` """ raise RuntimeError( "Reached a code path in Module.set_extra_state() that should never be called. " "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " "to report this bug.")
(self, state: Any) -> NoneType
10,086
sentence_transformers.SentenceTransformer
set_pooling_include_prompt
Sets the `include_prompt` attribute in the pooling layer in the model, if there is one. :param include_prompt: Whether to include the prompt in the pooling layer.
def set_pooling_include_prompt(self, include_prompt: bool) -> None: """ Sets the `include_prompt` attribute in the pooling layer in the model, if there is one. :param include_prompt: Whether to include the prompt in the pooling layer. """ for module in self: if isinstance(module, Pooling): module.include_prompt = include_prompt break
(self, include_prompt: bool) -> NoneType
10,087
torch.nn.modules.module
share_memory
See :meth:`torch.Tensor.share_memory_`.
def share_memory(self: T) -> T: r"""See :meth:`torch.Tensor.share_memory_`.""" return self._apply(lambda t: t.share_memory_())
(self: ~T) -> ~T
10,088
sentence_transformers.SentenceTransformer
smart_batching_collate
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model Here, batch is a list of InputExample instances: [InputExample(...), ...] :param batch: a batch from a SmartBatchingDataset :return: a batch of tensors for the model
def smart_batching_collate(self, batch: List["InputExample"]) -> Tuple[List[Dict[str, Tensor]], Tensor]: """ Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model Here, batch is a list of InputExample instances: [InputExample(...), ...] :param batch: a batch from a SmartBatchingDataset :return: a batch of tensors for the model """ texts = [example.texts for example in batch] sentence_features = [self.tokenize(sentence) for sentence in zip(*texts)] labels = torch.tensor([example.label for example in batch]) return sentence_features, labels
(self, batch: List[ForwardRef('InputExample')]) -> Tuple[List[Dict[str, torch.Tensor]], torch.Tensor]
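A sketch wiring smart_batching_collate into a DataLoader (model name and examples are illustrative):

from torch.utils.data import DataLoader
from sentence_transformers import InputExample, SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")  # example checkpoint
examples = [
    InputExample(texts=["A person is eating.", "Someone eats food."], label=1.0),
    InputExample(texts=["A person is eating.", "A plane is landing."], label=0.0),
]
loader = DataLoader(examples, batch_size=2, collate_fn=model.smart_batching_collate)
features, labels = next(iter(loader))  # one tokenized dict per text column, plus a label tensor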
10,089
sentence_transformers.SentenceTransformer
start_multi_process_pool
Starts a multi-process pool to process the encoding with several independent processes. This method is recommended if you want to encode on multiple GPUs or CPUs. It is advised to start only one process per GPU. This method works together with encode_multi_process and stop_multi_process_pool. :param target_devices: PyTorch target devices, e.g. ["cuda:0", "cuda:1", ...], ["npu:0", "npu:1", ...] or ["cpu", "cpu", "cpu", "cpu"]. If target_devices is None and CUDA/NPU is available, then all available CUDA/NPU devices will be used. If target_devices is None and CUDA/NPU is not available, then 4 CPU devices will be used. :return: Returns a dict with the target processes, an input queue and an output queue.
def start_multi_process_pool(self, target_devices: Optional[List[str]] = None): """ Starts a multi-process pool to process the encoding with several independent processes. This method is recommended if you want to encode on multiple GPUs or CPUs. It is advised to start only one process per GPU. This method works together with encode_multi_process and stop_multi_process_pool. :param target_devices: PyTorch target devices, e.g. ["cuda:0", "cuda:1", ...], ["npu:0", "npu:1", ...] or ["cpu", "cpu", "cpu", "cpu"]. If target_devices is None and CUDA/NPU is available, then all available CUDA/NPU devices will be used. If target_devices is None and CUDA/NPU is not available, then 4 CPU devices will be used. :return: Returns a dict with the target processes, an input queue and an output queue. """ if target_devices is None: if torch.cuda.is_available(): target_devices = ["cuda:{}".format(i) for i in range(torch.cuda.device_count())] elif is_torch_npu_available(): target_devices = ["npu:{}".format(i) for i in range(torch.npu.device_count())] else: logger.info("CUDA/NPU is not available. Starting 4 CPU workers") target_devices = ["cpu"] * 4 logger.info("Start multi-process pool on devices: {}".format(", ".join(map(str, target_devices)))) self.to("cpu") self.share_memory() ctx = mp.get_context("spawn") input_queue = ctx.Queue() output_queue = ctx.Queue() processes = [] for device_id in target_devices: p = ctx.Process( target=SentenceTransformer._encode_multi_process_worker, args=(device_id, self, input_queue, output_queue), daemon=True, ) p.start() processes.append(p) return {"input": input_queue, "output": output_queue, "processes": processes}
(self, target_devices: Optional[List[str]] = None)
10,090
torch.nn.modules.module
state_dict
Return a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to ``None`` are not included. .. note:: The returned object is a shallow copy. It contains references to the module's parameters and buffers. .. warning:: Currently ``state_dict()`` also accepts positional arguments for ``destination``, ``prefix`` and ``keep_vars`` in order. However, this is being deprecated and keyword arguments will be enforced in future releases. .. warning:: Please avoid the use of argument ``destination`` as it is not designed for end-users. Args: destination (dict, optional): If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned. Default: ``None``. prefix (str, optional): a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ``''``. keep_vars (bool, optional): by default the :class:`~torch.Tensor` s returned in the state dict are detached from autograd. If it's set to ``True``, detaching will not be performed. Default: ``False``. Returns: dict: a dictionary containing a whole state of the module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> module.state_dict().keys() ['bias', 'weight']
def state_dict(self, *args, destination=None, prefix='', keep_vars=False): r"""Return a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to ``None`` are not included. .. note:: The returned object is a shallow copy. It contains references to the module's parameters and buffers. .. warning:: Currently ``state_dict()`` also accepts positional arguments for ``destination``, ``prefix`` and ``keep_vars`` in order. However, this is being deprecated and keyword arguments will be enforced in future releases. .. warning:: Please avoid the use of argument ``destination`` as it is not designed for end-users. Args: destination (dict, optional): If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned. Default: ``None``. prefix (str, optional): a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ``''``. keep_vars (bool, optional): by default the :class:`~torch.Tensor` s returned in the state dict are detached from autograd. If it's set to ``True``, detaching will not be performed. Default: ``False``. Returns: dict: a dictionary containing a whole state of the module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> module.state_dict().keys() ['bias', 'weight'] """ # TODO: Remove `args` and the parsing logic when BC allows. if len(args) > 0: if destination is None: destination = args[0] if len(args) > 1 and prefix == '': prefix = args[1] if len(args) > 2 and keep_vars is False: keep_vars = args[2] # DeprecationWarning is ignored by default warnings.warn( "Positional args are being deprecated, use kwargs instead. Refer to " "https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict" " for details.") if destination is None: destination = OrderedDict() destination._metadata = OrderedDict() local_metadata = dict(version=self._version) if hasattr(destination, "_metadata"): destination._metadata[prefix[:-1]] = local_metadata for hook in self._state_dict_pre_hooks.values(): hook(self, prefix, keep_vars) self._save_to_state_dict(destination, prefix, keep_vars) for name, module in self._modules.items(): if module is not None: module.state_dict(destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars) for hook in self._state_dict_hooks.values(): hook_result = hook(self, destination, prefix, local_metadata) if hook_result is not None: destination = hook_result return destination
(self, *args, destination=None, prefix='', keep_vars=False)
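A small sketch showing the key layout, including buffers (the architecture is illustrative):

import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.BatchNorm1d(4))
sd = model.state_dict()
# Keys join submodule names and tensor names; BatchNorm buffers are included too.
print(list(sd.keys()))
# ['0.weight', '0.bias', '1.weight', '1.bias',
#  '1.running_mean', '1.running_var', '1.num_batches_tracked']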
10,091
sentence_transformers.SentenceTransformer
stop_multi_process_pool
Stops all processes started with start_multi_process_pool
@staticmethod def stop_multi_process_pool(pool): """ Stops all processes started with start_multi_process_pool """ for p in pool["processes"]: p.terminate() for p in pool["processes"]: p.join() p.close() pool["input"].close() pool["output"].close()
(pool)
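A sketch of the full pool lifecycle; the checkpoint name and corpus are illustrative, and the __main__ guard matters because the workers are spawned:

from sentence_transformers import SentenceTransformer

if __name__ == "__main__":  # required: the pool uses the "spawn" start method
    model = SentenceTransformer("all-MiniLM-L6-v2")  # example checkpoint
    sentences = ["This is sentence {}".format(i) for i in range(100000)]

    pool = model.start_multi_process_pool()  # one worker per GPU, or 4 CPU workers
    try:
        embeddings = model.encode_multi_process(sentences, pool, batch_size=64)
        print(embeddings.shape)  # (100000, embedding_dim)
    finally:
        SentenceTransformer.stop_multi_process_pool(pool)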
10,092
torch.nn.modules.module
to
Move and/or cast the parameters and buffers. This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) :noindex: .. function:: to(dtype, non_blocking=False) :noindex: .. function:: to(tensor, non_blocking=False) :noindex: .. function:: to(memory_format=torch.channels_last) :noindex: Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point or complex :attr:`dtype`\ s. In addition, this method will only cast the floating point or complex parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point or complex dtype of the parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) >>> linear.weight Parameter containing: tensor([[ 0.3741+0.j, 0.2382+0.j], [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) tensor([[0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
def to(self, *args, **kwargs): r"""Move and/or cast the parameters and buffers. This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) :noindex: .. function:: to(dtype, non_blocking=False) :noindex: .. function:: to(tensor, non_blocking=False) :noindex: .. function:: to(memory_format=torch.channels_last) :noindex: Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point or complex :attr:`dtype`\ s. In addition, this method will only cast the floating point or complex parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point or complex dtype of the parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) >>> linear.weight Parameter containing: tensor([[ 0.3741+0.j, 0.2382+0.j], [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) tensor([[0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128) """ device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs) if dtype is not None: if not (dtype.is_floating_point or dtype.is_complex): raise TypeError('nn.Module.to only accepts floating point or complex ' f'dtypes, but got desired dtype={dtype}') if dtype.is_complex: warnings.warn( "Complex modules are a new feature under active development whose design may change, " "and some modules might not work as expected when using complex tensors as parameters or buffers. " "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml " "if a complex module does not work as expected.") def convert(t): try: if convert_to_format is not None and t.dim() in (4, 5): return t.to( device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking, memory_format=convert_to_format, ) return t.to( device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking, ) except NotImplementedError as e: if str(e) == "Cannot copy out of meta tensor; no data!": raise NotImplementedError( f"{e} Please use torch.nn.Module.to_empty() instead of torch.nn.Module.to() " f"when moving module from meta to a different device." ) from None else: raise return self._apply(convert)
(self, *args, **kwargs)
10,093
torch.nn.modules.module
to_empty
Move the parameters and buffers to the specified device without copying storage. Args: device (:class:`torch.device`): The desired device of the parameters and buffers in this module. recurse (bool): Whether parameters and buffers of submodules should be recursively moved to the specified device. Returns: Module: self
def to_empty(self: T, *, device: Optional[DeviceLikeType], recurse: bool = True) -> T: r"""Move the parameters and buffers to the specified device without copying storage. Args: device (:class:`torch.device`): The desired device of the parameters and buffers in this module. recurse (bool): Whether parameters and buffers of submodules should be recursively moved to the specified device. Returns: Module: self """ return self._apply(lambda t: torch.empty_like(t, device=device), recurse=recurse)
(self: ~T, *, device: Union[int, str, torch.device, NoneType], recurse: bool = True) -> ~T
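A sketch of the meta-device pattern this method exists for (requires a PyTorch version with meta-device contexts; the sizes are illustrative):

import torch
import torch.nn as nn

# Construct on the meta device: shapes only, no storage allocated.
with torch.device("meta"):
    model = nn.Linear(1024, 1024)

model = model.to_empty(device="cpu")  # allocate real storage; values are uninitialized
model.reset_parameters()              # must (re)initialize, or load a state_dict instead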
10,094
sentence_transformers.SentenceTransformer
tokenize
Tokenizes the texts
def tokenize(self, texts: Union[List[str], List[Dict], List[Tuple[str, str]]]): """ Tokenizes the texts """ kwargs = {} # HPU models reach optimal performance if the padding is not dynamic if self.device.type == "hpu": kwargs["padding"] = "max_length" try: return self._first_module().tokenize(texts, **kwargs) except TypeError: # In case some Module does not allow for kwargs in tokenize, we also try without any return self._first_module().tokenize(texts)
(self, texts: Union[List[str], List[Dict], List[Tuple[str, str]]])
10,095
torch.nn.modules.module
train
Set the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Args: mode (bool): whether to set training mode (``True``) or evaluation mode (``False``). Default: ``True``. Returns: Module: self
def train(self: T, mode: bool = True) -> T: r"""Set the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Args: mode (bool): whether to set training mode (``True``) or evaluation mode (``False``). Default: ``True``. Returns: Module: self """ if not isinstance(mode, bool): raise ValueError("training mode is expected to be boolean") self.training = mode for module in self.children(): module.train(mode) return self
(self: ~T, mode: bool = True) -> ~T
10,096
sentence_transformers.SentenceTransformer
truncate_sentence_embeddings
In this context, `model.encode` outputs sentence embeddings truncated at dimension `truncate_dim`. This may be useful when you are using the same model for different applications where different dimensions are needed. :param truncate_dim: The dimension to truncate sentence embeddings to. `None` does no truncation. Example:: from sentence_transformers import SentenceTransformer model = SentenceTransformer("model-name") with model.truncate_sentence_embeddings(truncate_dim=16): embeddings_truncated = model.encode(["hello there", "hiya"]) assert embeddings_truncated.shape[-1] == 16
@contextmanager def truncate_sentence_embeddings(self, truncate_dim: Optional[int]): """ In this context, `model.encode` outputs sentence embeddings truncated at dimension `truncate_dim`. This may be useful when you are using the same model for different applications where different dimensions are needed. :param truncate_dim: The dimension to truncate sentence embeddings to. `None` does no truncation. Example:: from sentence_transformers import SentenceTransformer model = SentenceTransformer("model-name") with model.truncate_sentence_embeddings(truncate_dim=16): embeddings_truncated = model.encode(["hello there", "hiya"]) assert embeddings_truncated.shape[-1] == 16 """ original_output_dim = self.truncate_dim try: self.truncate_dim = truncate_dim yield finally: self.truncate_dim = original_output_dim
(self, truncate_dim: Optional[int])
10,097
torch.nn.modules.module
type
Casts all parameters and buffers to :attr:`dst_type`. .. note:: This method modifies the module in-place. Args: dst_type (type or string): the desired type Returns: Module: self
def type(self: T, dst_type: Union[dtype, str]) -> T: r"""Casts all parameters and buffers to :attr:`dst_type`. .. note:: This method modifies the module in-place. Args: dst_type (type or string): the desired type Returns: Module: self """ return self._apply(lambda t: t.type(dst_type))
(self: ~T, dst_type: Union[torch.dtype, str]) -> ~T
10,098
torch.nn.modules.module
xpu
Move all model parameters and buffers to the XPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self
def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: r"""Move all model parameters and buffers to the XPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self """ return self._apply(lambda t: t.xpu(device))
(self: ~T, device: Union[int, torch.device, NoneType] = None) -> ~T
10,099
torch.nn.modules.module
zero_grad
Reset gradients of all model parameters. See similar function under :class:`torch.optim.Optimizer` for more context. Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details.
def zero_grad(self, set_to_none: bool = True) -> None: r"""Reset gradients of all model parameters. See similar function under :class:`torch.optim.Optimizer` for more context. Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. """ if getattr(self, '_is_replica', False): warnings.warn( "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. " "The parameters are copied (in a differentiable manner) from the original module. " "This means they are not leaf nodes in autograd and so don't accumulate gradients. " "If you need gradients in your forward method, consider using autograd.grad instead.") for p in self.parameters(): if p.grad is not None: if set_to_none: p.grad = None else: if p.grad.grad_fn is not None: p.grad.detach_() else: p.grad.requires_grad_(False) p.grad.zero_()
(self, set_to_none: bool = True) -> NoneType
10,100
sentence_transformers.datasets.SentencesDataset
SentencesDataset
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader.
class SentencesDataset(Dataset): """ DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader. """ def __init__(self, examples: List[InputExample], model: SentenceTransformer): self.examples = examples def __getitem__(self, item): return self.examples[item] def __len__(self): return len(self.examples)
(examples: List[sentence_transformers.readers.InputExample.InputExample], model: sentence_transformers.SentenceTransformer.SentenceTransformer)
10,102
sentence_transformers.datasets.SentencesDataset
__getitem__
null
def __getitem__(self, item): return self.examples[item]
(self, item)
10,103
sentence_transformers.datasets.SentencesDataset
__init__
null
def __init__(self, examples: List[InputExample], model: SentenceTransformer): self.examples = examples
(self, examples: List[sentence_transformers.readers.InputExample.InputExample], model: sentence_transformers.SentenceTransformer.SentenceTransformer)
10,104
sentence_transformers.datasets.SentencesDataset
__len__
null
def __len__(self): return len(self.examples)
(self)
10,111
sentence_transformers.quantization
quantize_embeddings
Quantizes embeddings to a lower precision. This can be used to reduce the memory footprint and increase the speed of similarity search. The supported precisions are "float32", "int8", "uint8", "binary", and "ubinary". :param embeddings: Unquantized (e.g. float) embeddings to quantize to a given precision :param precision: The precision to convert to. Options are "float32", "int8", "uint8", "binary", "ubinary". :param ranges: Ranges for quantization of embeddings. This is only used for int8 quantization, where the ranges refer to the minimum and maximum values for each dimension. So, it's a 2D array with shape (2, embedding_dim). Default is None, which means that the ranges will be calculated from the calibration embeddings. :type ranges: Optional[np.ndarray] :param calibration_embeddings: Embeddings used for calibration during quantization. This is only used for int8 quantization, where the calibration embeddings can be used to compute ranges, i.e. the minimum and maximum values for each dimension. Default is None, which means that the ranges will be calculated from the query embeddings. This is not recommended. :type calibration_embeddings: Optional[np.ndarray] :return: Quantized embeddings with the specified precision
def quantize_embeddings( embeddings: Union[Tensor, np.ndarray], precision: Literal["float32", "int8", "uint8", "binary", "ubinary"], ranges: Optional[np.ndarray] = None, calibration_embeddings: Optional[np.ndarray] = None, ) -> np.ndarray: """ Quantizes embeddings to a lower precision. This can be used to reduce the memory footprint and increase the speed of similarity search. The supported precisions are "float32", "int8", "uint8", "binary", and "ubinary". :param embeddings: Unquantized (e.g. float) embeddings to quantize to a given precision :param precision: The precision to convert to. Options are "float32", "int8", "uint8", "binary", "ubinary". :param ranges: Ranges for quantization of embeddings. This is only used for int8 quantization, where the ranges refer to the minimum and maximum values for each dimension. So, it's a 2D array with shape (2, embedding_dim). Default is None, which means that the ranges will be calculated from the calibration embeddings. :type ranges: Optional[np.ndarray] :param calibration_embeddings: Embeddings used for calibration during quantization. This is only used for int8 quantization, where the calibration embeddings can be used to compute ranges, i.e. the minimum and maximum values for each dimension. Default is None, which means that the ranges will be calculated from the query embeddings. This is not recommended. :type calibration_embeddings: Optional[np.ndarray] :return: Quantized embeddings with the specified precision """ if isinstance(embeddings, Tensor): embeddings = embeddings.cpu().numpy() elif isinstance(embeddings, list): if isinstance(embeddings[0], Tensor): embeddings = [embedding.cpu().numpy() for embedding in embeddings] embeddings = np.array(embeddings) if embeddings.dtype in (np.uint8, np.int8): raise Exception("Embeddings to quantize must be float rather than int8 or uint8.") if precision == "float32": return embeddings.astype(np.float32) if precision.endswith("int8"): # Either use the 1. provided ranges, 2. the calibration dataset or 3. the provided embeddings if ranges is None: if calibration_embeddings is not None: ranges = np.vstack((np.min(calibration_embeddings, axis=0), np.max(calibration_embeddings, axis=0))) else: if embeddings.shape[0] < 100: logger.warning( f"Computing {precision} quantization buckets based on {len(embeddings)} embedding{'s' if len(embeddings) != 1 else ''}." f" {precision} quantization is more stable with `ranges` calculated from more embeddings " "or a `calibration_embeddings` that can be used to calculate the buckets." ) ranges = np.vstack((np.min(embeddings, axis=0), np.max(embeddings, axis=0))) starts = ranges[0, :] steps = (ranges[1, :] - ranges[0, :]) / 255 if precision == "uint8": return ((embeddings - starts) / steps).astype(np.uint8) elif precision == "int8": return ((embeddings - starts) / steps - 128).astype(np.int8) if precision == "binary": return (np.packbits(embeddings > 0).reshape(embeddings.shape[0], -1) - 128).astype(np.int8) if precision == "ubinary": return np.packbits(embeddings > 0).reshape(embeddings.shape[0], -1) raise ValueError(f"Precision {precision} is not supported")
(embeddings: Union[torch.Tensor, numpy.ndarray], precision: Literal['float32', 'int8', 'uint8', 'binary', 'ubinary'], ranges: Optional[numpy.ndarray] = None, calibration_embeddings: Optional[numpy.ndarray] = None) -> numpy.ndarray
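A brief usage sketch; the random array stands in for real model output from encode():

import numpy as np
from sentence_transformers.quantization import quantize_embeddings

embeddings = np.random.randn(1000, 384).astype(np.float32)         # stand-in for encode() output
int8_emb = quantize_embeddings(embeddings, precision="int8")       # ranges derived from the inputs themselves
ubin_emb = quantize_embeddings(embeddings, precision="ubinary")    # bit-packed: shape becomes (1000, 48)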
10,115
tree_format._text
format_tree
null
def format_tree(node, format_node, get_children): lines = itertools.chain( [format_node(node)], _format_tree(node, format_node, get_children), [u''], ) return u'\n'.join(lines)
(node, format_node, get_children)
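The node representation is left entirely to the caller; a common choice is (label, children) tuples, as in this sketch:

from tree_format import format_tree

tree = ('root', [
    ('branch', [('leaf-1', []), ('leaf-2', [])]),
    ('lone-leaf', []),
])
# format_node extracts the label, get_children the child list
print(format_tree(tree, format_node=lambda n: n[0], get_children=lambda n: n[1]))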
10,116
tree_format._text
print_tree
null
def print_tree(*args, **kwargs): print(format_tree(*args, **kwargs))
(*args, **kwargs)
10,117
importlib.metadata
metadata
Get the metadata for the named package. :param distribution_name: The name of the distribution package to query. :return: A PackageMetadata containing the parsed metadata.
def metadata(distribution_name) -> _meta.PackageMetadata: """Get the metadata for the named package. :param distribution_name: The name of the distribution package to query. :return: A PackageMetadata containing the parsed metadata. """ return Distribution.from_name(distribution_name).metadata
(distribution_name) -> importlib.metadata._meta.PackageMetadata
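The returned PackageMetadata behaves like an email-style mapping; a small sketch (any installed distribution name works in place of "pygments"):

from importlib.metadata import metadata

meta = metadata("pygments")
print(meta["Name"], meta["Version"])
print(meta["Summary"])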
10,119
pygments.formatter
Formatter
Converts a token stream to text. Formatters should have attributes to help selecting them. These are similar to the corresponding :class:`~pygments.lexer.Lexer` attributes. .. autoattribute:: name :no-value: .. autoattribute:: aliases :no-value: .. autoattribute:: filenames :no-value: You can pass options as keyword arguments to the constructor. All formatters accept these basic options: ``style`` The style to use, can be a string or a Style subclass (default: "default"). Not used by e.g. the TerminalFormatter. ``full`` Tells the formatter to output a "full" document, i.e. a complete self-contained document. This doesn't have any effect for some formatters (default: false). ``title`` If ``full`` is true, the title that should be used to caption the document (default: ''). ``encoding`` If given, must be an encoding name. This will be used to convert the Unicode token strings to byte strings in the output. If it is "" or None, Unicode strings will be written to the output file, which most file-like objects do not support (default: None). ``outencoding`` Overrides ``encoding`` if given.
class Formatter: """ Converts a token stream to text. Formatters should have attributes to help selecting them. These are similar to the corresponding :class:`~pygments.lexer.Lexer` attributes. .. autoattribute:: name :no-value: .. autoattribute:: aliases :no-value: .. autoattribute:: filenames :no-value: You can pass options as keyword arguments to the constructor. All formatters accept these basic options: ``style`` The style to use, can be a string or a Style subclass (default: "default"). Not used by e.g. the TerminalFormatter. ``full`` Tells the formatter to output a "full" document, i.e. a complete self-contained document. This doesn't have any effect for some formatters (default: false). ``title`` If ``full`` is true, the title that should be used to caption the document (default: ''). ``encoding`` If given, must be an encoding name. This will be used to convert the Unicode token strings to byte strings in the output. If it is "" or None, Unicode strings will be written to the output file, which most file-like objects do not support (default: None). ``outencoding`` Overrides ``encoding`` if given. """ #: Full name for the formatter, in human-readable form. name = None #: A list of short, unique identifiers that can be used to lookup #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`. aliases = [] #: A list of fnmatch patterns that match filenames for which this #: formatter can produce output. The patterns in this list should be unique #: among all formatters. filenames = [] #: If True, this formatter outputs Unicode strings when no encoding #: option is given. unicodeoutput = True def __init__(self, **options): """ As with lexers, this constructor takes arbitrary optional arguments, and if you override it, you should first process your own options, then call the base class implementation. """ self.style = _lookup_style(options.get('style', 'default')) self.full = get_bool_opt(options, 'full', False) self.title = options.get('title', '') self.encoding = options.get('encoding', None) or None if self.encoding in ('guess', 'chardet'): # can happen for e.g. pygmentize -O encoding=guess self.encoding = 'utf-8' self.encoding = options.get('outencoding') or self.encoding self.options = options def get_style_defs(self, arg=''): """ This method must return statements or declarations suitable to define the current style for subsequent highlighted text (e.g. CSS classes in the `HTMLFormatter`). The optional argument `arg` can be used to modify the generation and is formatter dependent (it is standardized because it can be given on the command line). This method is called by the ``-S`` :doc:`command-line option <cmdline>`, the `arg` is then given by the ``-a`` option. """ return '' def format(self, tokensource, outfile): """ This method must format the tokens from the `tokensource` iterable and write the formatted version to the file object `outfile`. Formatter options can control how exactly the tokens are converted. """ if self.encoding: # wrap the outfile in a StreamWriter outfile = codecs.lookup(self.encoding)[3](outfile) return self.format_unencoded(tokensource, outfile) # Allow writing Formatter[str] or Formatter[bytes]. That's equivalent to # Formatter. This helps when using third-party type stubs from typeshed. def __class_getitem__(cls, name): return cls
(**options)
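A minimal, hypothetical subclass sketch: only format_unencoded() needs to be supplied, and the base constructor handles the shared options listed above (the class name and alias are made up for illustration):

from pygments.formatter import Formatter

class PlainTextFormatter(Formatter):
    """Hypothetical demo formatter that drops all markup."""
    name = 'Plain text (demo)'
    aliases = ['plain-demo']
    filenames = []

    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            outfile.write(value)    # ignore the token type entirely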
10,120
pygments.formatter
__init__
As with lexers, this constructor takes arbitrary optional arguments, and if you override it, you should first process your own options, then call the base class implementation.
def __init__(self, **options): """ As with lexers, this constructor takes arbitrary optional arguments, and if you override it, you should first process your own options, then call the base class implementation. """ self.style = _lookup_style(options.get('style', 'default')) self.full = get_bool_opt(options, 'full', False) self.title = options.get('title', '') self.encoding = options.get('encoding', None) or None if self.encoding in ('guess', 'chardet'): # can happen for e.g. pygmentize -O encoding=guess self.encoding = 'utf-8' self.encoding = options.get('outencoding') or self.encoding self.options = options
(self, **options)
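Following the docstring's advice, a subclass processes its own options first and then delegates to the base class; the 'uppercase' option here is invented for illustration:

from pygments.formatter import Formatter

class UpperTextFormatter(Formatter):
    def __init__(self, **options):
        self.uppercase = options.get('uppercase', False)   # our own option first
        super().__init__(**options)                        # then style/full/encoding/etc.

    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            outfile.write(value.upper() if self.uppercase else value)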
10,121
pygments.formatter
format
This method must format the tokens from the `tokensource` iterable and write the formatted version to the file object `outfile`. Formatter options can control how exactly the tokens are converted.
def format(self, tokensource, outfile): """ This method must format the tokens from the `tokensource` iterable and write the formatted version to the file object `outfile`. Formatter options can control how exactly the tokens are converted. """ if self.encoding: # wrap the outfile in a StreamWriter outfile = codecs.lookup(self.encoding)[3](outfile) return self.format_unencoded(tokensource, outfile)
(self, tokensource, outfile)
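format() can be driven directly with a lexer's token stream; with `encoding` left at its default of None, a text buffer is a valid outfile, as in this sketch:

from io import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

buf = StringIO()
HtmlFormatter().format(PythonLexer().get_tokens('print("hi")\n'), buf)
html = buf.getvalue()   # starts with '<div class="highlight">...'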
10,122
pygments.formatter
get_style_defs
This method must return statements or declarations suitable to define the current style for subsequent highlighted text (e.g. CSS classes in the `HTMLFormatter`). The optional argument `arg` can be used to modify the generation and is formatter dependent (it is standardized because it can be given on the command line). This method is called by the ``-S`` :doc:`command-line option <cmdline>`, the `arg` is then given by the ``-a`` option.
def get_style_defs(self, arg=''): """ This method must return statements or declarations suitable to define the current style for subsequent highlighted text (e.g. CSS classes in the `HTMLFormatter`). The optional argument `arg` can be used to modify the generation and is formatter dependent (it is standardized because it can be given on the command line). This method is called by the ``-S`` :doc:`command-line option <cmdline>`, the `arg` is then given by the ``-a`` option. """ return ''
(self, arg='')
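For the HTML formatter the returned statements are CSS rules, and passing a selector as `arg` scopes them; a quick sketch:

from pygments.formatters import HtmlFormatter

css = HtmlFormatter(style='default').get_style_defs('.highlight')
print(css.splitlines()[0])   # first rule of the generated stylesheet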
10,123
pygments.formatters.html
HtmlFormatter
Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). The ``<div>``'s CSS class can be set by the `cssclass` option. If the `linenos` option is set to ``"table"``, the ``<pre>`` is additionally wrapped inside a ``<table>`` which has one row and two cells: one containing the line numbers and one containing the code. Example: .. sourcecode:: html <div class="highlight" > <table><tr> <td class="linenos" title="click to toggle" onclick="with (this.firstChild.style) { display = (display == '') ? 'none' : '' }"> <pre>1 2</pre> </td> <td class="code"> <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar): <span class="Ke">pass</span> </pre> </td> </tr></table></div> (whitespace added to improve clarity). A list of lines can be specified using the `hl_lines` option to make these lines highlighted (as of Pygments 0.11). With the `full` option, a complete HTML 4 document is output, including the style definitions inside a ``<style>`` tag, or in a separate file if the `cssfile` option is given. When `tagsfile` is set to the path of a ctags index file, it is used to generate hyperlinks from names to their definition. You must enable `lineanchors` and run ctags with the `-n` option for this to work. The `python-ctags` module from PyPI must be installed to use this feature; otherwise a `RuntimeError` will be raised. The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string containing CSS rules for the CSS classes used by the formatter. The argument `arg` can be used to specify additional CSS selectors that are prepended to the classes. A call `fmter.get_style_defs('td .code')` would result in the following CSS classes: .. sourcecode:: css td .code .kw { font-weight: bold; color: #00FF00 } td .code .cm { color: #999999 } ... If you have Pygments 0.6 or higher, you can also pass a list or tuple to the `get_style_defs()` method to request multiple prefixes for the tokens: .. sourcecode:: python formatter.get_style_defs(['div.syntax pre', 'pre.syntax']) The output would then look like this: .. sourcecode:: css div.syntax pre .kw, pre.syntax .kw { font-weight: bold; color: #00FF00 } div.syntax pre .cm, pre.syntax .cm { color: #999999 } ... Additional options accepted: `nowrap` If set to ``True``, don't add a ``<pre>`` and a ``<div>`` tag around the tokens. This disables most other options (default: ``False``). `full` Tells the formatter to output a "full" document, i.e. a complete self-contained document (default: ``False``). `title` If `full` is true, the title that should be used to caption the document (default: ``''``). `style` The style to use, can be a string or a Style subclass (default: ``'default'``). This option has no effect if the `cssfile` and `noclobber_cssfile` option are given and the file specified in `cssfile` exists. `noclasses` If set to true, token ``<span>`` tags (as well as line number elements) will not use CSS classes, but inline styles. This is not recommended for larger pieces of code since it increases output size by quite a bit (default: ``False``). `classprefix` Since the token types use relatively short class names, they may clash with some of your own class names. In this case you can use the `classprefix` option to give a string to prepend to all Pygments-generated CSS class names for token types. Note that this option also affects the output of `get_style_defs()`. 
`cssclass` CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``). If you set this option, the default selector for `get_style_defs()` will be this class. .. versionadded:: 0.9 If you select the ``'table'`` line numbers, the wrapping table will have a CSS class of this string plus ``'table'``, the default is accordingly ``'highlighttable'``. `cssstyles` Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``). `prestyles` Inline CSS styles for the ``<pre>`` tag (default: ``''``). .. versionadded:: 0.11 `cssfile` If the `full` option is true and this option is given, it must be the name of an external file. If the filename does not include an absolute path, the file's path will be assumed to be relative to the main output file's path, if the latter can be found. The stylesheet is then written to this file instead of the HTML file. .. versionadded:: 0.6 `noclobber_cssfile` If `cssfile` is given and the specified file exists, the css file will not be overwritten. This allows the use of the `full` option in combination with a user specified css file. Default is ``False``. .. versionadded:: 1.1 `linenos` If set to ``'table'``, output line numbers as a table with two cells, one containing the line numbers, the other the whole code. This is copy-and-paste-friendly, but may cause alignment problems with some browsers or fonts. If set to ``'inline'``, the line numbers will be integrated in the ``<pre>`` tag that contains the code (that setting is *new in Pygments 0.8*). For compatibility with Pygments 0.7 and earlier, every true value except ``'inline'`` means the same as ``'table'`` (in particular, that means also ``True``). The default value is ``False``, which means no line numbers at all. **Note:** with the default ("table") line number mechanism, the line numbers and code can have different line heights in Internet Explorer unless you give the enclosing ``<pre>`` tags an explicit ``line-height`` CSS property (you get the default line spacing with ``line-height: 125%``). `hl_lines` Specify a list of lines to be highlighted. The line numbers are always relative to the input (i.e. the first line is line 1) and are independent of `linenostart`. .. versionadded:: 0.11 `linenostart` The line number for the first line (default: ``1``). `linenostep` If set to a number n > 1, only every nth line number is printed. `linenospecial` If set to a number n > 0, every nth line number is given the CSS class ``"special"`` (default: ``0``). `nobackground` If set to ``True``, the formatter won't output the background color for the wrapping element (this automatically defaults to ``False`` when there is no wrapping element [e.g. no argument for the `get_style_defs` method given]) (default: ``False``). .. versionadded:: 0.6 `lineseparator` This string is output between lines of code. It defaults to ``"\n"``, which is enough to break a line inside ``<pre>`` tags, but you can e.g. set it to ``"<br>"`` to get HTML line breaks. .. versionadded:: 0.7 `lineanchors` If set to a nonempty string, e.g. ``foo``, the formatter will wrap each output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``. This allows easy linking to certain lines. .. versionadded:: 0.9 `linespans` If set to a nonempty string, e.g. ``foo``, the formatter will wrap each output line in a span tag with an ``id`` of ``foo-linenumber``. This allows easy access to lines via javascript. .. versionadded:: 1.6 `anchorlinenos` If set to `True`, will wrap line numbers in <a> tags. 
Used in combination with `linenos` and `lineanchors`. `tagsfile` If set to the path of a ctags file, wrap names in anchor tags that link to their definitions. `lineanchors` should be used, and the tags file should specify line numbers (see the `-n` option to ctags). The tags file is assumed to be encoded in UTF-8. .. versionadded:: 1.6 `tagurlformat` A string formatting pattern used to generate links to ctags definitions. Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`. Defaults to an empty string, resulting in just `#prefix-number` links. .. versionadded:: 1.6 `filename` A string used to generate a filename when rendering ``<pre>`` blocks, for example if displaying source code. If `linenos` is set to ``'table'`` then the filename will be rendered in an initial row containing a single `<th>` which spans both columns. .. versionadded:: 2.1 `wrapcode` Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended by the HTML5 specification. .. versionadded:: 2.4 `debug_token_types` Add ``title`` attributes to all token ``<span>`` tags that show the name of the token. .. versionadded:: 2.10 **Subclassing the HTML formatter** .. versionadded:: 0.7 The HTML formatter is now built in a way that allows easy subclassing, thus customizing the output HTML code. The `format()` method calls `self._format_lines()` which returns a generator that yields tuples of ``(1, line)``, where the ``1`` indicates that the ``line`` is a line of the formatted source code. If the `nowrap` option is set, the generator is simply iterated over and the resulting HTML is output. Otherwise, `format()` calls `self.wrap()`, which wraps the generator with other generators. These may add some HTML code to the one generated by `_format_lines()`, either by modifying the lines generated by the latter, then yielding them again with ``(1, line)``, and/or by yielding other HTML code before or after the lines, with ``(0, html)``. The distinction between source lines and other code makes it possible to wrap the generator multiple times. The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag. A custom `HtmlFormatter` subclass could look like this: .. sourcecode:: python class CodeHtmlFormatter(HtmlFormatter): def wrap(self, source, *, include_div): return self._wrap_code(source) def _wrap_code(self, source): yield 0, '<code>' for i, t in source: if i == 1: # it's a line of formatted code t += '<br>' yield i, t yield 0, '</code>' This results in wrapping the formatted lines with a ``<code>`` tag, where the source lines are broken using ``<br>`` tags. After calling `wrap()`, the `format()` method also adds the "line numbers" and/or "full document" wrappers if the respective options are set. Then, all HTML yielded by the wrapped generator is output.
class HtmlFormatter(Formatter): r""" Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). The ``<div>``'s CSS class can be set by the `cssclass` option. If the `linenos` option is set to ``"table"``, the ``<pre>`` is additionally wrapped inside a ``<table>`` which has one row and two cells: one containing the line numbers and one containing the code. Example: .. sourcecode:: html <div class="highlight" > <table><tr> <td class="linenos" title="click to toggle" onclick="with (this.firstChild.style) { display = (display == '') ? 'none' : '' }"> <pre>1 2</pre> </td> <td class="code"> <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar): <span class="Ke">pass</span> </pre> </td> </tr></table></div> (whitespace added to improve clarity). A list of lines can be specified using the `hl_lines` option to make these lines highlighted (as of Pygments 0.11). With the `full` option, a complete HTML 4 document is output, including the style definitions inside a ``<style>`` tag, or in a separate file if the `cssfile` option is given. When `tagsfile` is set to the path of a ctags index file, it is used to generate hyperlinks from names to their definition. You must enable `lineanchors` and run ctags with the `-n` option for this to work. The `python-ctags` module from PyPI must be installed to use this feature; otherwise a `RuntimeError` will be raised. The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string containing CSS rules for the CSS classes used by the formatter. The argument `arg` can be used to specify additional CSS selectors that are prepended to the classes. A call `fmter.get_style_defs('td .code')` would result in the following CSS classes: .. sourcecode:: css td .code .kw { font-weight: bold; color: #00FF00 } td .code .cm { color: #999999 } ... If you have Pygments 0.6 or higher, you can also pass a list or tuple to the `get_style_defs()` method to request multiple prefixes for the tokens: .. sourcecode:: python formatter.get_style_defs(['div.syntax pre', 'pre.syntax']) The output would then look like this: .. sourcecode:: css div.syntax pre .kw, pre.syntax .kw { font-weight: bold; color: #00FF00 } div.syntax pre .cm, pre.syntax .cm { color: #999999 } ... Additional options accepted: `nowrap` If set to ``True``, don't add a ``<pre>`` and a ``<div>`` tag around the tokens. This disables most other options (default: ``False``). `full` Tells the formatter to output a "full" document, i.e. a complete self-contained document (default: ``False``). `title` If `full` is true, the title that should be used to caption the document (default: ``''``). `style` The style to use, can be a string or a Style subclass (default: ``'default'``). This option has no effect if the `cssfile` and `noclobber_cssfile` option are given and the file specified in `cssfile` exists. `noclasses` If set to true, token ``<span>`` tags (as well as line number elements) will not use CSS classes, but inline styles. This is not recommended for larger pieces of code since it increases output size by quite a bit (default: ``False``). `classprefix` Since the token types use relatively short class names, they may clash with some of your own class names. In this case you can use the `classprefix` option to give a string to prepend to all Pygments-generated CSS class names for token types. Note that this option also affects the output of `get_style_defs()`. 
`cssclass` CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``). If you set this option, the default selector for `get_style_defs()` will be this class. .. versionadded:: 0.9 If you select the ``'table'`` line numbers, the wrapping table will have a CSS class of this string plus ``'table'``, the default is accordingly ``'highlighttable'``. `cssstyles` Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``). `prestyles` Inline CSS styles for the ``<pre>`` tag (default: ``''``). .. versionadded:: 0.11 `cssfile` If the `full` option is true and this option is given, it must be the name of an external file. If the filename does not include an absolute path, the file's path will be assumed to be relative to the main output file's path, if the latter can be found. The stylesheet is then written to this file instead of the HTML file. .. versionadded:: 0.6 `noclobber_cssfile` If `cssfile` is given and the specified file exists, the css file will not be overwritten. This allows the use of the `full` option in combination with a user specified css file. Default is ``False``. .. versionadded:: 1.1 `linenos` If set to ``'table'``, output line numbers as a table with two cells, one containing the line numbers, the other the whole code. This is copy-and-paste-friendly, but may cause alignment problems with some browsers or fonts. If set to ``'inline'``, the line numbers will be integrated in the ``<pre>`` tag that contains the code (that setting is *new in Pygments 0.8*). For compatibility with Pygments 0.7 and earlier, every true value except ``'inline'`` means the same as ``'table'`` (in particular, that means also ``True``). The default value is ``False``, which means no line numbers at all. **Note:** with the default ("table") line number mechanism, the line numbers and code can have different line heights in Internet Explorer unless you give the enclosing ``<pre>`` tags an explicit ``line-height`` CSS property (you get the default line spacing with ``line-height: 125%``). `hl_lines` Specify a list of lines to be highlighted. The line numbers are always relative to the input (i.e. the first line is line 1) and are independent of `linenostart`. .. versionadded:: 0.11 `linenostart` The line number for the first line (default: ``1``). `linenostep` If set to a number n > 1, only every nth line number is printed. `linenospecial` If set to a number n > 0, every nth line number is given the CSS class ``"special"`` (default: ``0``). `nobackground` If set to ``True``, the formatter won't output the background color for the wrapping element (this automatically defaults to ``False`` when there is no wrapping element [e.g. no argument for the `get_style_defs` method given]) (default: ``False``). .. versionadded:: 0.6 `lineseparator` This string is output between lines of code. It defaults to ``"\n"``, which is enough to break a line inside ``<pre>`` tags, but you can e.g. set it to ``"<br>"`` to get HTML line breaks. .. versionadded:: 0.7 `lineanchors` If set to a nonempty string, e.g. ``foo``, the formatter will wrap each output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``. This allows easy linking to certain lines. .. versionadded:: 0.9 `linespans` If set to a nonempty string, e.g. ``foo``, the formatter will wrap each output line in a span tag with an ``id`` of ``foo-linenumber``. This allows easy access to lines via javascript. .. versionadded:: 1.6 `anchorlinenos` If set to `True`, will wrap line numbers in <a> tags. 
Used in combination with `linenos` and `lineanchors`. `tagsfile` If set to the path of a ctags file, wrap names in anchor tags that link to their definitions. `lineanchors` should be used, and the tags file should specify line numbers (see the `-n` option to ctags). The tags file is assumed to be encoded in UTF-8. .. versionadded:: 1.6 `tagurlformat` A string formatting pattern used to generate links to ctags definitions. Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`. Defaults to an empty string, resulting in just `#prefix-number` links. .. versionadded:: 1.6 `filename` A string used to generate a filename when rendering ``<pre>`` blocks, for example if displaying source code. If `linenos` is set to ``'table'`` then the filename will be rendered in an initial row containing a single `<th>` which spans both columns. .. versionadded:: 2.1 `wrapcode` Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended by the HTML5 specification. .. versionadded:: 2.4 `debug_token_types` Add ``title`` attributes to all token ``<span>`` tags that show the name of the token. .. versionadded:: 2.10 **Subclassing the HTML formatter** .. versionadded:: 0.7 The HTML formatter is now built in a way that allows easy subclassing, thus customizing the output HTML code. The `format()` method calls `self._format_lines()` which returns a generator that yields tuples of ``(1, line)``, where the ``1`` indicates that the ``line`` is a line of the formatted source code. If the `nowrap` option is set, the generator is simply iterated over and the resulting HTML is output. Otherwise, `format()` calls `self.wrap()`, which wraps the generator with other generators. These may add some HTML code to the one generated by `_format_lines()`, either by modifying the lines generated by the latter, then yielding them again with ``(1, line)``, and/or by yielding other HTML code before or after the lines, with ``(0, html)``. The distinction between source lines and other code makes it possible to wrap the generator multiple times. The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag. A custom `HtmlFormatter` subclass could look like this: .. sourcecode:: python class CodeHtmlFormatter(HtmlFormatter): def wrap(self, source, *, include_div): return self._wrap_code(source) def _wrap_code(self, source): yield 0, '<code>' for i, t in source: if i == 1: # it's a line of formatted code t += '<br>' yield i, t yield 0, '</code>' This results in wrapping the formatted lines with a ``<code>`` tag, where the source lines are broken using ``<br>`` tags. After calling `wrap()`, the `format()` method also adds the "line numbers" and/or "full document" wrappers if the respective options are set. Then, all HTML yielded by the wrapped generator is output. 
""" name = 'HTML' aliases = ['html'] filenames = ['*.html', '*.htm'] def __init__(self, **options): Formatter.__init__(self, **options) self.title = self._decodeifneeded(self.title) self.nowrap = get_bool_opt(options, 'nowrap', False) self.noclasses = get_bool_opt(options, 'noclasses', False) self.classprefix = options.get('classprefix', '') self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight')) self.cssstyles = self._decodeifneeded(options.get('cssstyles', '')) self.prestyles = self._decodeifneeded(options.get('prestyles', '')) self.cssfile = self._decodeifneeded(options.get('cssfile', '')) self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False) self.tagsfile = self._decodeifneeded(options.get('tagsfile', '')) self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', '')) self.filename = self._decodeifneeded(options.get('filename', '')) self.wrapcode = get_bool_opt(options, 'wrapcode', False) self.span_element_openers = {} self.debug_token_types = get_bool_opt(options, 'debug_token_types', False) if self.tagsfile: if not ctags: raise RuntimeError('The "ctags" package must to be installed ' 'to be able to use the "tagsfile" feature.') self._ctags = ctags.CTags(self.tagsfile) linenos = options.get('linenos', False) if linenos == 'inline': self.linenos = 2 elif linenos: # compatibility with <= 0.7 self.linenos = 1 else: self.linenos = 0 self.linenostart = abs(get_int_opt(options, 'linenostart', 1)) self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0)) self.nobackground = get_bool_opt(options, 'nobackground', False) self.lineseparator = options.get('lineseparator', '\n') self.lineanchors = options.get('lineanchors', '') self.linespans = options.get('linespans', '') self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False) self.hl_lines = set() for lineno in get_list_opt(options, 'hl_lines', []): try: self.hl_lines.add(int(lineno)) except ValueError: pass self._create_stylesheet() def _get_css_class(self, ttype): """Return the css class of this token type prefixed with the classprefix option.""" ttypeclass = _get_ttype_class(ttype) if ttypeclass: return self.classprefix + ttypeclass return '' def _get_css_classes(self, ttype): """Return the CSS classes of this token type prefixed with the classprefix option.""" cls = self._get_css_class(ttype) while ttype not in STANDARD_TYPES: ttype = ttype.parent cls = self._get_css_class(ttype) + ' ' + cls return cls or '' def _get_css_inline_styles(self, ttype): """Return the inline CSS styles for this token type.""" cclass = self.ttype2class.get(ttype) while cclass is None: ttype = ttype.parent cclass = self.ttype2class.get(ttype) return cclass or '' def _create_stylesheet(self): t2c = self.ttype2class = {Token: ''} c2s = self.class2style = {} for ttype, ndef in self.style: name = self._get_css_class(ttype) style = '' if ndef['color']: style += 'color: {}; '.format(webify(ndef['color'])) if ndef['bold']: style += 'font-weight: bold; ' if ndef['italic']: style += 'font-style: italic; ' if ndef['underline']: style += 'text-decoration: underline; ' if ndef['bgcolor']: style += 'background-color: {}; '.format(webify(ndef['bgcolor'])) if ndef['border']: style += 'border: 1px solid {}; '.format(webify(ndef['border'])) if style: t2c[ttype] = name # save len(ttype) to enable ordering the styles by # hierarchy (necessary for CSS cascading rules!) 
c2s[name] = (style[:-2], ttype, len(ttype)) def get_style_defs(self, arg=None): """ Return CSS style definitions for the classes produced by the current highlighting style. ``arg`` can be a string or list of selectors to insert before the token type classes. """ style_lines = [] style_lines.extend(self.get_linenos_style_defs()) style_lines.extend(self.get_background_style_defs(arg)) style_lines.extend(self.get_token_style_defs(arg)) return '\n'.join(style_lines) def get_token_style_defs(self, arg=None): prefix = self.get_css_prefix(arg) styles = [ (level, ttype, cls, style) for cls, (style, ttype, level) in self.class2style.items() if cls and style ] styles.sort() lines = [ f'{prefix(cls)} {{ {style} }} /* {repr(ttype)[6:]} */' for (level, ttype, cls, style) in styles ] return lines def get_background_style_defs(self, arg=None): prefix = self.get_css_prefix(arg) bg_color = self.style.background_color hl_color = self.style.highlight_color lines = [] if arg and not self.nobackground and bg_color is not None: text_style = '' if Text in self.ttype2class: text_style = ' ' + self.class2style[self.ttype2class[Text]][0] lines.insert( 0, '{}{{ background: {};{} }}'.format( prefix(''), bg_color, text_style ) ) if hl_color is not None: lines.insert( 0, '{} {{ background-color: {} }}'.format(prefix('hll'), hl_color) ) return lines def get_linenos_style_defs(self): lines = [ f'pre {{ {self._pre_style} }}', f'td.linenos .normal {{ {self._linenos_style} }}', f'span.linenos {{ {self._linenos_style} }}', f'td.linenos .special {{ {self._linenos_special_style} }}', f'span.linenos.special {{ {self._linenos_special_style} }}', ] return lines def get_css_prefix(self, arg): if arg is None: arg = ('cssclass' in self.options and '.'+self.cssclass or '') if isinstance(arg, str): args = [arg] else: args = list(arg) def prefix(cls): if cls: cls = '.' + cls tmp = [] for arg in args: tmp.append((arg and arg + ' ' or '') + cls) return ', '.join(tmp) return prefix @property def _pre_style(self): return 'line-height: 125%;' @property def _linenos_style(self): color = self.style.line_number_color background_color = self.style.line_number_background_color return f'color: {color}; background-color: {background_color}; padding-left: 5px; padding-right: 5px;' @property def _linenos_special_style(self): color = self.style.line_number_special_color background_color = self.style.line_number_special_background_color return f'color: {color}; background-color: {background_color}; padding-left: 5px; padding-right: 5px;' def _decodeifneeded(self, value): if isinstance(value, bytes): if self.encoding: return value.decode(self.encoding) return value.decode() return value def _wrap_full(self, inner, outfile): if self.cssfile: if os.path.isabs(self.cssfile): # it's an absolute filename cssfilename = self.cssfile else: try: filename = outfile.name if not filename or filename[0] == '<': # pseudo files, e.g. name == '<fdopen>' raise AttributeError cssfilename = os.path.join(os.path.dirname(filename), self.cssfile) except AttributeError: print('Note: Cannot determine output file name, ' 'using current directory as base for the CSS file name', file=sys.stderr) cssfilename = self.cssfile # write CSS file only if noclobber_cssfile isn't given as an option. 
try: if not os.path.exists(cssfilename) or not self.noclobber_cssfile: with open(cssfilename, "w", encoding="utf-8") as cf: cf.write(CSSFILE_TEMPLATE % {'styledefs': self.get_style_defs('body')}) except OSError as err: err.strerror = 'Error writing CSS file: ' + err.strerror raise yield 0, (DOC_HEADER_EXTERNALCSS % dict(title=self.title, cssfile=self.cssfile, encoding=self.encoding)) else: yield 0, (DOC_HEADER % dict(title=self.title, styledefs=self.get_style_defs('body'), encoding=self.encoding)) yield from inner yield 0, DOC_FOOTER def _wrap_tablelinenos(self, inner): dummyoutfile = StringIO() lncount = 0 for t, line in inner: if t: lncount += 1 dummyoutfile.write(line) fl = self.linenostart mw = len(str(lncount + fl - 1)) sp = self.linenospecial st = self.linenostep anchor_name = self.lineanchors or self.linespans aln = self.anchorlinenos nocls = self.noclasses lines = [] for i in range(fl, fl+lncount): print_line = i % st == 0 special_line = sp and i % sp == 0 if print_line: line = '%*d' % (mw, i) if aln: line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line) else: line = ' ' * mw if nocls: if special_line: style = f' style="{self._linenos_special_style}"' else: style = f' style="{self._linenos_style}"' else: if special_line: style = ' class="special"' else: style = ' class="normal"' if style: line = f'<span{style}>{line}</span>' lines.append(line) ls = '\n'.join(lines) # If a filename was specified, we can't put it into the code table as it # would misalign the line numbers. Hence we emit a separate row for it. filename_tr = "" if self.filename: filename_tr = ( '<tr><th colspan="2" class="filename">' '<span class="filename">' + self.filename + '</span>' '</th></tr>') # in case you wonder about the seemingly redundant <div> here: since the # content in the other cell also is wrapped in a div, some browsers in # some configurations seem to mess up the formatting... 
yield 0, (f'<table class="{self.cssclass}table">' + filename_tr + '<tr><td class="linenos"><div class="linenodiv"><pre>' + ls + '</pre></div></td><td class="code">') yield 0, '<div>' yield 0, dummyoutfile.getvalue() yield 0, '</div>' yield 0, '</td></tr></table>' def _wrap_inlinelinenos(self, inner): # need a list of lines since we need the width of a single number :( inner_lines = list(inner) sp = self.linenospecial st = self.linenostep num = self.linenostart mw = len(str(len(inner_lines) + num - 1)) anchor_name = self.lineanchors or self.linespans aln = self.anchorlinenos nocls = self.noclasses for _, inner_line in inner_lines: print_line = num % st == 0 special_line = sp and num % sp == 0 if print_line: line = '%*d' % (mw, num) else: line = ' ' * mw if nocls: if special_line: style = f' style="{self._linenos_special_style}"' else: style = f' style="{self._linenos_style}"' else: if special_line: style = ' class="linenos special"' else: style = ' class="linenos"' if style: linenos = f'<span{style}>{line}</span>' else: linenos = line if aln: yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) + inner_line) else: yield 1, linenos + inner_line num += 1 def _wrap_lineanchors(self, inner): s = self.lineanchors # subtract 1 since we have to increment i *before* yielding i = self.linenostart - 1 for t, line in inner: if t: i += 1 href = "" if self.linenos else ' href="#%s-%d"' % (s, i) yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line else: yield 0, line def _wrap_linespans(self, inner): s = self.linespans i = self.linenostart - 1 for t, line in inner: if t: i += 1 yield 1, '<span id="%s-%d">%s</span>' % (s, i, line) else: yield 0, line def _wrap_div(self, inner): style = [] if (self.noclasses and not self.nobackground and self.style.background_color is not None): style.append(f'background: {self.style.background_color}') if self.cssstyles: style.append(self.cssstyles) style = '; '.join(style) yield 0, ('<div' + (self.cssclass and f' class="{self.cssclass}"') + (style and (f' style="{style}"')) + '>') yield from inner yield 0, '</div>\n' def _wrap_pre(self, inner): style = [] if self.prestyles: style.append(self.prestyles) if self.noclasses: style.append(self._pre_style) style = '; '.join(style) if self.filename and self.linenos != 1: yield 0, ('<span class="filename">' + self.filename + '</span>') # the empty span here is to keep leading empty lines from being # ignored by HTML parsers yield 0, ('<pre' + (style and f' style="{style}"') + '><span></span>') yield from inner yield 0, '</pre>' def _wrap_code(self, inner): yield 0, '<code>' yield from inner yield 0, '</code>' @functools.lru_cache(maxsize=100) def _translate_parts(self, value): """HTML-escape a value and split it by newlines.""" return value.translate(_escape_html_table).split('\n') def _format_lines(self, tokensource): """ Just format the tokens, without any wrapping tags. Yield individual lines. 
""" nocls = self.noclasses lsep = self.lineseparator tagsfile = self.tagsfile lspan = '' line = [] for ttype, value in tokensource: try: cspan = self.span_element_openers[ttype] except KeyError: title = ' title="{}"'.format('.'.join(ttype)) if self.debug_token_types else '' if nocls: css_style = self._get_css_inline_styles(ttype) if css_style: css_style = self.class2style[css_style][0] cspan = f'<span style="{css_style}"{title}>' else: cspan = '' else: css_class = self._get_css_classes(ttype) if css_class: cspan = f'<span class="{css_class}"{title}>' else: cspan = '' self.span_element_openers[ttype] = cspan parts = self._translate_parts(value) if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = "<a href=\"%s#%s-%d\">%s" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + "</a>" # for all but the last line for part in parts[:-1]: if line: # Also check for part being non-empty, so we avoid creating # empty <span> tags if lspan != cspan and part: line.extend(((lspan and '</span>'), cspan, part, (cspan and '</span>'), lsep)) else: # both are the same, or the current part was empty line.extend((part, (lspan and '</span>'), lsep)) yield 1, ''.join(line) line = [] elif part: yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep)) else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, parts[-1])) lspan = cspan else: line.append(parts[-1]) elif parts[-1]: line = [cspan, parts[-1]] lspan = cspan # else we neither have to open a new span nor set lspan if line: line.extend(((lspan and '</span>'), lsep)) yield 1, ''.join(line) def _lookup_ctag(self, token): entry = ctags.TagEntry() if self._ctags.find(entry, token.encode(), 0): return entry['file'].decode(), entry['lineNumber'] else: return None, None def _highlight_lines(self, tokensource): """ Highlighted the lines specified in the `hl_lines` option by post-processing the token stream coming from `_format_lines`. """ hls = self.hl_lines for i, (t, value) in enumerate(tokensource): if t != 1: yield t, value if i + 1 in hls: # i + 1 because Python indexes start at 0 if self.noclasses: style = '' if self.style.highlight_color is not None: style = (f' style="background-color: {self.style.highlight_color}"') yield 1, f'<span{style}>{value}</span>' else: yield 1, f'<span class="hll">{value}</span>' else: yield 1, value def wrap(self, source): """ Wrap the ``source``, which is a generator yielding individual lines, in custom generators. See docstring for `format`. Can be overridden. """ output = source if self.wrapcode: output = self._wrap_code(output) output = self._wrap_pre(output) return output def format_unencoded(self, tokensource, outfile): """ The formatting process uses several nested generators; which of them are used is determined by the user's options. Each generator should take at least one argument, ``inner``, and wrap the pieces of text generated by this. Always yield 2-tuples: (code, text). If "code" is 1, the text is part of the original tokensource being highlighted, if it's 0, the text is some piece of wrapping. This makes it possible to use several different wrappers that process the original source linewise, e.g. line number generators. 
""" source = self._format_lines(tokensource) # As a special case, we wrap line numbers before line highlighting # so the line numbers get wrapped in the highlighting tag. if not self.nowrap and self.linenos == 2: source = self._wrap_inlinelinenos(source) if self.hl_lines: source = self._highlight_lines(source) if not self.nowrap: if self.lineanchors: source = self._wrap_lineanchors(source) if self.linespans: source = self._wrap_linespans(source) source = self.wrap(source) if self.linenos == 1: source = self._wrap_tablelinenos(source) source = self._wrap_div(source) if self.full: source = self._wrap_full(source, outfile) for t, piece in source: outfile.write(piece)
(**options)
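A sketch combining several of the options documented above into one call; the code snippet and title are illustrative:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = 'def foo(bar):\n    pass\n'
formatter = HtmlFormatter(
    linenos='table',     # two-cell table: line numbers + code
    hl_lines=[2],        # highlight the second input line
    cssclass='src',      # class of the wrapping <div>, and CSS prefix for get_style_defs()
    full=True,           # emit a complete HTML document with an inline <style> block
    title='demo',
)
html = highlight(code, PythonLexer(), formatter)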
10,124
pygments.formatters.html
__init__
null
def __init__(self, **options): Formatter.__init__(self, **options) self.title = self._decodeifneeded(self.title) self.nowrap = get_bool_opt(options, 'nowrap', False) self.noclasses = get_bool_opt(options, 'noclasses', False) self.classprefix = options.get('classprefix', '') self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight')) self.cssstyles = self._decodeifneeded(options.get('cssstyles', '')) self.prestyles = self._decodeifneeded(options.get('prestyles', '')) self.cssfile = self._decodeifneeded(options.get('cssfile', '')) self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False) self.tagsfile = self._decodeifneeded(options.get('tagsfile', '')) self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', '')) self.filename = self._decodeifneeded(options.get('filename', '')) self.wrapcode = get_bool_opt(options, 'wrapcode', False) self.span_element_openers = {} self.debug_token_types = get_bool_opt(options, 'debug_token_types', False) if self.tagsfile: if not ctags: raise RuntimeError('The "ctags" package must be installed ' 'to be able to use the "tagsfile" feature.') self._ctags = ctags.CTags(self.tagsfile) linenos = options.get('linenos', False) if linenos == 'inline': self.linenos = 2 elif linenos: # compatibility with <= 0.7 self.linenos = 1 else: self.linenos = 0 self.linenostart = abs(get_int_opt(options, 'linenostart', 1)) self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0)) self.nobackground = get_bool_opt(options, 'nobackground', False) self.lineseparator = options.get('lineseparator', '\n') self.lineanchors = options.get('lineanchors', '') self.linespans = options.get('linespans', '') self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False) self.hl_lines = set() for lineno in get_list_opt(options, 'hl_lines', []): try: self.hl_lines.add(int(lineno)) except ValueError: pass self._create_stylesheet()
(self, **options)
10,125
pygments.formatters.html
_create_stylesheet
null
def _create_stylesheet(self): t2c = self.ttype2class = {Token: ''} c2s = self.class2style = {} for ttype, ndef in self.style: name = self._get_css_class(ttype) style = '' if ndef['color']: style += 'color: {}; '.format(webify(ndef['color'])) if ndef['bold']: style += 'font-weight: bold; ' if ndef['italic']: style += 'font-style: italic; ' if ndef['underline']: style += 'text-decoration: underline; ' if ndef['bgcolor']: style += 'background-color: {}; '.format(webify(ndef['bgcolor'])) if ndef['border']: style += 'border: 1px solid {}; '.format(webify(ndef['border'])) if style: t2c[ttype] = name # save len(ttype) to enable ordering the styles by # hierarchy (necessary for CSS cascading rules!) c2s[name] = (style[:-2], ttype, len(ttype))
(self)
10,126
pygments.formatters.html
_decodeifneeded
null
def _decodeifneeded(self, value): if isinstance(value, bytes): if self.encoding: return value.decode(self.encoding) return value.decode() return value
(self, value)
10,127
pygments.formatters.html
_format_lines
Just format the tokens, without any wrapping tags. Yield individual lines.
def _format_lines(self, tokensource): """ Just format the tokens, without any wrapping tags. Yield individual lines. """ nocls = self.noclasses lsep = self.lineseparator tagsfile = self.tagsfile lspan = '' line = [] for ttype, value in tokensource: try: cspan = self.span_element_openers[ttype] except KeyError: title = ' title="{}"'.format('.'.join(ttype)) if self.debug_token_types else '' if nocls: css_style = self._get_css_inline_styles(ttype) if css_style: css_style = self.class2style[css_style][0] cspan = f'<span style="{css_style}"{title}>' else: cspan = '' else: css_class = self._get_css_classes(ttype) if css_class: cspan = f'<span class="{css_class}"{title}>' else: cspan = '' self.span_element_openers[ttype] = cspan parts = self._translate_parts(value) if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = "<a href=\"%s#%s-%d\">%s" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + "</a>" # for all but the last line for part in parts[:-1]: if line: # Also check for part being non-empty, so we avoid creating # empty <span> tags if lspan != cspan and part: line.extend(((lspan and '</span>'), cspan, part, (cspan and '</span>'), lsep)) else: # both are the same, or the current part was empty line.extend((part, (lspan and '</span>'), lsep)) yield 1, ''.join(line) line = [] elif part: yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep)) else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, parts[-1])) lspan = cspan else: line.append(parts[-1]) elif parts[-1]: line = [cspan, parts[-1]] lspan = cspan # else we neither have to open a new span nor set lspan if line: line.extend(((lspan and '</span>'), lsep)) yield 1, ''.join(line)
(self, tokensource)
10,128
pygments.formatters.html
_get_css_class
Return the css class of this token type prefixed with the classprefix option.
def _get_css_class(self, ttype): """Return the css class of this token type prefixed with the classprefix option.""" ttypeclass = _get_ttype_class(ttype) if ttypeclass: return self.classprefix + ttypeclass return ''
(self, ttype)
10,129
pygments.formatters.html
_get_css_classes
Return the CSS classes of this token type prefixed with the classprefix option.
def _get_css_classes(self, ttype): """Return the CSS classes of this token type prefixed with the classprefix option.""" cls = self._get_css_class(ttype) while ttype not in STANDARD_TYPES: ttype = ttype.parent cls = self._get_css_class(ttype) + ' ' + cls return cls or ''
(self, ttype)
10,130
pygments.formatters.html
_get_css_inline_styles
Return the inline CSS styles for this token type.
def _get_css_inline_styles(self, ttype): """Return the inline CSS styles for this token type.""" cclass = self.ttype2class.get(ttype) while cclass is None: ttype = ttype.parent cclass = self.ttype2class.get(ttype) return cclass or ''
(self, ttype)
10,131
pygments.formatters.html
_highlight_lines
Highlight the lines specified in the `hl_lines` option by post-processing the token stream coming from `_format_lines`.
def _highlight_lines(self, tokensource): """ Highlight the lines specified in the `hl_lines` option by post-processing the token stream coming from `_format_lines`. """ hls = self.hl_lines for i, (t, value) in enumerate(tokensource): if t != 1: yield t, value if i + 1 in hls: # i + 1 because Python indexes start at 0 if self.noclasses: style = '' if self.style.highlight_color is not None: style = (f' style="background-color: {self.style.highlight_color}"') yield 1, f'<span{style}>{value}</span>' else: yield 1, f'<span class="hll">{value}</span>' else: yield 1, value
(self, tokensource)
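The observable effect of this helper is reached through the public hl_lines option; line numbers are 1-based relative to the rendered output, as in this sketch:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

html = highlight('a = 1\nb = 2\n', PythonLexer(), HtmlFormatter(hl_lines=[2]))
assert 'class="hll"' in html   # the second line is wrapped in <span class="hll">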
10,132
pygments.formatters.html
_lookup_ctag
null
def _lookup_ctag(self, token): entry = ctags.TagEntry() if self._ctags.find(entry, token.encode(), 0): return entry['file'].decode(), entry['lineNumber'] else: return None, None
(self, token)
10,133
pygments.formatters.html
_wrap_code
null
def _wrap_code(self, inner): yield 0, '<code>' yield from inner yield 0, '</code>'
(self, inner)
10,134
pygments.formatters.html
_wrap_div
null
def _wrap_div(self, inner): style = [] if (self.noclasses and not self.nobackground and self.style.background_color is not None): style.append(f'background: {self.style.background_color}') if self.cssstyles: style.append(self.cssstyles) style = '; '.join(style) yield 0, ('<div' + (self.cssclass and f' class="{self.cssclass}"') + (style and (f' style="{style}"')) + '>') yield from inner yield 0, '</div>\n'
(self, inner)
10,135
pygments.formatters.html
_wrap_full
null
def _wrap_full(self, inner, outfile): if self.cssfile: if os.path.isabs(self.cssfile): # it's an absolute filename cssfilename = self.cssfile else: try: filename = outfile.name if not filename or filename[0] == '<': # pseudo files, e.g. name == '<fdopen>' raise AttributeError cssfilename = os.path.join(os.path.dirname(filename), self.cssfile) except AttributeError: print('Note: Cannot determine output file name, ' 'using current directory as base for the CSS file name', file=sys.stderr) cssfilename = self.cssfile # write CSS file only if noclobber_cssfile isn't given as an option. try: if not os.path.exists(cssfilename) or not self.noclobber_cssfile: with open(cssfilename, "w", encoding="utf-8") as cf: cf.write(CSSFILE_TEMPLATE % {'styledefs': self.get_style_defs('body')}) except OSError as err: err.strerror = 'Error writing CSS file: ' + err.strerror raise yield 0, (DOC_HEADER_EXTERNALCSS % dict(title=self.title, cssfile=self.cssfile, encoding=self.encoding)) else: yield 0, (DOC_HEADER % dict(title=self.title, styledefs=self.get_style_defs('body'), encoding=self.encoding)) yield from inner yield 0, DOC_FOOTER
(self, inner, outfile)
10,136
pygments.formatters.html
_wrap_inlinelinenos
null
def _wrap_inlinelinenos(self, inner): # need a list of lines since we need the width of a single number :( inner_lines = list(inner) sp = self.linenospecial st = self.linenostep num = self.linenostart mw = len(str(len(inner_lines) + num - 1)) anchor_name = self.lineanchors or self.linespans aln = self.anchorlinenos nocls = self.noclasses for _, inner_line in inner_lines: print_line = num % st == 0 special_line = sp and num % sp == 0 if print_line: line = '%*d' % (mw, num) else: line = ' ' * mw if nocls: if special_line: style = f' style="{self._linenos_special_style}"' else: style = f' style="{self._linenos_style}"' else: if special_line: style = ' class="linenos special"' else: style = ' class="linenos"' if style: linenos = f'<span{style}>{line}</span>' else: linenos = line if aln: yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) + inner_line) else: yield 1, linenos + inner_line num += 1
(self, inner)
10,137
pygments.formatters.html
_wrap_lineanchors
null
def _wrap_lineanchors(self, inner):
    s = self.lineanchors
    # subtract 1 since we have to increment i *before* yielding
    i = self.linenostart - 1
    for t, line in inner:
        if t:
            i += 1
            href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
            yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
        else:
            yield 0, line
(self, inner)
10,138
pygments.formatters.html
_wrap_linespans
null
def _wrap_linespans(self, inner):
    s = self.linespans
    i = self.linenostart - 1
    for t, line in inner:
        if t:
            i += 1
            yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
        else:
            yield 0, line
(self, inner)
10,139
pygments.formatters.html
_wrap_pre
null
def _wrap_pre(self, inner):
    style = []
    if self.prestyles:
        style.append(self.prestyles)
    if self.noclasses:
        style.append(self._pre_style)
    style = '; '.join(style)

    if self.filename and self.linenos != 1:
        yield 0, ('<span class="filename">' + self.filename + '</span>')

    # the empty span here is to keep leading empty lines from being
    # ignored by HTML parsers
    yield 0, ('<pre' + (style and f' style="{style}"') + '><span></span>')
    yield from inner
    yield 0, '</pre>'
(self, inner)
10,140
pygments.formatters.html
_wrap_tablelinenos
null
def _wrap_tablelinenos(self, inner):
    dummyoutfile = StringIO()
    lncount = 0
    for t, line in inner:
        if t:
            lncount += 1
        dummyoutfile.write(line)

    fl = self.linenostart
    mw = len(str(lncount + fl - 1))
    sp = self.linenospecial
    st = self.linenostep
    anchor_name = self.lineanchors or self.linespans
    aln = self.anchorlinenos
    nocls = self.noclasses

    lines = []

    for i in range(fl, fl+lncount):
        print_line = i % st == 0
        special_line = sp and i % sp == 0

        if print_line:
            line = '%*d' % (mw, i)
            if aln:
                line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
        else:
            line = ' ' * mw

        if nocls:
            if special_line:
                style = f' style="{self._linenos_special_style}"'
            else:
                style = f' style="{self._linenos_style}"'
        else:
            if special_line:
                style = ' class="special"'
            else:
                style = ' class="normal"'

        if style:
            line = f'<span{style}>{line}</span>'

        lines.append(line)

    ls = '\n'.join(lines)

    # If a filename was specified, we can't put it into the code table as it
    # would misalign the line numbers. Hence we emit a separate row for it.
    filename_tr = ""
    if self.filename:
        filename_tr = (
            '<tr><th colspan="2" class="filename">'
            '<span class="filename">' + self.filename + '</span>'
            '</th></tr>')

    # in case you wonder about the seemingly redundant <div> here: since the
    # content in the other cell also is wrapped in a div, some browsers in
    # some configurations seem to mess up the formatting...
    yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
              '<tr><td class="linenos"><div class="linenodiv"><pre>' +
              ls + '</pre></div></td><td class="code">')
    yield 0, '<div>'
    yield 0, dummyoutfile.getvalue()
    yield 0, '</div>'
    yield 0, '</td></tr></table>'
(self, inner)
10,142
pygments.formatters.html
format_unencoded
The formatting process uses several nested generators; which of them are used is determined by the user's options. Each generator should take at least one argument, ``inner``, and wrap the pieces of text generated by this. Always yield 2-tuples: (code, text). If "code" is 1, the text is part of the original tokensource being highlighted, if it's 0, the text is some piece of wrapping. This makes it possible to use several different wrappers that process the original source linewise, e.g. line number generators.
def format_unencoded(self, tokensource, outfile):
    """
    The formatting process uses several nested generators; which of
    them are used is determined by the user's options.

    Each generator should take at least one argument, ``inner``,
    and wrap the pieces of text generated by this.

    Always yield 2-tuples: (code, text). If "code" is 1, the text
    is part of the original tokensource being highlighted, if it's
    0, the text is some piece of wrapping. This makes it possible to
    use several different wrappers that process the original source
    linewise, e.g. line number generators.
    """
    source = self._format_lines(tokensource)

    # As a special case, we wrap line numbers before line highlighting
    # so the line numbers get wrapped in the highlighting tag.
    if not self.nowrap and self.linenos == 2:
        source = self._wrap_inlinelinenos(source)

    if self.hl_lines:
        source = self._highlight_lines(source)

    if not self.nowrap:
        if self.lineanchors:
            source = self._wrap_lineanchors(source)
        if self.linespans:
            source = self._wrap_linespans(source)
        source = self.wrap(source)
        if self.linenos == 1:
            source = self._wrap_tablelinenos(source)
        source = self._wrap_div(source)
        if self.full:
            source = self._wrap_full(source, outfile)

    for t, piece in source:
        outfile.write(piece)
(self, tokensource, outfile)
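To make the (code, text) protocol from the docstring above concrete, here is a minimal sketch; `wrap_blockquote` is a hypothetical name, not part of the package:

# Hypothetical illustration of the (code, text) protocol from format_unencoded:
# tuples tagged 1 carry highlighted source, tuples tagged 0 carry wrapping markup.
def wrap_blockquote(inner):
    yield 0, '<blockquote>'      # wrapping markup, code == 0
    for t, line in inner:
        yield t, line            # source lines keep code == 1
    yield 0, '</blockquote>'     # wrapping markup, code == 0

A generator like this can be chained into the pipeline the same way `_wrap_pre` and `_wrap_div` are chained in `format_unencoded`.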
10,143
pygments.formatters.html
get_background_style_defs
null
def get_background_style_defs(self, arg=None):
    prefix = self.get_css_prefix(arg)
    bg_color = self.style.background_color
    hl_color = self.style.highlight_color

    lines = []

    if arg and not self.nobackground and bg_color is not None:
        text_style = ''
        if Text in self.ttype2class:
            text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
        lines.insert(
            0, '{}{{ background: {};{} }}'.format(
                prefix(''), bg_color, text_style
            )
        )
    if hl_color is not None:
        lines.insert(
            0, '{} {{ background-color: {} }}'.format(prefix('hll'), hl_color)
        )

    return lines
(self, arg=None)
10,144
pygments.formatters.html
get_css_prefix
null
def get_css_prefix(self, arg):
    if arg is None:
        arg = ('cssclass' in self.options and '.'+self.cssclass or '')
    if isinstance(arg, str):
        args = [arg]
    else:
        args = list(arg)

    def prefix(cls):
        if cls:
            cls = '.' + cls
        tmp = []
        for arg in args:
            tmp.append((arg and arg + ' ' or '') + cls)
        return ', '.join(tmp)

    return prefix
(self, arg)
10,145
pygments.formatters.html
get_linenos_style_defs
null
def get_linenos_style_defs(self):
    lines = [
        f'pre {{ {self._pre_style} }}',
        f'td.linenos .normal {{ {self._linenos_style} }}',
        f'span.linenos {{ {self._linenos_style} }}',
        f'td.linenos .special {{ {self._linenos_special_style} }}',
        f'span.linenos.special {{ {self._linenos_special_style} }}',
    ]
    return lines
(self)
10,146
pygments.formatters.html
get_style_defs
Return CSS style definitions for the classes produced by the current highlighting style. ``arg`` can be a string or list of selectors to insert before the token type classes.
def get_style_defs(self, arg=None):
    """
    Return CSS style definitions for the classes produced by the current
    highlighting style. ``arg`` can be a string or list of selectors to
    insert before the token type classes.
    """
    style_lines = []

    style_lines.extend(self.get_linenos_style_defs())
    style_lines.extend(self.get_background_style_defs(arg))
    style_lines.extend(self.get_token_style_defs(arg))

    return '\n'.join(style_lines)
(self, arg=None)
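A brief usage sketch for `get_style_defs`, assuming a stock `HtmlFormatter`: passing a selector string prefixes every emitted rule, which is the usual way to scope the generated stylesheet.

from pygments.formatters import HtmlFormatter

# Emit CSS for all token classes, scoped under the ".highlight" selector.
formatter = HtmlFormatter(style='default')
css = formatter.get_style_defs('.highlight')
print(css.splitlines()[0])  # first emitted rule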
10,147
pygments.formatters.html
get_token_style_defs
null
def get_token_style_defs(self, arg=None):
    prefix = self.get_css_prefix(arg)

    styles = [
        (level, ttype, cls, style)
        for cls, (style, ttype, level) in self.class2style.items()
        if cls and style
    ]
    styles.sort()

    lines = [
        f'{prefix(cls)} {{ {style} }} /* {repr(ttype)[6:]} */'
        for (level, ttype, cls, style) in styles
    ]

    return lines
(self, arg=None)
10,148
pygments.formatters.html
wrap
Wrap the ``source``, which is a generator yielding individual lines, in custom generators. See docstring for `format`. Can be overridden.
def wrap(self, source):
    """
    Wrap the ``source``, which is a generator yielding
    individual lines, in custom generators. See docstring
    for `format`. Can be overridden.
    """
    output = source
    if self.wrapcode:
        output = self._wrap_code(output)

    output = self._wrap_pre(output)

    return output
(self, source)
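Since `wrap` is documented as overridable, here is a sketch of that extension point; the subclass name and the extra wrapper are illustrative, not from the source above:

from pygments.formatters import HtmlFormatter

class BlockquoteHtmlFormatter(HtmlFormatter):
    # Add one more wrapping stage around the default <pre>/<code> output.
    def wrap(self, source):
        return self._wrap_blockquote(super().wrap(source))

    def _wrap_blockquote(self, source):
        yield 0, '<blockquote>'
        yield from source
        yield 0, '</blockquote>'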
10,216
wsdiff
RecordFormatter
null
class RecordFormatter(Formatter):
    def __init__(self, side, diff):
        self.side = side
        if side == 'right':
            diff = [(right, left, change) for left, right, change in diff]
        self.diff = diff

    def format(self, tokensource, outfile):
        diff = iter(self.diff)
        self.lines = []
        for lineno, tokens in groupby(iter_token_lines(tokensource), key=lambda arg: arg[0]):
            for (lineno_ours, diff_ours), (lineno_theirs, _diff_theirs), change in diff:
                if lineno_ours == lineno:
                    break
                else:
                    self.lines.append(f'<span class="wsd-lineno wsd-{self.side} wsd-empty"></span><span class="wsd-line wsd-{self.side} wsd-empty"></span>')

            if not change:
                change_class = ''
            elif not lineno_ours or not lineno_theirs:
                change_class = ' wsd-insert'
            else:
                change_class = ' wsd-change'

            line = f'<span class="wsd-lineno wsd-{self.side}{change_class}">{lineno}</span><span class="wsd-line wsd-{self.side}{change_class}">'

            parts = re.split(r'(\00.|\01|$)', diff_ours)
            source_pos = 0
            diff_markers = []
            if lineno_theirs:
                # Do not highlight word changes if the whole line got added or removed.
                for span, sep in zip(parts[0:-2:2], parts[1:-2:2]):
                    source_pos += len(span)
                    diff_markers.append((source_pos, sep))

            diff_class = ''
            source_pos = 0
            for _lineno, ttype, value in tokens:
                css_class = get_token_class(ttype)
                while diff_markers:
                    next_marker_pos, next_marker_type = diff_markers[0]
                    if source_pos <= next_marker_pos < source_pos + len(value):
                        split_pos = next_marker_pos - source_pos
                        left, value = value[:split_pos], value[split_pos:]
                        line += f'<span class="wsd-{css_class}{diff_class}">{html.escape(left)}</span>'
                        source_pos += len(left)
                        diff_class = ' wsd-word-change' if next_marker_type.startswith('\0') else ''
                        diff_markers = diff_markers[1:]
                    else:
                        break
                line += f'<span class="{css_class}{diff_class}">{html.escape(value)}</span>'
                source_pos += len(value)
            if css_class is not None:
                line += '</span>'
            line += '</span>'
            self.lines.append(line)

        for _ours_empty, (lineno_theirs, _diff_theirs), change in diff:
            self.lines.append(f'<span class="wsd-lineno wsd-{self.side} wsd-empty"></span><span class="wsd-line wsd-{self.side} wsd-empty"></span>')
(side, diff)
10,217
wsdiff
__init__
null
def __init__(self, side, diff):
    self.side = side
    if side == 'right':
        diff = [(right, left, change) for left, right, change in diff]
    self.diff = diff
(self, side, diff)
10,218
wsdiff
format
null
def format(self, tokensource, outfile):
    diff = iter(self.diff)
    self.lines = []
    for lineno, tokens in groupby(iter_token_lines(tokensource), key=lambda arg: arg[0]):
        for (lineno_ours, diff_ours), (lineno_theirs, _diff_theirs), change in diff:
            if lineno_ours == lineno:
                break
            else:
                self.lines.append(f'<span class="wsd-lineno wsd-{self.side} wsd-empty"></span><span class="wsd-line wsd-{self.side} wsd-empty"></span>')

        if not change:
            change_class = ''
        elif not lineno_ours or not lineno_theirs:
            change_class = ' wsd-insert'
        else:
            change_class = ' wsd-change'

        line = f'<span class="wsd-lineno wsd-{self.side}{change_class}">{lineno}</span><span class="wsd-line wsd-{self.side}{change_class}">'

        parts = re.split(r'(\00.|\01|$)', diff_ours)
        source_pos = 0
        diff_markers = []
        if lineno_theirs:
            # Do not highlight word changes if the whole line got added or removed.
            for span, sep in zip(parts[0:-2:2], parts[1:-2:2]):
                source_pos += len(span)
                diff_markers.append((source_pos, sep))

        diff_class = ''
        source_pos = 0
        for _lineno, ttype, value in tokens:
            css_class = get_token_class(ttype)
            while diff_markers:
                next_marker_pos, next_marker_type = diff_markers[0]
                if source_pos <= next_marker_pos < source_pos + len(value):
                    split_pos = next_marker_pos - source_pos
                    left, value = value[:split_pos], value[split_pos:]
                    line += f'<span class="wsd-{css_class}{diff_class}">{html.escape(left)}</span>'
                    source_pos += len(left)
                    diff_class = ' wsd-word-change' if next_marker_type.startswith('\0') else ''
                    diff_markers = diff_markers[1:]
                else:
                    break
            line += f'<span class="{css_class}{diff_class}">{html.escape(value)}</span>'
            source_pos += len(value)
        if css_class is not None:
            line += '</span>'
        line += '</span>'
        self.lines.append(line)

    for _ours_empty, (lineno_theirs, _diff_theirs), change in diff:
        self.lines.append(f'<span class="wsd-lineno wsd-{self.side} wsd-empty"></span><span class="wsd-line wsd-{self.side} wsd-empty"></span>')
(self, tokensource, outfile)
10,220
pygments.lexer
RegexLexer
Base for simple stateful regular expression-based lexers. Simplifies the lexing process so that you need only provide a list of states and regular expressions.
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all times there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break
(*args, **kwds)
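A minimal sketch of the `tokens` structure the docstring above describes; the lexer itself is a toy, not from Pygments. The `'string'` state is pushed on the stack when a double quote is seen and left again via `'#pop'`:

from pygments.lexer import RegexLexer
from pygments.token import Comment, Number, String, Text

class ToyLexer(RegexLexer):
    name = 'Toy'
    tokens = {
        'root': [
            (r'#.*?$', Comment),       # no new_state: stay in 'root'
            (r'\d+', Number),
            (r'"', String, 'string'),  # push 'string' onto the state stack
            (r'\s+', Text),
            (r'.', Text),
        ],
        'string': [
            (r'[^"]+', String),
            (r'"', String, '#pop'),    # go back one step in the state stack
        ],
    }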
10,221
pygments.lexer
__init__
This constructor takes arbitrary options as keyword arguments. Every subclass must first process its own options and then call the `Lexer` constructor, since it processes the basic options like `stripnl`. An example looks like this:

.. sourcecode:: python

   def __init__(self, **options):
       self.compress = options.get('compress', '')
       Lexer.__init__(self, **options)

As these options must all be specifiable as strings (due to the command line usage), there are various utility functions available to help with that, see `Utilities`_.
def __init__(self, **options):
    """
    This constructor takes arbitrary options as keyword arguments.
    Every subclass must first process its own options and then call
    the `Lexer` constructor, since it processes the basic
    options like `stripnl`.

    An example looks like this:

    .. sourcecode:: python

       def __init__(self, **options):
           self.compress = options.get('compress', '')
           Lexer.__init__(self, **options)

    As these options must all be specifiable as strings (due to the
    command line usage), there are various utility functions
    available to help with that, see `Utilities`_.
    """
    self.options = options
    self.stripnl = get_bool_opt(options, 'stripnl', True)
    self.stripall = get_bool_opt(options, 'stripall', False)
    self.ensurenl = get_bool_opt(options, 'ensurenl', True)
    self.tabsize = get_int_opt(options, 'tabsize', 0)
    self.encoding = options.get('encoding', 'guess')
    self.encoding = options.get('inencoding') or self.encoding
    self.filters = []
    for filter_ in get_list_opt(options, 'filters', ()):
        self.add_filter(filter_)
(self, **options)
10,222
pygments.lexer
__repr__
null
def __repr__(self):
    if self.options:
        return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
    else:
        return f'<pygments.lexers.{self.__class__.__name__}>'
(self)
10,223
pygments.lexer
_preprocess_lexer_input
Apply preprocessing such as decoding the input, removing BOM and normalizing newlines.
def _preprocess_lexer_input(self, text):
    """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""

    if not isinstance(text, str):
        if self.encoding == 'guess':
            text, _ = guess_decode(text)
        elif self.encoding == 'chardet':
            try:
                import chardet
            except ImportError as e:
                raise ImportError('To enable chardet encoding guessing, '
                                  'please install the chardet library '
                                  'from http://chardet.feedparser.org/') from e
            # check for BOM first
            decoded = None
            for bom, encoding in _encoding_map:
                if text.startswith(bom):
                    decoded = text[len(bom):].decode(encoding, 'replace')
                    break
            # no BOM found, so use chardet
            if decoded is None:
                enc = chardet.detect(text[:1024])  # Guess using first 1KB
                decoded = text.decode(enc.get('encoding') or 'utf-8', 'replace')
            text = decoded
        else:
            text = text.decode(self.encoding)
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]
    else:
        if text.startswith('\ufeff'):
            text = text[len('\ufeff'):]

    # text now *is* a unicode string
    text = text.replace('\r\n', '\n')
    text = text.replace('\r', '\n')

    if self.stripall:
        text = text.strip()
    elif self.stripnl:
        text = text.strip('\n')

    if self.tabsize > 0:
        text = text.expandtabs(self.tabsize)

    if self.ensurenl and not text.endswith('\n'):
        text += '\n'

    return text
(self, text)
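To show the effect of the normalizations described above without touching the Lexer internals, a self-contained sketch assuming the default options (stripnl=True, tabsize=0, ensurenl=True):

# Standalone illustration of the preprocessing steps in _preprocess_lexer_input.
raw = b'\xef\xbb\xbfline one\r\nline two\r'
text = raw.decode('utf-8')
text = text.lstrip('\ufeff')                            # drop the BOM
text = text.replace('\r\n', '\n').replace('\r', '\n')   # normalize newlines
text = text.strip('\n')                                 # stripnl default
if not text.endswith('\n'):                             # ensurenl default
    text += '\n'
assert text == 'line one\nline two\n'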