Dataset columns (one row per source file; value ranges as reported by the viewer):

    repo             string    length 2 to 152
    file             string    length 15 to 239
    code             string    length 0 to 58.4M
    file_length      int64     0 to 58.4M
    avg_line_length  float64   0 to 1.81M
    max_line_length  int64     0 to 12.7M
    extension_type   string    364 distinct values
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/base.py
code:
""" Base class for ensemble-based estimators. """ # Authors: Gilles Louppe # License: BSD 3 clause import numpy as np import numbers from ..base import clone from ..base import BaseEstimator from ..base import MetaEstimatorMixin from ..utils import _get_n_jobs, check_random_state from ..externals import six from abc import ABCMeta, abstractmethod MAX_RAND_SEED = np.iinfo(np.int32).max def _set_random_states(estimator, random_state=None): """Sets fixed random_state parameters for an estimator Finds all parameters ending ``random_state`` and sets them to integers derived from ``random_state``. Parameters ---------- estimator : estimator supporting get/set_params Estimator with potential randomness managed by random_state parameters. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Notes ----- This does not necessarily set *all* ``random_state`` attributes that control an estimator's randomness, only those accessible through ``estimator.get_params()``. ``random_state``s not controlled include those belonging to: * cross-validation splitters * ``scipy.stats`` rvs """ random_state = check_random_state(random_state) to_set = {} for key in sorted(estimator.get_params(deep=True)): if key == 'random_state' or key.endswith('__random_state'): to_set[key] = random_state.randint(MAX_RAND_SEED) if to_set: estimator.set_params(**to_set) class BaseEnsemble(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for all ensemble classes. Warning: This class should not be used directly. Use derived classes instead. Parameters ---------- base_estimator : object, optional (default=None) The base estimator from which the ensemble is built. n_estimators : integer The number of estimators in the ensemble. estimator_params : list of strings The list of attributes to use as parameters when instantiating a new base estimator. If none are given, default parameters are used. Attributes ---------- base_estimator_ : estimator The base estimator from which the ensemble is grown. estimators_ : list of estimators The collection of fitted base estimators. """ @abstractmethod def __init__(self, base_estimator, n_estimators=10, estimator_params=tuple()): # Set parameters self.base_estimator = base_estimator self.n_estimators = n_estimators self.estimator_params = estimator_params # Don't instantiate estimators now! Parameters of base_estimator might # still change. Eg., when grid-searching with the nested object syntax. # self.estimators_ needs to be filled by the derived classes in fit. def _validate_estimator(self, default=None): """Check the estimator and the n_estimator attribute, set the `base_estimator_` attribute.""" if not isinstance(self.n_estimators, (numbers.Integral, np.integer)): raise ValueError("n_estimators must be an integer, " "got {0}.".format(type(self.n_estimators))) if self.n_estimators <= 0: raise ValueError("n_estimators must be greater than zero, " "got {0}.".format(self.n_estimators)) if self.base_estimator is not None: self.base_estimator_ = self.base_estimator else: self.base_estimator_ = default if self.base_estimator_ is None: raise ValueError("base_estimator cannot be None") def _make_estimator(self, append=True, random_state=None): """Make and configure a copy of the `base_estimator_` attribute. 
Warning: This method should be used to properly instantiate new sub-estimators. """ estimator = clone(self.base_estimator_) estimator.set_params(**dict((p, getattr(self, p)) for p in self.estimator_params)) if random_state is not None: _set_random_states(estimator, random_state) if append: self.estimators_.append(estimator) return estimator def __len__(self): """Returns the number of estimators in the ensemble.""" return len(self.estimators_) def __getitem__(self, index): """Returns the index'th estimator in the ensemble.""" return self.estimators_[index] def __iter__(self): """Returns iterator over estimators in the ensemble.""" return iter(self.estimators_) def _partition_estimators(n_estimators, n_jobs): """Private function used to partition estimators between jobs.""" # Compute the number of jobs n_jobs = min(_get_n_jobs(n_jobs), n_estimators) # Partition estimators between jobs n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=np.int) n_estimators_per_job[:n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
file_length: 5,551
avg_line_length: 33.271605
max_line_length: 79
extension_type: py
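As a companion to the base.py row above, here is a minimal standalone sketch of the work-splitting rule that _partition_estimators implements. It is a plain-NumPy reimplementation for illustration only; the function name is hypothetical and not sklearn API.

import numpy as np

def partition_estimators(n_estimators, n_jobs):
    # Cap the number of jobs at the number of estimators.
    n_jobs = min(n_jobs, n_estimators)
    # Every job gets the floor share; the first (n_estimators % n_jobs)
    # jobs get one extra so the counts sum to n_estimators exactly.
    per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=int)
    per_job[:n_estimators % n_jobs] += 1
    starts = [0] + np.cumsum(per_job).tolist()
    return n_jobs, per_job.tolist(), starts

# 10 estimators over 4 jobs -> counts [3, 3, 2, 2], starts [0, 3, 6, 8, 10]
print(partition_estimators(10, 4))

Handing the remainder to the leading jobs keeps per-job counts within one of each other, and the cumulative starts let each worker slice its own contiguous block of estimators.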
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/iforest.py
code:
# Authors: Nicolas Goix <[email protected]> # Alexandre Gramfort <[email protected]> # License: BSD 3 clause from __future__ import division import numpy as np import scipy as sp from warnings import warn from sklearn.utils.fixes import euler_gamma from scipy.sparse import issparse import numbers from ..externals import six from ..tree import ExtraTreeRegressor from ..utils import check_random_state, check_array from .bagging import BaseBagging __all__ = ["IsolationForest"] INTEGER_TYPES = (numbers.Integral, np.integer) class IsolationForest(BaseBagging): """Isolation Forest Algorithm Return the anomaly score of each sample using the IsolationForest algorithm The IsolationForest 'isolates' observations by randomly selecting a feature and then randomly selecting a split value between the maximum and minimum values of the selected feature. Since recursive partitioning can be represented by a tree structure, the number of splittings required to isolate a sample is equivalent to the path length from the root node to the terminating node. This path length, averaged over a forest of such random trees, is a measure of normality and our decision function. Random partitioning produces noticeably shorter paths for anomalies. Hence, when a forest of random trees collectively produce shorter path lengths for particular samples, they are highly likely to be anomalies. Read more in the :ref:`User Guide <isolation_forest>`. .. versionadded:: 0.18 Parameters ---------- n_estimators : int, optional (default=100) The number of base estimators in the ensemble. max_samples : int or float, optional (default="auto") The number of samples to draw from X to train each base estimator. - If int, then draw `max_samples` samples. - If float, then draw `max_samples * X.shape[0]` samples. - If "auto", then `max_samples=min(256, n_samples)`. If max_samples is larger than the number of samples provided, all samples will be used for all trees (no sampling). contamination : float in (0., 0.5), optional (default=0.1) The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Used when fitting to define the threshold on the decision function. max_features : int or float, optional (default=1.0) The number of features to draw from X to train each base estimator. - If int, then draw `max_features` features. - If float, then draw `max_features * X.shape[1]` features. bootstrap : boolean, optional (default=False) If True, individual trees are fit on random subsets of the training data sampled with replacement. If False, sampling without replacement is performed. n_jobs : integer, optional (default=1) The number of jobs to run in parallel for both `fit` and `predict`. If -1, then the number of jobs is set to the number of cores. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. verbose : int, optional (default=0) Controls the verbosity of the tree building process. Attributes ---------- estimators_ : list of DecisionTreeClassifier The collection of fitted sub-estimators. estimators_samples_ : list of arrays The subset of drawn samples (i.e., the in-bag samples) for each base estimator. max_samples_ : integer The actual number of samples References ---------- .. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest." Data Mining, 2008. 
ICDM'08. Eighth IEEE International Conference on. .. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based anomaly detection." ACM Transactions on Knowledge Discovery from Data (TKDD) 6.1 (2012): 3. """ def __init__(self, n_estimators=100, max_samples="auto", contamination=0.1, max_features=1., bootstrap=False, n_jobs=1, random_state=None, verbose=0): super(IsolationForest, self).__init__( base_estimator=ExtraTreeRegressor( max_features=1, splitter='random', random_state=random_state), # here above max_features has no links with self.max_features bootstrap=bootstrap, bootstrap_features=False, n_estimators=n_estimators, max_samples=max_samples, max_features=max_features, n_jobs=n_jobs, random_state=random_state, verbose=verbose) self.contamination = contamination def _set_oob_score(self, X, y): raise NotImplementedError("OOB score not supported by iforest") def fit(self, X, y=None, sample_weight=None): """Fit estimator. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Sparse matrices are also supported, use sparse ``csc_matrix`` for maximum efficiency. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns self. """ X = check_array(X, accept_sparse=['csc']) if issparse(X): # Pre-sort indices to avoid that each individual tree of the # ensemble sorts the indices. X.sort_indices() rnd = check_random_state(self.random_state) y = rnd.uniform(size=X.shape[0]) # ensure that max_sample is in [1, n_samples]: n_samples = X.shape[0] if isinstance(self.max_samples, six.string_types): if self.max_samples == 'auto': max_samples = min(256, n_samples) else: raise ValueError('max_samples (%s) is not supported.' 'Valid choices are: "auto", int or' 'float' % self.max_samples) elif isinstance(self.max_samples, INTEGER_TYPES): if self.max_samples > n_samples: warn("max_samples (%s) is greater than the " "total number of samples (%s). max_samples " "will be set to n_samples for estimation." % (self.max_samples, n_samples)) max_samples = n_samples else: max_samples = self.max_samples else: # float if not (0. < self.max_samples <= 1.): raise ValueError("max_samples must be in (0, 1], got %r" % self.max_samples) max_samples = int(self.max_samples * X.shape[0]) self.max_samples_ = max_samples max_depth = int(np.ceil(np.log2(max(max_samples, 2)))) super(IsolationForest, self)._fit(X, y, max_samples, max_depth=max_depth, sample_weight=sample_weight) self.threshold_ = -sp.stats.scoreatpercentile( -self.decision_function(X), 100. * (1. - self.contamination)) return self def predict(self, X): """Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- is_inlier : array, shape (n_samples,) For each observations, tells whether or not (+1 or -1) it should be considered as an inlier according to the fitted model. """ X = check_array(X, accept_sparse='csr') is_inlier = np.ones(X.shape[0], dtype=int) is_inlier[self.decision_function(X) <= self.threshold_] = -1 return is_inlier def decision_function(self, X): """Average anomaly score of X of the base classifiers. The anomaly score of an input sample is computed as the mean anomaly score of the trees in the forest. 
The measure of normality of an observation given a tree is the depth of the leaf containing this observation, which is equivalent to the number of splittings required to isolate this point. In case of several observations n_left in the leaf, the average path length of a n_left samples isolation tree is added. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- scores : array of shape (n_samples,) The anomaly score of the input samples. The lower, the more abnormal. """ # code structure from ForestClassifier/predict_proba # Check data X = check_array(X, accept_sparse='csr') n_samples = X.shape[0] n_samples_leaf = np.zeros((n_samples, self.n_estimators), order="f") depths = np.zeros((n_samples, self.n_estimators), order="f") if self._max_features == X.shape[1]: subsample_features = False else: subsample_features = True for i, (tree, features) in enumerate(zip(self.estimators_, self.estimators_features_)): if subsample_features: X_subset = X[:, features] else: X_subset = X leaves_index = tree.apply(X_subset) node_indicator = tree.decision_path(X_subset) n_samples_leaf[:, i] = tree.tree_.n_node_samples[leaves_index] depths[:, i] = np.ravel(node_indicator.sum(axis=1)) depths[:, i] -= 1 depths += _average_path_length(n_samples_leaf) scores = 2 ** (-depths.mean(axis=1) / _average_path_length(self.max_samples_)) # Take the opposite of the scores as bigger is better (here less # abnormal) and add 0.5 (this value plays a special role as described # in the original paper) to give a sense to scores = 0: return 0.5 - scores def _average_path_length(n_samples_leaf): """ The average path length in a n_samples iTree, which is equal to the average path length of an unsuccessful BST search since the latter has the same structure as an isolation tree. Parameters ---------- n_samples_leaf : array-like of shape (n_samples, n_estimators), or int. The number of training samples in each test sample leaf, for each estimators. Returns ------- average_path_length : array, same shape as n_samples_leaf """ if isinstance(n_samples_leaf, INTEGER_TYPES): if n_samples_leaf <= 1: return 1. else: return 2. * (np.log(n_samples_leaf - 1.) + euler_gamma) - 2. * ( n_samples_leaf - 1.) / n_samples_leaf else: n_samples_leaf_shape = n_samples_leaf.shape n_samples_leaf = n_samples_leaf.reshape((1, -1)) average_path_length = np.zeros(n_samples_leaf.shape) mask = (n_samples_leaf <= 1) not_mask = np.logical_not(mask) average_path_length[mask] = 1. average_path_length[not_mask] = 2. * ( np.log(n_samples_leaf[not_mask] - 1.) + euler_gamma) - 2. * ( n_samples_leaf[not_mask] - 1.) / n_samples_leaf[not_mask] return average_path_length.reshape(n_samples_leaf_shape)
file_length: 12,336
avg_line_length: 36.843558
max_line_length: 86
extension_type: py
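A short usage sketch of the IsolationForest API documented in the row above (illustrative only; it assumes a 0.19-era scikit-learn matching this vendored copy, and the toy data is invented for the example):

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(42)
X_train = 0.3 * rng.randn(100, 2)                   # one dense cluster
X_test = rng.uniform(low=-4, high=4, size=(20, 2))  # scattered points

clf = IsolationForest(n_estimators=100, max_samples='auto',
                      contamination=0.1, random_state=rng)
clf.fit(X_train)

# predict() returns +1 for inliers and -1 for outliers; it thresholds
# decision_function(), where lower scores mean more anomalous.
print(clf.predict(X_test))
print(clf.decision_function(X_test)[:5])

In this version, fit() derives threshold_ from the contamination rate, and predict() simply compares each decision_function() score against it.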
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/setup.py
code:
import numpy
from numpy.distutils.misc_util import Configuration


def configuration(parent_package="", top_path=None):
    config = Configuration("ensemble", parent_package, top_path)
    config.add_extension("_gradient_boosting",
                         sources=["_gradient_boosting.pyx"],
                         include_dirs=[numpy.get_include()])

    config.add_subpackage("tests")

    return config

if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())
file_length: 518
avg_line_length: 27.833333
max_line_length: 64
extension_type: py
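For context on the setup.py row above, a small sketch of what the numpy.distutils Configuration object carries; this is illustrative only (it assumes numpy.distutils is available, as in this Python 3.6 environment, and may warn when run outside a real source tree):

from numpy.distutils.misc_util import Configuration

# Mirror the configuration() helper above, outside a build. The script
# itself is normally driven as: python setup.py build_ext --inplace
config = Configuration("ensemble", parent_package="sklearn", top_path=None)
print(config.name)  # 'sklearn.ensemble'

# todict() flattens the object into the keyword arguments that
# numpy.distutils.core.setup(**...) expects.
print(sorted(config.todict()))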
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/voting_classifier.py
code:
""" Soft Voting/Majority Rule classifier. This module contains a Soft Voting/Majority Rule classifier for classification estimators. """ # Authors: Sebastian Raschka <[email protected]>, # Gilles Louppe <[email protected]> # # License: BSD 3 clause import numpy as np import warnings from ..base import ClassifierMixin from ..base import TransformerMixin from ..base import clone from ..preprocessing import LabelEncoder from ..externals.joblib import Parallel, delayed from ..utils.validation import has_fit_parameter, check_is_fitted from ..utils.metaestimators import _BaseComposition def _parallel_fit_estimator(estimator, X, y, sample_weight=None): """Private function used to fit an estimator within a job.""" if sample_weight is not None: estimator.fit(X, y, sample_weight=sample_weight) else: estimator.fit(X, y) return estimator class VotingClassifier(_BaseComposition, ClassifierMixin, TransformerMixin): """Soft Voting/Majority Rule classifier for unfitted estimators. .. versionadded:: 0.17 Read more in the :ref:`User Guide <voting_classifier>`. Parameters ---------- estimators : list of (string, estimator) tuples Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones of those original estimators that will be stored in the class attribute ``self.estimators_``. An estimator can be set to `None` using ``set_params``. voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default=`None`) Sequence of weights (`float` or `int`) to weight the occurrences of predicted class labels (`hard` voting) or class probabilities before averaging (`soft` voting). Uses uniform weights if `None`. n_jobs : int, optional (default=1) The number of jobs to run in parallel for ``fit``. If -1, then the number of jobs is set to the number of cores. flatten_transform : bool, optional (default=None) Affects shape of transform output only when voting='soft' If voting='soft' and flatten_transform=True, transform method returns matrix with shape (n_samples, n_classifiers * n_classes). If flatten_transform=False, it returns (n_classifiers, n_samples, n_classes). Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators as defined in ``estimators`` that are not `None`. classes_ : array-like, shape = [n_predictions] The classes labels. Examples -------- >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], ... voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], ... voting='soft', weights=[2,1,1], ... 
flatten_transform=True) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> print(eclf3.transform(X).shape) (6, 6) >>> """ def __init__(self, estimators, voting='hard', weights=None, n_jobs=1, flatten_transform=None): self.estimators = estimators self.voting = voting self.weights = weights self.n_jobs = n_jobs self.flatten_transform = flatten_transform @property def named_estimators(self): return dict(self.estimators) def fit(self, X, y, sample_weight=None): """ Fit the estimators. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Note that this is supported only if all underlying estimators support sample weights. Returns ------- self : object """ if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1: raise NotImplementedError('Multilabel and multi-output' ' classification is not supported.') if self.voting not in ('soft', 'hard'): raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)" % self.voting) if self.estimators is None or len(self.estimators) == 0: raise AttributeError('Invalid `estimators` attribute, `estimators`' ' should be a list of (string, estimator)' ' tuples') if (self.weights is not None and len(self.weights) != len(self.estimators)): raise ValueError('Number of classifiers and weights must be equal' '; got %d weights, %d estimators' % (len(self.weights), len(self.estimators))) if sample_weight is not None: for name, step in self.estimators: if not has_fit_parameter(step, 'sample_weight'): raise ValueError('Underlying estimator \'%s\' does not' ' support sample weights.' % name) names, clfs = zip(*self.estimators) self._validate_names(names) n_isnone = np.sum([clf is None for _, clf in self.estimators]) if n_isnone == len(self.estimators): raise ValueError('All estimators are None. At least one is ' 'required to be a classifier!') self.le_ = LabelEncoder().fit(y) self.classes_ = self.le_.classes_ self.estimators_ = [] transformed_y = self.le_.transform(y) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_parallel_fit_estimator)(clone(clf), X, transformed_y, sample_weight=sample_weight) for clf in clfs if clf is not None) return self @property def _weights_not_none(self): """Get the weights of not `None` estimators""" if self.weights is None: return None return [w for est, w in zip(self.estimators, self.weights) if est[1] is not None] def predict(self, X): """ Predict class labels for X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ---------- maj : array-like, shape = [n_samples] Predicted class labels. """ check_is_fitted(self, 'estimators_') if self.voting == 'soft': maj = np.argmax(self.predict_proba(X), axis=1) else: # 'hard' voting predictions = self._predict(X) maj = np.apply_along_axis( lambda x: np.argmax( np.bincount(x, weights=self._weights_not_none)), axis=1, arr=predictions) maj = self.le_.inverse_transform(maj) return maj def _collect_probas(self, X): """Collect results from clf.predict calls. 
""" return np.asarray([clf.predict_proba(X) for clf in self.estimators_]) def _predict_proba(self, X): """Predict class probabilities for X in 'soft' voting """ if self.voting == 'hard': raise AttributeError("predict_proba is not available when" " voting=%r" % self.voting) check_is_fitted(self, 'estimators_') avg = np.average(self._collect_probas(X), axis=0, weights=self._weights_not_none) return avg @property def predict_proba(self): """Compute probabilities of possible outcomes for samples in X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ---------- avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. """ return self._predict_proba def transform(self, X): """Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- If `voting='soft'` and `flatten_transform=True`: array-like = (n_classifiers, n_samples * n_classes) otherwise array-like = (n_classifiers, n_samples, n_classes) Class probabilities calculated by each classifier. If `voting='hard'`: array-like = [n_samples, n_classifiers] Class labels predicted by each classifier. """ check_is_fitted(self, 'estimators_') if self.voting == 'soft': probas = self._collect_probas(X) if self.flatten_transform is None: warnings.warn("'flatten_transform' default value will be " "changed to True in 0.21." "To silence this warning you may" " explicitly set flatten_transform=False.", DeprecationWarning) return probas elif not self.flatten_transform: return probas else: return np.hstack(probas) else: return self._predict(X) def set_params(self, **params): """ Setting the parameters for the voting classifier Valid parameter keys can be listed with get_params(). Parameters ---------- params: keyword arguments Specific parameters using e.g. set_params(parameter_name=new_value) In addition, to setting the parameters of the ``VotingClassifier``, the individual classifiers of the ``VotingClassifier`` can also be set or replaced by setting them to None. Examples -------- # In this example, the RandomForestClassifier is removed clf1 = LogisticRegression() clf2 = RandomForestClassifier() eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)] eclf.set_params(rf=None) """ super(VotingClassifier, self)._set_params('estimators', **params) return self def get_params(self, deep=True): """ Get the parameters of the VotingClassifier Parameters ---------- deep: bool Setting it to True gets the various classifiers and the parameters of the classifiers as well """ return super(VotingClassifier, self)._get_params('estimators', deep=deep) def _predict(self, X): """Collect results from clf.predict calls. """ return np.asarray([clf.predict(X) for clf in self.estimators_]).T
file_length: 12,554
avg_line_length: 35.926471
max_line_length: 79
extension_type: py
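A runnable variant of the set_params flow sketched in the voting_classifier.py docstring above, dropping an estimator in place (illustrative sketch; the toy data is invented for the example):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import VotingClassifier

X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])

eclf = VotingClassifier(estimators=[('lr', LogisticRegression()),
                                    ('gnb', GaussianNB())],
                        voting='soft', weights=[2, 1])
eclf.fit(X, y)
# Soft voting averages predict_proba across estimators (weighted 2:1 here).
print(eclf.predict_proba(X).shape)   # (4, 2)

# Drop one estimator in place; only the remaining one is refit.
eclf.set_params(gnb=None)
eclf.fit(X, y)
print(len(eclf.estimators_))         # 1

Note that weights stays aligned with the estimators list, including entries set to None, which is why _weights_not_none filters the two together.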
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/weight_boosting.py
code:
"""Weight Boosting This module contains weight boosting estimators for both classification and regression. The module structure is the following: - The ``BaseWeightBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ from each other in the loss function that is optimized. - ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for classification problems. - ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for regression problems. """ # Authors: Noel Dawe <[email protected]> # Gilles Louppe <[email protected]> # Hamzeh Alsalhi <[email protected]> # Arnaud Joly <[email protected]> # # License: BSD 3 clause from abc import ABCMeta, abstractmethod import numpy as np from numpy.core.umath_tests import inner1d from .base import BaseEnsemble from ..base import ClassifierMixin, RegressorMixin, is_regressor, is_classifier from ..externals import six from ..externals.six.moves import zip from ..externals.six.moves import xrange as range from .forest import BaseForest from ..tree import DecisionTreeClassifier, DecisionTreeRegressor from ..tree.tree import BaseDecisionTree from ..tree._tree import DTYPE from ..utils import check_array, check_X_y, check_random_state from ..utils.extmath import stable_cumsum from ..metrics import accuracy_score, r2_score from sklearn.utils.validation import has_fit_parameter, check_is_fitted __all__ = [ 'AdaBoostClassifier', 'AdaBoostRegressor', ] class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)): """Base class for AdaBoost estimators. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__(self, base_estimator=None, n_estimators=50, estimator_params=tuple(), learning_rate=1., random_state=None): super(BaseWeightBoosting, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, estimator_params=estimator_params) self.learning_rate = learning_rate self.random_state = random_state def fit(self, X, y, sample_weight=None): """Build a boosted classifier/regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is forced to DTYPE from tree._tree if the base classifier of this ensemble weighted boosting classifier is a tree or forest. y : array-like of shape = [n_samples] The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to 1 / n_samples. Returns ------- self : object Returns self. """ # Check parameters if self.learning_rate <= 0: raise ValueError("learning_rate must be greater than zero") if (self.base_estimator is None or isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))): dtype = DTYPE accept_sparse = 'csc' else: dtype = None accept_sparse = ['csr', 'csc'] X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype, y_numeric=is_regressor(self)) if sample_weight is None: # Initialize weights to 1 / n_samples sample_weight = np.empty(X.shape[0], dtype=np.float64) sample_weight[:] = 1. 
/ X.shape[0] else: sample_weight = check_array(sample_weight, ensure_2d=False) # Normalize existing weights sample_weight = sample_weight / sample_weight.sum(dtype=np.float64) # Check that the sample weights sum is positive if sample_weight.sum() <= 0: raise ValueError( "Attempting to fit with a non-positive " "weighted number of samples.") # Check parameters self._validate_estimator() # Clear any previous fit results self.estimators_ = [] self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64) self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64) random_state = check_random_state(self.random_state) for iboost in range(self.n_estimators): # Boosting step sample_weight, estimator_weight, estimator_error = self._boost( iboost, X, y, sample_weight, random_state) # Early termination if sample_weight is None: break self.estimator_weights_[iboost] = estimator_weight self.estimator_errors_[iboost] = estimator_error # Stop if error is zero if estimator_error == 0: break sample_weight_sum = np.sum(sample_weight) # Stop if the sum of sample weights has become non-positive if sample_weight_sum <= 0: break if iboost < self.n_estimators - 1: # Normalize sample_weight /= sample_weight_sum return self @abstractmethod def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost. Warning: This method needs to be overridden by subclasses. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples] The current sample weights. random_state : numpy.RandomState The current random number generator Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. error : float The classification error for the current boost. If None then boosting has terminated early. """ pass def staged_score(self, X, y, sample_weight=None): """Return staged scores for X, y. This generator method yields the ensemble score after each iteration of boosting and therefore allows monitoring, such as to determine the score on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like, shape = [n_samples] Labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- z : float """ for y_pred in self.staged_predict(X): if is_classifier(self): yield accuracy_score(y, y_pred, sample_weight=sample_weight) else: yield r2_score(y, y_pred, sample_weight=sample_weight) @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). 
Returns ------- feature_importances_ : array, shape = [n_features] """ if self.estimators_ is None or len(self.estimators_) == 0: raise ValueError("Estimator not fitted, " "call `fit` before `feature_importances_`.") try: norm = self.estimator_weights_.sum() return (sum(weight * clf.feature_importances_ for weight, clf in zip(self.estimator_weights_, self.estimators_)) / norm) except AttributeError: raise AttributeError( "Unable to compute feature importances " "since base_estimator does not have a " "feature_importances_ attribute") def _validate_X_predict(self, X): """Ensure that X is in the proper format""" if (self.base_estimator is None or isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))): X = check_array(X, accept_sparse='csr', dtype=DTYPE) else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) return X def _samme_proba(estimator, n_classes, X): """Calculate algorithm 4, step 2, equation c) of Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ proba = estimator.predict_proba(X) # Displace zero probabilities so the log is defined. # Also fix negative elements which may occur with # negative sample weights. proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps log_proba = np.log(proba) return (n_classes - 1) * (log_proba - (1. / n_classes) * log_proba.sum(axis=1)[:, np.newaxis]) class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin): """An AdaBoost classifier. An AdaBoost [1] classifier is a meta-estimator that begins by fitting a classifier on the original dataset and then fits additional copies of the classifier on the same dataset but where the weights of incorrectly classified instances are adjusted such that subsequent classifiers focus more on difficult cases. This class implements the algorithm known as AdaBoost-SAMME [2]. Read more in the :ref:`User Guide <adaboost>`. Parameters ---------- base_estimator : object, optional (default=DecisionTreeClassifier) The base estimator from which the boosted ensemble is built. Support for sample weighting is required, as well as proper `classes_` and `n_classes_` attributes. n_estimators : integer, optional (default=50) The maximum number of estimators at which boosting is terminated. In case of perfect fit, the learning procedure is stopped early. learning_rate : float, optional (default=1.) Learning rate shrinks the contribution of each classifier by ``learning_rate``. There is a trade-off between ``learning_rate`` and ``n_estimators``. algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R') If 'SAMME.R' then use the SAMME.R real boosting algorithm. ``base_estimator`` must support calculation of class probabilities. If 'SAMME' then use the SAMME discrete boosting algorithm. The SAMME.R algorithm typically converges faster than SAMME, achieving a lower test error with fewer boosting iterations. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators. classes_ : array of shape = [n_classes] The classes labels. n_classes_ : int The number of classes. estimator_weights_ : array of floats Weights for each estimator in the boosted ensemble. 
estimator_errors_ : array of floats Classification error for each estimator in the boosted ensemble. feature_importances_ : array of shape = [n_features] The feature importances if supported by the ``base_estimator``. See also -------- AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier References ---------- .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of on-Line Learning and an Application to Boosting", 1995. .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ def __init__(self, base_estimator=None, n_estimators=50, learning_rate=1., algorithm='SAMME.R', random_state=None): super(AdaBoostClassifier, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state) self.algorithm = algorithm def fit(self, X, y, sample_weight=None): """Build a boosted classifier from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to ``1 / n_samples``. Returns ------- self : object Returns self. """ # Check that algorithm is supported if self.algorithm not in ('SAMME', 'SAMME.R'): raise ValueError("algorithm %s is not supported" % self.algorithm) # Fit return super(AdaBoostClassifier, self).fit(X, y, sample_weight) def _validate_estimator(self): """Check the estimator and set the base_estimator_ attribute.""" super(AdaBoostClassifier, self)._validate_estimator( default=DecisionTreeClassifier(max_depth=1)) # SAMME-R requires predict_proba-enabled base estimators if self.algorithm == 'SAMME.R': if not hasattr(self.base_estimator_, 'predict_proba'): raise TypeError( "AdaBoostClassifier with algorithm='SAMME.R' requires " "that the weak learner supports the calculation of class " "probabilities with a predict_proba method.\n" "Please change the base estimator or set " "algorithm='SAMME' instead.") if not has_fit_parameter(self.base_estimator_, "sample_weight"): raise ValueError("%s doesn't support sample_weight." % self.base_estimator_.__class__.__name__) def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost. Perform a single boost according to the real multi-class SAMME.R algorithm or to the discrete SAMME algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples] The current sample weights. random_state : numpy.RandomState The current random number generator Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The classification error for the current boost. If None then boosting has terminated early. 
""" if self.algorithm == 'SAMME.R': return self._boost_real(iboost, X, y, sample_weight, random_state) else: # elif self.algorithm == "SAMME": return self._boost_discrete(iboost, X, y, sample_weight, random_state) def _boost_real(self, iboost, X, y, sample_weight, random_state): """Implement a single boost using the SAMME.R real algorithm.""" estimator = self._make_estimator(random_state=random_state) estimator.fit(X, y, sample_weight=sample_weight) y_predict_proba = estimator.predict_proba(X) if iboost == 0: self.classes_ = getattr(estimator, 'classes_', None) self.n_classes_ = len(self.classes_) y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0) # Instances incorrectly classified incorrect = y_predict != y # Error fraction estimator_error = np.mean( np.average(incorrect, weights=sample_weight, axis=0)) # Stop if classification is perfect if estimator_error <= 0: return sample_weight, 1., 0. # Construct y coding as described in Zhu et al [2]: # # y_k = 1 if c == k else -1 / (K - 1) # # where K == n_classes_ and c, k in [0, K) are indices along the second # axis of the y coding with c being the index corresponding to the true # class label. n_classes = self.n_classes_ classes = self.classes_ y_codes = np.array([-1. / (n_classes - 1), 1.]) y_coding = y_codes.take(classes == y[:, np.newaxis]) # Displace zero probabilities so the log is defined. # Also fix negative elements which may occur with # negative sample weights. proba = y_predict_proba # alias for readability proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps # Boost weight using multi-class AdaBoost SAMME.R alg estimator_weight = (-1. * self.learning_rate * (((n_classes - 1.) / n_classes) * inner1d(y_coding, np.log(y_predict_proba)))) # Only boost the weights if it will fit again if not iboost == self.n_estimators - 1: # Only boost positive weights sample_weight *= np.exp(estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))) return sample_weight, 1., estimator_error def _boost_discrete(self, iboost, X, y, sample_weight, random_state): """Implement a single boost using the SAMME discrete algorithm.""" estimator = self._make_estimator(random_state=random_state) estimator.fit(X, y, sample_weight=sample_weight) y_predict = estimator.predict(X) if iboost == 0: self.classes_ = getattr(estimator, 'classes_', None) self.n_classes_ = len(self.classes_) # Instances incorrectly classified incorrect = y_predict != y # Error fraction estimator_error = np.mean( np.average(incorrect, weights=sample_weight, axis=0)) # Stop if classification is perfect if estimator_error <= 0: return sample_weight, 1., 0. n_classes = self.n_classes_ # Stop if the error is at least as bad as random guessing if estimator_error >= 1. - (1. / n_classes): self.estimators_.pop(-1) if len(self.estimators_) == 0: raise ValueError('BaseClassifier in AdaBoostClassifier ' 'ensemble is worse than random, ensemble ' 'can not be fit.') return None, None, None # Boost weight using multi-class AdaBoost SAMME alg estimator_weight = self.learning_rate * ( np.log((1. - estimator_error) / estimator_error) + np.log(n_classes - 1.)) # Only boost the weights if I will fit again if not iboost == self.n_estimators - 1: # Only boost positive weights sample_weight *= np.exp(estimator_weight * incorrect * ((sample_weight > 0) | (estimator_weight < 0))) return sample_weight, estimator_weight, estimator_error def predict(self, X): """Predict classes for X. 
The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- y : array of shape = [n_samples] The predicted classes. """ pred = self.decision_function(X) if self.n_classes_ == 2: return self.classes_.take(pred > 0, axis=0) return self.classes_.take(np.argmax(pred, axis=1), axis=0) def staged_predict(self, X): """Return staged predictions for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. This generator method yields the ensemble prediction after each iteration of boosting and therefore allows monitoring, such as to determine the prediction on a test set after each boost. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : generator of array, shape = [n_samples] The predicted classes. """ n_classes = self.n_classes_ classes = self.classes_ if n_classes == 2: for pred in self.staged_decision_function(X): yield np.array(classes.take(pred > 0, axis=0)) else: for pred in self.staged_decision_function(X): yield np.array(classes.take( np.argmax(pred, axis=1), axis=0)) def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- score : array, shape = [n_samples, k] The decision function of the input samples. The order of outputs is the same of that of the `classes_` attribute. Binary classification is a special cases with ``k == 1``, otherwise ``k==n_classes``. For binary classification, values closer to -1 or 1 mean more like the first or second class in ``classes_``, respectively. """ check_is_fitted(self, "n_classes_") X = self._validate_X_predict(X) n_classes = self.n_classes_ classes = self.classes_[:, np.newaxis] pred = None if self.algorithm == 'SAMME.R': # The weights are all 1. for SAMME.R pred = sum(_samme_proba(estimator, n_classes, X) for estimator in self.estimators_) else: # self.algorithm == "SAMME" pred = sum((estimator.predict(X) == classes).T * w for estimator, w in zip(self.estimators_, self.estimator_weights_)) pred /= self.estimator_weights_.sum() if n_classes == 2: pred[:, 0] *= -1 return pred.sum(axis=1) return pred def staged_decision_function(self, X): """Compute decision function of ``X`` for each boosting iteration. This method allows monitoring (i.e. determine error on testing set) after each boosting iteration. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of outputs is the same of that of the `classes_` attribute. Binary classification is a special cases with ``k == 1``, otherwise ``k==n_classes``. For binary classification, values closer to -1 or 1 mean more like the first or second class in ``classes_``, respectively. 
""" check_is_fitted(self, "n_classes_") X = self._validate_X_predict(X) n_classes = self.n_classes_ classes = self.classes_[:, np.newaxis] pred = None norm = 0. for weight, estimator in zip(self.estimator_weights_, self.estimators_): norm += weight if self.algorithm == 'SAMME.R': # The weights are all 1. for SAMME.R current_pred = _samme_proba(estimator, n_classes, X) else: # elif self.algorithm == "SAMME": current_pred = estimator.predict(X) current_pred = (current_pred == classes).T * weight if pred is None: pred = current_pred else: pred += current_pred if n_classes == 2: tmp_pred = np.copy(pred) tmp_pred[:, 0] *= -1 yield (tmp_pred / norm).sum(axis=1) else: yield pred / norm def predict_proba(self, X): """Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the weighted mean predicted class probabilities of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- p : array of shape = [n_samples] The class probabilities of the input samples. The order of outputs is the same of that of the `classes_` attribute. """ check_is_fitted(self, "n_classes_") n_classes = self.n_classes_ X = self._validate_X_predict(X) if n_classes == 1: return np.ones((X.shape[0], 1)) if self.algorithm == 'SAMME.R': # The weights are all 1. for SAMME.R proba = sum(_samme_proba(estimator, n_classes, X) for estimator in self.estimators_) else: # self.algorithm == "SAMME" proba = sum(estimator.predict_proba(X) * w for estimator, w in zip(self.estimators_, self.estimator_weights_)) proba /= self.estimator_weights_.sum() proba = np.exp((1. / (n_classes - 1)) * proba) normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer return proba def staged_predict_proba(self, X): """Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the weighted mean predicted class probabilities of the classifiers in the ensemble. This generator method yields the ensemble predicted class probabilities after each iteration of boosting and therefore allows monitoring, such as to determine the predicted class probabilities on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- p : generator of array, shape = [n_samples] The class probabilities of the input samples. The order of outputs is the same of that of the `classes_` attribute. """ X = self._validate_X_predict(X) n_classes = self.n_classes_ proba = None norm = 0. for weight, estimator in zip(self.estimator_weights_, self.estimators_): norm += weight if self.algorithm == 'SAMME.R': # The weights are all 1. for SAMME.R current_proba = _samme_proba(estimator, n_classes, X) else: # elif self.algorithm == "SAMME": current_proba = estimator.predict_proba(X) * weight if proba is None: proba = current_proba else: proba += current_proba real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm)) normalizer = real_proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 real_proba /= normalizer yield real_proba def predict_log_proba(self, X): """Predict class log-probabilities for X. 
The predicted class log-probabilities of an input sample is computed as the weighted mean predicted class log-probabilities of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- p : array of shape = [n_samples] The class probabilities of the input samples. The order of outputs is the same of that of the `classes_` attribute. """ return np.log(self.predict_proba(X)) class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin): """An AdaBoost regressor. An AdaBoost [1] regressor is a meta-estimator that begins by fitting a regressor on the original dataset and then fits additional copies of the regressor on the same dataset but where the weights of instances are adjusted according to the error of the current prediction. As such, subsequent regressors focus more on difficult cases. This class implements the algorithm known as AdaBoost.R2 [2]. Read more in the :ref:`User Guide <adaboost>`. Parameters ---------- base_estimator : object, optional (default=DecisionTreeRegressor) The base estimator from which the boosted ensemble is built. Support for sample weighting is required. n_estimators : integer, optional (default=50) The maximum number of estimators at which boosting is terminated. In case of perfect fit, the learning procedure is stopped early. learning_rate : float, optional (default=1.) Learning rate shrinks the contribution of each regressor by ``learning_rate``. There is a trade-off between ``learning_rate`` and ``n_estimators``. loss : {'linear', 'square', 'exponential'}, optional (default='linear') The loss function to use when updating the weights after each boosting iteration. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators. estimator_weights_ : array of floats Weights for each estimator in the boosted ensemble. estimator_errors_ : array of floats Regression error for each estimator in the boosted ensemble. feature_importances_ : array of shape = [n_features] The feature importances if supported by the ``base_estimator``. See also -------- AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor References ---------- .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of on-Line Learning and an Application to Boosting", 1995. .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. """ def __init__(self, base_estimator=None, n_estimators=50, learning_rate=1., loss='linear', random_state=None): super(AdaBoostRegressor, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state) self.loss = loss self.random_state = random_state def fit(self, X, y, sample_weight=None): """Build a boosted regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (real numbers). 
sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to 1 / n_samples. Returns ------- self : object Returns self. """ # Check loss if self.loss not in ('linear', 'square', 'exponential'): raise ValueError( "loss must be 'linear', 'square', or 'exponential'") # Fit return super(AdaBoostRegressor, self).fit(X, y, sample_weight) def _validate_estimator(self): """Check the estimator and set the base_estimator_ attribute.""" super(AdaBoostRegressor, self)._validate_estimator( default=DecisionTreeRegressor(max_depth=3)) def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost for regression Perform a single boost according to the AdaBoost.R2 algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape = [n_samples] The current sample weights. random_state : numpy.RandomState The current random number generator Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The regression error for the current boost. If None then boosting has terminated early. """ estimator = self._make_estimator(random_state=random_state) # Weighted sampling of the training set with replacement # For NumPy >= 1.7.0 use np.random.choice cdf = stable_cumsum(sample_weight) cdf /= cdf[-1] uniform_samples = random_state.random_sample(X.shape[0]) bootstrap_idx = cdf.searchsorted(uniform_samples, side='right') # searchsorted returns a scalar bootstrap_idx = np.array(bootstrap_idx, copy=False) # Fit on the bootstrapped sample and obtain a prediction # for all samples in the training set estimator.fit(X[bootstrap_idx], y[bootstrap_idx]) y_predict = estimator.predict(X) error_vect = np.abs(y_predict - y) error_max = error_vect.max() if error_max != 0.: error_vect /= error_max if self.loss == 'square': error_vect **= 2 elif self.loss == 'exponential': error_vect = 1. - np.exp(- error_vect) # Calculate the average loss estimator_error = (sample_weight * error_vect).sum() if estimator_error <= 0: # Stop if fit is perfect return sample_weight, 1., 0. elif estimator_error >= 0.5: # Discard current estimator only if it isn't the only one if len(self.estimators_) > 1: self.estimators_.pop(-1) return None, None, None beta = estimator_error / (1. - estimator_error) # Boost weight using AdaBoost.R2 alg estimator_weight = self.learning_rate * np.log(1. / beta) if not iboost == self.n_estimators - 1: sample_weight *= np.power( beta, (1. 
- error_vect) * self.learning_rate) return sample_weight, estimator_weight, estimator_error def _get_median_predict(self, X, limit): # Evaluate predictions of all estimators predictions = np.array([ est.predict(X) for est in self.estimators_[:limit]]).T # Sort the predictions sorted_idx = np.argsort(predictions, axis=1) # Find index of median prediction for each sample weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1) median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis] median_idx = median_or_above.argmax(axis=1) median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx] # Return median predictions return predictions[np.arange(X.shape[0]), median_estimators] def predict(self, X): """Predict regression value for X. The predicted regression value of an input sample is computed as the weighted median prediction of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- y : array of shape = [n_samples] The predicted regression values. """ check_is_fitted(self, "estimator_weights_") X = self._validate_X_predict(X) return self._get_median_predict(X, len(self.estimators_)) def staged_predict(self, X): """Return staged predictions for X. The predicted regression value of an input sample is computed as the weighted median prediction of the classifiers in the ensemble. This generator method yields the ensemble prediction after each iteration of boosting and therefore allows monitoring, such as to determine the prediction on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- y : generator of array, shape = [n_samples] The predicted regression values. """ check_is_fitted(self, "estimator_weights_") X = self._validate_X_predict(X) for i, _ in enumerate(self.estimators_, 1): yield self._get_median_predict(X, limit=i)
41,091
35.558719
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/partial_dependence.py
"""Partial dependence plots for tree ensembles. """ # Authors: Peter Prettenhofer # License: BSD 3 clause from itertools import count import numbers import numpy as np from scipy.stats.mstats import mquantiles from ..utils.extmath import cartesian from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import map, range, zip from ..utils import check_array from ..utils.validation import check_is_fitted from ..tree._tree import DTYPE from ._gradient_boosting import _partial_dependence_tree from .gradient_boosting import BaseGradientBoosting def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100): """Generate a grid of points based on the ``percentiles of ``X``. The grid is generated by placing ``grid_resolution`` equally spaced points between the ``percentiles`` of each column of ``X``. Parameters ---------- X : ndarray The data percentiles : tuple of floats The percentiles which are used to construct the extreme values of the grid axes. grid_resolution : int The number of equally spaced points that are placed on the grid. Returns ------- grid : ndarray All data points on the grid; ``grid.shape[1] == X.shape[1]`` and ``grid.shape[0] == grid_resolution * X.shape[1]``. axes : seq of ndarray The axes with which the grid has been created. """ if len(percentiles) != 2: raise ValueError('percentile must be tuple of len 2') if not all(0. <= x <= 1. for x in percentiles): raise ValueError('percentile values must be in [0, 1]') axes = [] emp_percentiles = mquantiles(X, prob=percentiles, axis=0) for col in range(X.shape[1]): uniques = np.unique(X[:, col]) if uniques.shape[0] < grid_resolution: # feature has low resolution use unique vals axis = uniques else: # create axis based on percentiles and grid resolution axis = np.linspace(emp_percentiles[0, col], emp_percentiles[1, col], num=grid_resolution, endpoint=True) axes.append(axis) return cartesian(axes), axes def partial_dependence(gbrt, target_variables, grid=None, X=None, percentiles=(0.05, 0.95), grid_resolution=100): """Partial dependence of ``target_variables``. Partial dependence plots show the dependence between the joint values of the ``target_variables`` and the function represented by the ``gbrt``. Read more in the :ref:`User Guide <partial_dependence>`. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. target_variables : array-like, dtype=int The target features for which the partial dependecy should be computed (size should be smaller than 3 for visual renderings). grid : array-like, shape=(n_points, len(target_variables)) The grid of ``target_variables`` values for which the partial dependecy should be evaluated (either ``grid`` or ``X`` must be specified). X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. It is used to generate a ``grid`` for the ``target_variables``. The ``grid`` comprises ``grid_resolution`` equally spaced points between the two ``percentiles``. percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the ``grid``. Only if ``X`` is not None. grid_resolution : int, default=100 The number of equally spaced points on the ``grid``. Returns ------- pdp : array, shape=(n_classes, n_points) The partial dependence function evaluated on the ``grid``. For regression and binary classification ``n_classes==1``. axes : seq of ndarray or None The axes with which the grid has been created or None if the grid has been given. 
Examples -------- >>> samples = [[0, 0, 2], [1, 0, 0]] >>> labels = [0, 1] >>> from sklearn.ensemble import GradientBoostingClassifier >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels) >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2) >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) """ if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') check_is_fitted(gbrt, 'estimators_') if (grid is None and X is None) or (grid is not None and X is not None): raise ValueError('Either grid or X must be specified') target_variables = np.asarray(target_variables, dtype=np.int32, order='C').ravel() if any([not (0 <= fx < gbrt.n_features_) for fx in target_variables]): raise ValueError('target_variables must be in [0, %d]' % (gbrt.n_features_ - 1)) if X is not None: X = check_array(X, dtype=DTYPE, order='C') grid, axes = _grid_from_X(X[:, target_variables], percentiles, grid_resolution) else: assert grid is not None # dont return axes if grid is given axes = None # grid must be 2d if grid.ndim == 1: grid = grid[:, np.newaxis] if grid.ndim != 2: raise ValueError('grid must be 2d but is %dd' % grid.ndim) grid = np.asarray(grid, dtype=DTYPE, order='C') assert grid.shape[1] == target_variables.shape[0] n_trees_per_stage = gbrt.estimators_.shape[1] n_estimators = gbrt.estimators_.shape[0] pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64, order='C') for stage in range(n_estimators): for k in range(n_trees_per_stage): tree = gbrt.estimators_[stage, k].tree_ _partial_dependence_tree(tree, grid, target_variables, gbrt.learning_rate, pdp[k]) return pdp, axes def plot_partial_dependence(gbrt, X, features, feature_names=None, label=None, n_cols=3, grid_resolution=100, percentiles=(0.05, 0.95), n_jobs=1, verbose=0, ax=None, line_kw=None, contour_kw=None, **fig_kw): """Partial dependence plots for ``features``. The ``len(features)`` plots are arranged in a grid with ``n_cols`` columns. Two-way partial dependence plots are plotted as contour plots. Read more in the :ref:`User Guide <partial_dependence>`. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. features : seq of ints, strings, or tuples of ints or strings If seq[i] is an int or a tuple with one int value, a one-way PDP is created; if seq[i] is a tuple of two ints, a two-way PDP is created. If feature_names is specified and seq[i] is an int, seq[i] must be < len(feature_names). If seq[i] is a string, feature_names must be specified, and seq[i] must be in feature_names. feature_names : seq of str Name of each feature; feature_names[i] holds the name of the feature with index i. label : object The class label for which the PDPs should be computed. Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``. n_cols : int The number of columns in the grid plot (default: 3). percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used to create the extreme values for the PDP axes. grid_resolution : int, default=100 The number of equally spaced points on the axes. n_jobs : int The number of CPUs to use to compute the PDs. -1 means 'all CPUs'. Defaults to 1. verbose : int Verbose output during PD computations. Defaults to 0. ax : Matplotlib axis object, default None An axis object onto which the plots will be drawn. 
line_kw : dict Dict with keywords passed to the ``matplotlib.pyplot.plot`` call. For one-way partial dependence plots. contour_kw : dict Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call. For two-way partial dependence plots. fig_kw : dict Dict with keywords passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns ------- fig : figure The Matplotlib Figure object. axs : seq of Axis objects A seq of Axis objects, one for each subplot. Examples -------- >>> from sklearn.datasets import make_friedman1 >>> from sklearn.ensemble import GradientBoostingRegressor >>> X, y = make_friedman1() >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP ... """ import matplotlib.pyplot as plt from matplotlib import transforms from matplotlib.ticker import MaxNLocator from matplotlib.ticker import ScalarFormatter if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') check_is_fitted(gbrt, 'estimators_') # set label_idx for multi-class GBRT if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2: if label is None: raise ValueError('label is not given for multi-class PDP') label_idx = np.searchsorted(gbrt.classes_, label) if gbrt.classes_[label_idx] != label: raise ValueError('label %s not in ``gbrt.classes_``' % str(label)) else: # regression and binary classification label_idx = 0 X = check_array(X, dtype=DTYPE, order='C') if gbrt.n_features_ != X.shape[1]: raise ValueError('X.shape[1] does not match gbrt.n_features_') if line_kw is None: line_kw = {'color': 'green'} if contour_kw is None: contour_kw = {} # convert feature_names to list if feature_names is None: # if feature_names is not given, use feature indices as names feature_names = [str(i) for i in range(gbrt.n_features_)] elif isinstance(feature_names, np.ndarray): feature_names = feature_names.tolist() def convert_feature(fx): if isinstance(fx, six.string_types): try: fx = feature_names.index(fx) except ValueError: raise ValueError('Feature %s not in feature_names' % fx) return fx # convert features into a seq of int tuples tmp_features = [] for fxs in features: if isinstance(fxs, (numbers.Integral,) + six.string_types): fxs = (fxs,) try: fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32) except TypeError: raise ValueError('features must be either int, str, or tuple ' 'of int/str') if not (1 <= np.size(fxs) <= 2): raise ValueError('target features must be either one or two') tmp_features.append(fxs) features = tmp_features names = [] try: for fxs in features: fx_names = [] # explicit loop so "i" is bound for exception below for i in fxs: fx_names.append(feature_names[i]) names.append(fx_names) except IndexError: raise ValueError('All entries of features must be less than ' 'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i)) # compute PD functions pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(partial_dependence)(gbrt, fxs, X=X, grid_resolution=grid_resolution, percentiles=percentiles) for fxs in features) # get global min and max values of PD grouped by plot type pdp_lim = {} for pdp, axes in pd_result: min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max() n_fx = len(axes) old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) # create contour levels for two-way plots if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) if ax is None: fig = plt.figure(**fig_kw) else: fig = ax.get_figure() fig.clear() n_cols = min(n_cols, len(features)) n_rows = int(np.ceil(len(features) / float(n_cols))) axs = [] for i, fx, name, (pdp, axes) in zip(count(), features, names, pd_result): ax = fig.add_subplot(n_rows, n_cols, i + 1) if len(axes) == 1: ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw) else: # make contour plot assert len(axes) == 2 XX, YY = np.meshgrid(axes[0], axes[1]) Z = pdp[label_idx].reshape(list(map(np.size, axes))).T CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors='k') ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], alpha=0.75, **contour_kw) ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True) # plot data deciles + axes labels deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) ylim = ax.get_ylim() ax.vlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_xlabel(name[0]) ax.set_ylim(ylim) # prevent x-axis ticks from overlapping ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower')) tick_formatter = ScalarFormatter() tick_formatter.set_powerlimits((-3, 4)) ax.xaxis.set_major_formatter(tick_formatter) if len(axes) > 1: # two-way PDP - y-axis deciles + labels deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) xlim = ax.get_xlim() ax.hlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_ylabel(name[1]) # hline erases xlim ax.set_xlim(xlim) else: ax.set_ylabel('Partial dependence') if len(axes) == 1: ax.set_ylim(pdp_lim[1]) axs.append(ax) fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4, hspace=0.3) return fig, axs
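Since partial_dependence accepts either X (from which _grid_from_X builds a percentile grid) or an explicit grid (in which case axes is None), both call patterns are worth a concrete look. A minimal sketch under assumed data; make_friedman1 and the grid values are illustrative choices, while the import path matches this module layout:

# Sketch: the two call patterns of partial_dependence().
# Dataset and hyperparameters below are illustrative assumptions.
import numpy as np
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import partial_dependence

X, y = make_friedman1(n_samples=200, random_state=0)
est = GradientBoostingRegressor(n_estimators=50, random_state=0).fit(X, y)

# Pattern 1: pass X and let the helper place grid_resolution points
# between the 5th and 95th percentiles of feature 0.
pdp, axes = partial_dependence(est, [0], X=X, grid_resolution=20)
print(pdp.shape)    # (1, 20) for regression: one output, 20 grid points
print(len(axes))    # one axis per target variable

# Pattern 2: pass an explicit 2d grid; no axes are constructed.
grid = np.linspace(0.0, 1.0, num=5).reshape(-1, 1)
pdp, axes = partial_dependence(est, [0], grid=grid)
print(axes)         # None when a grid is given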
15,257
37.530303
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/__init__.py
""" The :mod:`sklearn.ensemble` module includes ensemble-based methods for classification, regression and anomaly detection. """ from .base import BaseEnsemble from .forest import RandomForestClassifier from .forest import RandomForestRegressor from .forest import RandomTreesEmbedding from .forest import ExtraTreesClassifier from .forest import ExtraTreesRegressor from .bagging import BaggingClassifier from .bagging import BaggingRegressor from .iforest import IsolationForest from .weight_boosting import AdaBoostClassifier from .weight_boosting import AdaBoostRegressor from .gradient_boosting import GradientBoostingClassifier from .gradient_boosting import GradientBoostingRegressor from .voting_classifier import VotingClassifier from . import bagging from . import forest from . import weight_boosting from . import gradient_boosting from . import partial_dependence __all__ = ["BaseEnsemble", "RandomForestClassifier", "RandomForestRegressor", "RandomTreesEmbedding", "ExtraTreesClassifier", "ExtraTreesRegressor", "BaggingClassifier", "BaggingRegressor", "IsolationForest", "GradientBoostingClassifier", "GradientBoostingRegressor", "AdaBoostClassifier", "AdaBoostRegressor", "VotingClassifier", "bagging", "forest", "gradient_boosting", "partial_dependence", "weight_boosting"]
1,382
37.416667
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/forest.py
"""Forest of trees-based ensemble methods Those methods include random forests and extremely randomized trees. The module structure is the following: - The ``BaseForest`` base class implements a common ``fit`` method for all the estimators in the module. The ``fit`` method of the base ``Forest`` class calls the ``fit`` method of each sub-estimator on random samples (with replacement, a.k.a. bootstrap) of the training set. The init of the sub-estimator is further delegated to the ``BaseEnsemble`` constructor. - The ``ForestClassifier`` and ``ForestRegressor`` base classes further implement the prediction logic by computing an average of the predicted outcomes of the sub-estimators. - The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived classes provide the user with concrete implementations of the forest ensemble method using classical, deterministic ``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as sub-estimator implementations. - The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived classes provide the user with concrete implementations of the forest ensemble method using the extremely randomized trees ``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as sub-estimator implementations. Single and multi-output problems are both handled. """ # Authors: Gilles Louppe <[email protected]> # Brian Holt <[email protected]> # Joly Arnaud <[email protected]> # Fares Hedayati <[email protected]> # # License: BSD 3 clause from __future__ import division import warnings from warnings import warn import threading from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import issparse from scipy.sparse import hstack as sparse_hstack from ..base import ClassifierMixin, RegressorMixin from ..externals.joblib import Parallel, delayed from ..externals import six from ..metrics import r2_score from ..preprocessing import OneHotEncoder from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor, ExtraTreeClassifier, ExtraTreeRegressor) from ..tree._tree import DTYPE, DOUBLE from ..utils import check_random_state, check_array, compute_sample_weight from ..exceptions import DataConversionWarning, NotFittedError from .base import BaseEnsemble, _partition_estimators from ..utils.fixes import parallel_helper from ..utils.multiclass import check_classification_targets from ..utils.validation import check_is_fitted __all__ = ["RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier", "ExtraTreesRegressor", "RandomTreesEmbedding"] MAX_INT = np.iinfo(np.int32).max def _generate_sample_indices(random_state, n_samples): """Private function used to _parallel_build_trees function.""" random_instance = check_random_state(random_state) sample_indices = random_instance.randint(0, n_samples, n_samples) return sample_indices def _generate_unsampled_indices(random_state, n_samples): """Private function used to forest._set_oob_score function.""" sample_indices = _generate_sample_indices(random_state, n_samples) sample_counts = np.bincount(sample_indices, minlength=n_samples) unsampled_mask = sample_counts == 0 indices_range = np.arange(n_samples) unsampled_indices = indices_range[unsampled_mask] return unsampled_indices def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees, verbose=0, class_weight=None): """Private function used to fit a single tree in parallel.""" if verbose > 1: print("building tree %d of %d" % (tree_idx + 1, n_trees)) if forest.bootstrap: n_samples = X.shape[0] if sample_weight is None: curr_sample_weight 
= np.ones((n_samples,), dtype=np.float64) else: curr_sample_weight = sample_weight.copy() indices = _generate_sample_indices(tree.random_state, n_samples) sample_counts = np.bincount(indices, minlength=n_samples) curr_sample_weight *= sample_counts if class_weight == 'subsample': with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) curr_sample_weight *= compute_sample_weight('auto', y, indices) elif class_weight == 'balanced_subsample': curr_sample_weight *= compute_sample_weight('balanced', y, indices) tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False) else: tree.fit(X, y, sample_weight=sample_weight, check_input=False) return tree class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)): """Base class for forests of trees. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__(self, base_estimator, n_estimators=10, estimator_params=tuple(), bootstrap=False, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False, class_weight=None): super(BaseForest, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, estimator_params=estimator_params) self.bootstrap = bootstrap self.oob_score = oob_score self.n_jobs = n_jobs self.random_state = random_state self.verbose = verbose self.warm_start = warm_start self.class_weight = class_weight def apply(self, X): """Apply trees in the forest to X, return leaf indices. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. Returns ------- X_leaves : array_like, shape = [n_samples, n_estimators] For each datapoint x in X and for each tree in the forest, return the index of the leaf x ends up in. """ X = self._validate_X_predict(X) results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")( delayed(parallel_helper)(tree, 'apply', X, check_input=False) for tree in self.estimators_) return np.array(results).T def decision_path(self, X): """Return the decision path in the forest .. versionadded:: 0.18 Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. Returns ------- indicator : sparse csr array, shape = [n_samples, n_nodes] Return a node indicator matrix where non zero elements indicates that the samples goes through the nodes. n_nodes_ptr : array of size (n_estimators + 1, ) The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]] gives the indicator value for the i-th estimator. """ X = self._validate_X_predict(X) indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")( delayed(parallel_helper)(tree, 'decision_path', X, check_input=False) for tree in self.estimators_) n_nodes = [0] n_nodes.extend([i.shape[1] for i in indicators]) n_nodes_ptr = np.array(n_nodes).cumsum() return sparse_hstack(indicators).tocsr(), n_nodes_ptr def fit(self, X, y, sample_weight=None): """Build a forest of trees from the training set (X, y). Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The training input samples. Internally, its dtype will be converted to ``dtype=np.float32``. 
If a sparse matrix is provided, it will be converted into a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels in classification, real numbers in regression). sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- self : object Returns self. """ # Validate or convert input data X = check_array(X, accept_sparse="csc", dtype=DTYPE) y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None) if sample_weight is not None: sample_weight = check_array(sample_weight, ensure_2d=False) if issparse(X): # Pre-sort indices to avoid that each individual tree of the # ensemble sorts the indices. X.sort_indices() # Remap output n_samples, self.n_features_ = X.shape y = np.atleast_1d(y) if y.ndim == 2 and y.shape[1] == 1: warn("A column-vector y was passed when a 1d array was" " expected. Please change the shape of y to " "(n_samples,), for example using ravel().", DataConversionWarning, stacklevel=2) if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] y, expanded_class_weight = self._validate_y_class_weight(y) if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) if expanded_class_weight is not None: if sample_weight is not None: sample_weight = sample_weight * expanded_class_weight else: sample_weight = expanded_class_weight # Check parameters self._validate_estimator() if not self.bootstrap and self.oob_score: raise ValueError("Out of bag estimation only available" " if bootstrap=True") random_state = check_random_state(self.random_state) if not self.warm_start or not hasattr(self, "estimators_"): # Free allocated memory, if any self.estimators_ = [] n_more_estimators = self.n_estimators - len(self.estimators_) if n_more_estimators < 0: raise ValueError('n_estimators=%d must be larger or equal to ' 'len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_))) elif n_more_estimators == 0: warn("Warm-start fitting without increasing n_estimators does not " "fit new trees.") else: if self.warm_start and len(self.estimators_) > 0: # We draw from the random state to get the random state we # would have got if we hadn't used a warm_start. random_state.randint(MAX_INT, size=len(self.estimators_)) trees = [] for i in range(n_more_estimators): tree = self._make_estimator(append=False, random_state=random_state) trees.append(tree) # Parallel loop: we use the threading backend as the Cython code # for fitting the trees is internally releasing the Python GIL # making threading always more efficient than multiprocessing in # that case. 
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")( delayed(_parallel_build_trees)( t, self, X, y, sample_weight, i, len(trees), verbose=self.verbose, class_weight=self.class_weight) for i, t in enumerate(trees)) # Collect newly grown trees self.estimators_.extend(trees) if self.oob_score: self._set_oob_score(X, y) # Decapsulate classes_ attributes if hasattr(self, "classes_") and self.n_outputs_ == 1: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] return self @abstractmethod def _set_oob_score(self, X, y): """Calculate out of bag predictions and score.""" def _validate_y_class_weight(self, y): # Default implementation return y, None def _validate_X_predict(self, X): """Validate X whenever one tries to predict, apply, predict_proba""" if self.estimators_ is None or len(self.estimators_) == 0: raise NotFittedError("Estimator not fitted, " "call `fit` before exploiting the model.") return self.estimators_[0]._validate_X_predict(X, check_input=True) @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). Returns ------- feature_importances_ : array, shape = [n_features] """ check_is_fitted(self, 'estimators_') all_importances = Parallel(n_jobs=self.n_jobs, backend="threading")( delayed(getattr)(tree, 'feature_importances_') for tree in self.estimators_) return sum(all_importances) / len(self.estimators_) # This is a utility function for joblib's Parallel. It can't go locally in # ForestClassifier or ForestRegressor, because joblib complains that it cannot # pickle it when placed there. def accumulate_prediction(predict, X, out, lock): prediction = predict(X, check_input=False) with lock: if len(out) == 1: out[0] += prediction else: for i in range(len(out)): out[i] += prediction[i] class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest, ClassifierMixin)): """Base class for forest of trees-based classifiers. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__(self, base_estimator, n_estimators=10, estimator_params=tuple(), bootstrap=False, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False, class_weight=None): super(ForestClassifier, self).__init__( base_estimator, n_estimators=n_estimators, estimator_params=estimator_params, bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start, class_weight=class_weight) def _set_oob_score(self, X, y): """Compute out-of-bag score""" X = check_array(X, dtype=DTYPE, accept_sparse='csr') n_classes_ = self.n_classes_ n_samples = y.shape[0] oob_decision_function = [] oob_score = 0.0 predictions = [] for k in range(self.n_outputs_): predictions.append(np.zeros((n_samples, n_classes_[k]))) for estimator in self.estimators_: unsampled_indices = _generate_unsampled_indices( estimator.random_state, n_samples) p_estimator = estimator.predict_proba(X[unsampled_indices, :], check_input=False) if self.n_outputs_ == 1: p_estimator = [p_estimator] for k in range(self.n_outputs_): predictions[k][unsampled_indices, :] += p_estimator[k] for k in range(self.n_outputs_): if (predictions[k].sum(axis=1) == 0).any(): warn("Some inputs do not have OOB scores. 
" "This probably means too few trees were used " "to compute any reliable oob estimates.") decision = (predictions[k] / predictions[k].sum(axis=1)[:, np.newaxis]) oob_decision_function.append(decision) oob_score += np.mean(y[:, k] == np.argmax(predictions[k], axis=1), axis=0) if self.n_outputs_ == 1: self.oob_decision_function_ = oob_decision_function[0] else: self.oob_decision_function_ = oob_decision_function self.oob_score_ = oob_score / self.n_outputs_ def _validate_y_class_weight(self, y): check_classification_targets(y) y = np.copy(y) expanded_class_weight = None if self.class_weight is not None: y_original = np.copy(y) self.classes_ = [] self.n_classes_ = [] y_store_unique_indices = np.zeros(y.shape, dtype=np.int) for k in range(self.n_outputs_): classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_store_unique_indices if self.class_weight is not None: valid_presets = ('balanced', 'balanced_subsample') if isinstance(self.class_weight, six.string_types): if self.class_weight not in valid_presets: raise ValueError('Valid presets for class_weight include ' '"balanced" and "balanced_subsample". Given "%s".' % self.class_weight) if self.warm_start: warn('class_weight presets "balanced" or "balanced_subsample" are ' 'not recommended for warm_start if the fitted data ' 'differs from the full dataset. In order to use ' '"balanced" weights, use compute_class_weight("balanced", ' 'classes, y). In place of y you can use a large ' 'enough sample of the full training set target to ' 'properly estimate the class frequency ' 'distributions. Pass the resulting weights as the ' 'class_weight parameter.') if (self.class_weight != 'balanced_subsample' or not self.bootstrap): if self.class_weight == "balanced_subsample": class_weight = "balanced" else: class_weight = self.class_weight expanded_class_weight = compute_sample_weight(class_weight, y_original) return y, expanded_class_weight def predict(self, X): """Predict class for X. The predicted class of an input sample is a vote by the trees in the forest, weighted by their probability estimates. That is, the predicted class is the one with highest mean probability estimate across the trees. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. Returns ------- y : array of shape = [n_samples] or [n_samples, n_outputs] The predicted classes. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return self.classes_.take(np.argmax(proba, axis=1), axis=0) else: n_samples = proba[0].shape[0] predictions = np.zeros((n_samples, self.n_outputs_)) for k in range(self.n_outputs_): predictions[:, k] = self.classes_[k].take(np.argmax(proba[k], axis=1), axis=0) return predictions def predict_proba(self, X): """Predict class probabilities for X. The predicted class probabilities of an input sample are computed as the mean predicted class probabilities of the trees in the forest. The class probability of a single tree is the fraction of samples of the same class in a leaf. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. 
Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ check_is_fitted(self, 'estimators_') # Check data X = self._validate_X_predict(X) # Assign chunk of trees to jobs n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) # avoid storing the output of every estimator by summing them here all_proba = [np.zeros((X.shape[0], j), dtype=np.float64) for j in np.atleast_1d(self.n_classes_)] lock = threading.Lock() Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")( delayed(accumulate_prediction)(e.predict_proba, X, all_proba, lock) for e in self.estimators_) for proba in all_proba: proba /= len(self.estimators_) if len(all_proba) == 1: return all_proba[0] else: return all_proba def predict_log_proba(self, X): """Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the log of the mean predicted class probabilities of the trees in the forest. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: for k in range(self.n_outputs_): proba[k] = np.log(proba[k]) return proba class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)): """Base class for forest of trees-based regressors. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__(self, base_estimator, n_estimators=10, estimator_params=tuple(), bootstrap=False, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False): super(ForestRegressor, self).__init__( base_estimator, n_estimators=n_estimators, estimator_params=estimator_params, bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start) def predict(self, X): """Predict regression target for X. The predicted regression target of an input sample is computed as the mean predicted regression targets of the trees in the forest. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. Returns ------- y : array of shape = [n_samples] or [n_samples, n_outputs] The predicted values. 
""" check_is_fitted(self, 'estimators_') # Check data X = self._validate_X_predict(X) # Assign chunk of trees to jobs n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) # avoid storing the output of every estimator by summing them here if self.n_outputs_ > 1: y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64) else: y_hat = np.zeros((X.shape[0]), dtype=np.float64) # Parallel loop lock = threading.Lock() Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")( delayed(accumulate_prediction)(e.predict, X, [y_hat], lock) for e in self.estimators_) y_hat /= len(self.estimators_) return y_hat def _set_oob_score(self, X, y): """Compute out-of-bag scores""" X = check_array(X, dtype=DTYPE, accept_sparse='csr') n_samples = y.shape[0] predictions = np.zeros((n_samples, self.n_outputs_)) n_predictions = np.zeros((n_samples, self.n_outputs_)) for estimator in self.estimators_: unsampled_indices = _generate_unsampled_indices( estimator.random_state, n_samples) p_estimator = estimator.predict( X[unsampled_indices, :], check_input=False) if self.n_outputs_ == 1: p_estimator = p_estimator[:, np.newaxis] predictions[unsampled_indices, :] += p_estimator n_predictions[unsampled_indices, :] += 1 if (n_predictions == 0).any(): warn("Some inputs do not have OOB scores. " "This probably means too few trees were used " "to compute any reliable oob estimates.") n_predictions[n_predictions == 0] = 1 predictions /= n_predictions self.oob_prediction_ = predictions if self.n_outputs_ == 1: self.oob_prediction_ = \ self.oob_prediction_.reshape((n_samples, )) self.oob_score_ = 0.0 for k in range(self.n_outputs_): self.oob_score_ += r2_score(y[:, k], predictions[:, k]) self.oob_score_ /= self.n_outputs_ class RandomForestClassifier(ForestClassifier): """A random forest classifier. A random forest is a meta estimator that fits a number of decision tree classifiers on various sub-samples of the dataset and use averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is always the same as the original input sample size but the samples are drawn with replacement if `bootstrap=True` (default). Read more in the :ref:`User Guide <forest>`. Parameters ---------- n_estimators : integer, optional (default=10) The number of trees in the forest. criterion : string, optional (default="gini") The function to measure the quality of a split. Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. Note: this parameter is tree-specific. max_features : int, float, string or None, optional (default="auto") The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto"). - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : integer or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. 
min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 bootstrap : boolean, optional (default=True) Whether bootstrap samples are used when building trees. oob_score : bool (default=False) Whether to use out-of-bag samples to estimate the generalization accuracy. n_jobs : integer, optional (default=1) The number of jobs to run in parallel for both `fit` and `predict`. If -1, then the number of jobs is set to the number of cores. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. verbose : int, optional (default=0) Controls the verbosity of the tree building process. warm_start : bool, optional (default=False) When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest. class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None) Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. 
Note that for multioutput (including multilabel) weights should be defined for each class of every column in its own dict. For example, for four-class multilabel classification weights should be [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of [{1:1}, {2:5}, {3:1}, {4:1}]. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` The "balanced_subsample" mode is the same as "balanced" except that weights are computed based on the bootstrap sample for every tree grown. For multi-output, the weights of each column of y will be multiplied. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. Attributes ---------- estimators_ : list of DecisionTreeClassifier The collection of fitted sub-estimators. classes_ : array of shape = [n_classes] or a list of such arrays The classes labels (single output problem), or a list of arrays of class labels (multi-output problem). n_classes_ : int or list The number of classes (single output problem), or a list containing the number of classes for each output (multi-output problem). n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. feature_importances_ : array of shape = [n_features] The feature importances (the higher, the more important the feature). oob_score_ : float Score of the training dataset obtained using an out-of-bag estimate. oob_decision_function_ : array of shape = [n_samples, n_classes] Decision function computed with out-of-bag estimate on the training set. If n_estimators is small it might be possible that a data point was never left out during the bootstrap. In this case, `oob_decision_function_` might contain NaN. Examples -------- >>> from sklearn.ensemble import RandomForestClassifier >>> from sklearn.datasets import make_classification >>> >>> X, y = make_classification(n_samples=1000, n_features=4, ... n_informative=2, n_redundant=0, ... random_state=0, shuffle=False) >>> clf = RandomForestClassifier(max_depth=2, random_state=0) >>> clf.fit(X, y) RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=2, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, oob_score=False, random_state=0, verbose=0, warm_start=False) >>> print(clf.feature_importances_) [ 0.17287856 0.80608704 0.01884792 0.00218648] >>> print(clf.predict([[0, 0, 0, 0]])) [1] Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. The features are always randomly permuted at each split. Therefore, the best found split may vary, even with the same training data, ``max_features=n_features`` and ``bootstrap=False``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. References ---------- .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001. 
See also -------- DecisionTreeClassifier, ExtraTreesClassifier """ def __init__(self, n_estimators=10, criterion="gini", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False, class_weight=None): super(RandomForestClassifier, self).__init__( base_estimator=DecisionTreeClassifier(), n_estimators=n_estimators, estimator_params=("criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "max_features", "max_leaf_nodes", "min_impurity_decrease", "min_impurity_split", "random_state"), bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start, class_weight=class_weight) self.criterion = criterion self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.max_leaf_nodes = max_leaf_nodes self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split class RandomForestRegressor(ForestRegressor): """A random forest regressor. A random forest is a meta estimator that fits a number of classifying decision trees on various sub-samples of the dataset and use averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is always the same as the original input sample size but the samples are drawn with replacement if `bootstrap=True` (default). Read more in the :ref:`User Guide <forest>`. Parameters ---------- n_estimators : integer, optional (default=10) The number of trees in the forest. criterion : string, optional (default="mse") The function to measure the quality of a split. Supported criteria are "mse" for the mean squared error, which is equal to variance reduction as feature selection criterion, and "mae" for the mean absolute error. .. versionadded:: 0.18 Mean Absolute Error (MAE) criterion. max_features : int, float, string or None, optional (default="auto") The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : integer or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. 
min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 bootstrap : boolean, optional (default=True) Whether bootstrap samples are used when building trees. oob_score : bool, optional (default=False) whether to use out-of-bag samples to estimate the R^2 on unseen data. n_jobs : integer, optional (default=1) The number of jobs to run in parallel for both `fit` and `predict`. If -1, then the number of jobs is set to the number of cores. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. verbose : int, optional (default=0) Controls the verbosity of the tree building process. warm_start : bool, optional (default=False) When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest. Attributes ---------- estimators_ : list of DecisionTreeRegressor The collection of fitted sub-estimators. feature_importances_ : array of shape = [n_features] The feature importances (the higher, the more important the feature). n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. oob_score_ : float Score of the training dataset obtained using an out-of-bag estimate. oob_prediction_ : array of shape = [n_samples] Prediction computed with out-of-bag estimate on the training set. Examples -------- >>> from sklearn.ensemble import RandomForestRegressor >>> from sklearn.datasets import make_regression >>> >>> X, y = make_regression(n_features=4, n_informative=2, ... 
random_state=0, shuffle=False) >>> regr = RandomForestRegressor(max_depth=2, random_state=0) >>> regr.fit(X, y) RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=2, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, oob_score=False, random_state=0, verbose=0, warm_start=False) >>> print(regr.feature_importances_) [ 0.17339552 0.81594114 0. 0.01066333] >>> print(regr.predict([[0, 0, 0, 0]])) [-2.50699856] Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. The features are always randomly permuted at each split. Therefore, the best found split may vary, even with the same training data, ``max_features=n_features`` and ``bootstrap=False``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. References ---------- .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001. See also -------- DecisionTreeRegressor, ExtraTreesRegressor """ def __init__(self, n_estimators=10, criterion="mse", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False): super(RandomForestRegressor, self).__init__( base_estimator=DecisionTreeRegressor(), n_estimators=n_estimators, estimator_params=("criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "max_features", "max_leaf_nodes", "min_impurity_decrease", "min_impurity_split", "random_state"), bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start) self.criterion = criterion self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.max_leaf_nodes = max_leaf_nodes self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split class ExtraTreesClassifier(ForestClassifier): """An extra-trees classifier. This class implements a meta estimator that fits a number of randomized decision trees (a.k.a. extra-trees) on various sub-samples of the dataset and use averaging to improve the predictive accuracy and control over-fitting. Read more in the :ref:`User Guide <forest>`. Parameters ---------- n_estimators : integer, optional (default=10) The number of trees in the forest. criterion : string, optional (default="gini") The function to measure the quality of a split. Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. max_features : int, float, string or None, optional (default="auto") The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. 
- If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : integer or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 bootstrap : boolean, optional (default=False) Whether bootstrap samples are used when building trees. oob_score : bool, optional (default=False) Whether to use out-of-bag samples to estimate the generalization accuracy. n_jobs : integer, optional (default=1) The number of jobs to run in parallel for both `fit` and `predict`. If -1, then the number of jobs is set to the number of cores. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
verbose : int, optional (default=0) Controls the verbosity of the tree building process. warm_start : bool, optional (default=False) When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest. class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None) Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. Note that for multioutput (including multilabel) weights should be defined for each class of every column in its own dict. For example, for four-class multilabel classification weights should be [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of [{1:1}, {2:5}, {3:1}, {4:1}]. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` The "balanced_subsample" mode is the same as "balanced" except that weights are computed based on the bootstrap sample for every tree grown. For multi-output, the weights of each column of y will be multiplied. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. Attributes ---------- estimators_ : list of DecisionTreeClassifier The collection of fitted sub-estimators. classes_ : array of shape = [n_classes] or a list of such arrays The classes labels (single output problem), or a list of arrays of class labels (multi-output problem). n_classes_ : int or list The number of classes (single output problem), or a list containing the number of classes for each output (multi-output problem). feature_importances_ : array of shape = [n_features] The feature importances (the higher, the more important the feature). n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. oob_score_ : float Score of the training dataset obtained using an out-of-bag estimate. oob_decision_function_ : array of shape = [n_samples, n_classes] Decision function computed with out-of-bag estimate on the training set. If n_estimators is small it might be possible that a data point was never left out during the bootstrap. In this case, `oob_decision_function_` might contain NaN. Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. See also -------- sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble. RandomForestClassifier : Ensemble Classifier based on trees with optimal splits. 
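    Examples
    --------
    A minimal usage sketch (illustrative only: the dataset is synthetic
    and the parameter values are arbitrary)::

        from sklearn.datasets import make_classification
        from sklearn.ensemble import ExtraTreesClassifier

        X, y = make_classification(n_samples=100, n_features=4,
                                   random_state=0)
        clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
        clf.fit(X, y)
        clf.predict([[0, 0, 0, 0]])        # predicted class label
        clf.feature_importances_           # impurity-based importances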
""" def __init__(self, n_estimators=10, criterion="gini", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, bootstrap=False, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False, class_weight=None): super(ExtraTreesClassifier, self).__init__( base_estimator=ExtraTreeClassifier(), n_estimators=n_estimators, estimator_params=("criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "max_features", "max_leaf_nodes", "min_impurity_decrease", "min_impurity_split", "random_state"), bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start, class_weight=class_weight) self.criterion = criterion self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.max_leaf_nodes = max_leaf_nodes self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split class ExtraTreesRegressor(ForestRegressor): """An extra-trees regressor. This class implements a meta estimator that fits a number of randomized decision trees (a.k.a. extra-trees) on various sub-samples of the dataset and use averaging to improve the predictive accuracy and control over-fitting. Read more in the :ref:`User Guide <forest>`. Parameters ---------- n_estimators : integer, optional (default=10) The number of trees in the forest. criterion : string, optional (default="mse") The function to measure the quality of a split. Supported criteria are "mse" for the mean squared error, which is equal to variance reduction as feature selection criterion, and "mae" for the mean absolute error. .. versionadded:: 0.18 Mean Absolute Error (MAE) criterion. max_features : int, float, string or None, optional (default="auto") The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : integer or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. 
- If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 bootstrap : boolean, optional (default=False) Whether bootstrap samples are used when building trees. oob_score : bool, optional (default=False) Whether to use out-of-bag samples to estimate the R^2 on unseen data. n_jobs : integer, optional (default=1) The number of jobs to run in parallel for both `fit` and `predict`. If -1, then the number of jobs is set to the number of cores. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. verbose : int, optional (default=0) Controls the verbosity of the tree building process. warm_start : bool, optional (default=False) When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest. Attributes ---------- estimators_ : list of DecisionTreeRegressor The collection of fitted sub-estimators. feature_importances_ : array of shape = [n_features] The feature importances (the higher, the more important the feature). n_features_ : int The number of features. n_outputs_ : int The number of outputs. oob_score_ : float Score of the training dataset obtained using an out-of-bag estimate. oob_prediction_ : array of shape = [n_samples] Prediction computed with out-of-bag estimate on the training set. Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. References ---------- .. [1] P. Geurts, D. Ernst., and L. 
Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. See also -------- sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble. RandomForestRegressor: Ensemble regressor using trees with optimal splits. """ def __init__(self, n_estimators=10, criterion="mse", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, bootstrap=False, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False): super(ExtraTreesRegressor, self).__init__( base_estimator=ExtraTreeRegressor(), n_estimators=n_estimators, estimator_params=("criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "max_features", "max_leaf_nodes", "min_impurity_decrease", "min_impurity_split", "random_state"), bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start) self.criterion = criterion self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.max_leaf_nodes = max_leaf_nodes self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split class RandomTreesEmbedding(BaseForest): """An ensemble of totally random trees. An unsupervised transformation of a dataset to a high-dimensional sparse representation. A datapoint is coded according to which leaf of each tree it is sorted into. Using a one-hot encoding of the leaves, this leads to a binary coding with as many ones as there are trees in the forest. The dimensionality of the resulting representation is ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``, the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``. Read more in the :ref:`User Guide <random_trees_embedding>`. Parameters ---------- n_estimators : integer, optional (default=10) Number of trees in the forest. max_depth : integer, optional (default=5) The maximum depth of each tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` is the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` is the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. 
    min_impurity_split : float,
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
           Use ``min_impurity_decrease`` instead.

    min_impurity_decrease : float, optional (default=0.)
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of ExtraTreeRegressor
        The collection of fitted sub-estimators.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F.
"Fast discriminative visual codebooks using randomized clustering forests" NIPS 2007 """ def __init__(self, n_estimators=10, max_depth=5, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, sparse_output=True, n_jobs=1, random_state=None, verbose=0, warm_start=False): super(RandomTreesEmbedding, self).__init__( base_estimator=ExtraTreeRegressor(), n_estimators=n_estimators, estimator_params=("criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "max_features", "max_leaf_nodes", "min_impurity_decrease", "min_impurity_split", "random_state"), bootstrap=False, oob_score=False, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start) self.criterion = 'mse' self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = 1 self.max_leaf_nodes = max_leaf_nodes self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split self.sparse_output = sparse_output def _set_oob_score(self, X, y): raise NotImplementedError("OOB score not supported by tree embedding") def fit(self, X, y=None, sample_weight=None): """Fit estimator. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Sparse matrices are also supported, use sparse ``csc_matrix`` for maximum efficiency. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- self : object Returns self. """ self.fit_transform(X, y, sample_weight=sample_weight) return self def fit_transform(self, X, y=None, sample_weight=None): """Fit estimator and transform dataset. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Input data used to build forests. Use ``dtype=np.float32`` for maximum efficiency. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- X_transformed : sparse matrix, shape=(n_samples, n_out) Transformed dataset. """ X = check_array(X, accept_sparse=['csc']) if issparse(X): # Pre-sort indices to avoid that each individual tree of the # ensemble sorts the indices. X.sort_indices() rnd = check_random_state(self.random_state) y = rnd.uniform(size=X.shape[0]) super(RandomTreesEmbedding, self).fit(X, y, sample_weight=sample_weight) self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output) return self.one_hot_encoder_.fit_transform(self.apply(X)) def transform(self, X): """Transform dataset. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Input data to be transformed. Use ``dtype=np.float32`` for maximum efficiency. 
Sparse matrices are also supported, use sparse ``csr_matrix`` for maximum efficiency. Returns ------- X_transformed : sparse matrix, shape=(n_samples, n_out) Transformed dataset. """ return self.one_hot_encoder_.transform(self.apply(X))
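if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; synthetic data, arbitrary
    # parameters): RandomTreesEmbedding maps each sample to the leaf it
    # reaches in every tree and one-hot encodes those leaves, so each row
    # of the output has exactly n_estimators non-zeros and the number of
    # output columns is at most n_estimators * 2 ** max_depth.
    rng = np.random.RandomState(0)
    X_demo = rng.rand(50, 2)
    hasher = RandomTreesEmbedding(n_estimators=5, max_depth=3,
                                  random_state=0)
    X_sparse = hasher.fit_transform(X_demo)   # sparse CSR matrix
    print(X_sparse.shape)                     # (50, n_out), n_out <= 5 * 2**3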
79027
39.423529
105
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/gradient_boosting.py
"""Gradient Boosted Regression Trees This module contains methods for fitting gradient boosted regression trees for both classification and regression. The module structure is the following: - The ``BaseGradientBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ in the concrete ``LossFunction`` used. - ``GradientBoostingClassifier`` implements gradient boosting for classification problems. - ``GradientBoostingRegressor`` implements gradient boosting for regression problems. """ # Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti, # Arnaud Joly, Jacob Schreiber # License: BSD 3 clause from __future__ import print_function from __future__ import division from abc import ABCMeta from abc import abstractmethod from .base import BaseEnsemble from ..base import ClassifierMixin from ..base import RegressorMixin from ..externals import six from ._gradient_boosting import predict_stages from ._gradient_boosting import predict_stage from ._gradient_boosting import _random_sample_mask import numbers import numpy as np from scipy import stats from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import issparse from scipy.special import expit from time import time from ..tree.tree import DecisionTreeRegressor from ..tree._tree import DTYPE from ..tree._tree import TREE_LEAF from ..utils import check_random_state from ..utils import check_array from ..utils import check_X_y from ..utils import column_or_1d from ..utils import check_consistent_length from ..utils import deprecated from ..utils.fixes import logsumexp from ..utils.stats import _weighted_percentile from ..utils.validation import check_is_fitted from ..utils.multiclass import check_classification_targets from ..exceptions import NotFittedError class QuantileEstimator(object): """An estimator predicting the alpha-quantile of the training targets.""" def __init__(self, alpha=0.9): if not 0 < alpha < 1.0: raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha) self.alpha = alpha def fit(self, X, y, sample_weight=None): if sample_weight is None: self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0) else: self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0) def predict(self, X): check_is_fitted(self, 'quantile') y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.quantile) return y class MeanEstimator(object): """An estimator predicting the mean of the training targets.""" def fit(self, X, y, sample_weight=None): if sample_weight is None: self.mean = np.mean(y) else: self.mean = np.average(y, weights=sample_weight) def predict(self, X): check_is_fitted(self, 'mean') y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.mean) return y class LogOddsEstimator(object): """An estimator predicting the log odds ratio.""" scale = 1.0 def fit(self, X, y, sample_weight=None): # pre-cond: pos, neg are encoded as 1, 0 if sample_weight is None: pos = np.sum(y) neg = y.shape[0] - pos else: pos = np.sum(sample_weight * y) neg = np.sum(sample_weight * (1 - y)) if neg == 0 or pos == 0: raise ValueError('y contains non binary labels.') self.prior = self.scale * np.log(pos / neg) def predict(self, X): check_is_fitted(self, 'prior') y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.prior) return y class ScaledLogOddsEstimator(LogOddsEstimator): """Log odds ratio scaled by 0.5 -- for exponential loss. 
""" scale = 0.5 class PriorProbabilityEstimator(object): """An estimator predicting the probability of each class in the training data. """ def fit(self, X, y, sample_weight=None): if sample_weight is None: sample_weight = np.ones_like(y, dtype=np.float64) class_counts = np.bincount(y, weights=sample_weight) self.priors = class_counts / class_counts.sum() def predict(self, X): check_is_fitted(self, 'priors') y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64) y[:] = self.priors return y class ZeroEstimator(object): """An estimator that simply predicts zero. """ def fit(self, X, y, sample_weight=None): if np.issubdtype(y.dtype, np.signedinteger): # classification self.n_classes = np.unique(y).shape[0] if self.n_classes == 2: self.n_classes = 1 else: # regression self.n_classes = 1 def predict(self, X): check_is_fitted(self, 'n_classes') y = np.empty((X.shape[0], self.n_classes), dtype=np.float64) y.fill(0.0) return y class LossFunction(six.with_metaclass(ABCMeta, object)): """Abstract base class for various loss functions. Attributes ---------- K : int The number of regression trees to be induced; 1 for regression and binary classification; ``n_classes`` for multi-class classification. """ is_multi_class = False def __init__(self, n_classes): self.K = n_classes def init_estimator(self): """Default ``init`` estimator for loss function. """ raise NotImplementedError() @abstractmethod def __call__(self, y, pred, sample_weight=None): """Compute the loss of prediction ``pred`` and ``y``. """ @abstractmethod def negative_gradient(self, y, y_pred, **kargs): """Compute the negative gradient. Parameters --------- y : np.ndarray, shape=(n,) The target labels. y_pred : np.ndarray, shape=(n,): The predictions. """ def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Update the terminal regions (=leaves) of the given tree and updates the current predictions of the model. Traverses tree and invokes template method `_update_terminal_region`. Parameters ---------- tree : tree.Tree The tree object. X : ndarray, shape=(n, m) The data array. y : ndarray, shape=(n,) The target labels. residual : ndarray, shape=(n,) The residuals (usually the negative gradient). y_pred : ndarray, shape=(n,) The predictions. sample_weight : ndarray, shape=(n,) The weight of each sample. sample_mask : ndarray, shape=(n,) The sample mask to be used. learning_rate : float, default=0.1 learning rate shrinks the contribution of each tree by ``learning_rate``. k : int, default 0 The index of the estimator being updated. """ # compute leaf for each sample in ``X``. terminal_regions = tree.apply(X) # mask all which are not in sample mask. masked_terminal_regions = terminal_regions.copy() masked_terminal_regions[~sample_mask] = -1 # update each leaf (= perform line search) for leaf in np.where(tree.children_left == TREE_LEAF)[0]: self._update_terminal_region(tree, masked_terminal_regions, leaf, X, y, residual, y_pred[:, k], sample_weight) # update predictions (both in-bag and out-of-bag) y_pred[:, k] += (learning_rate * tree.value[:, 0, 0].take(terminal_regions, axis=0)) @abstractmethod def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Template method for updating terminal regions (=leaves). """ class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for regression loss functions. 
""" def __init__(self, n_classes): if n_classes != 1: raise ValueError("``n_classes`` must be 1 for regression but " "was %r" % n_classes) super(RegressionLossFunction, self).__init__(n_classes) class LeastSquaresError(RegressionLossFunction): """Loss function for least squares (LS) estimation. Terminal regions need not to be updated for least squares. """ def init_estimator(self): return MeanEstimator() def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.mean((y - pred.ravel()) ** 2.0) else: return (1.0 / sample_weight.sum() * np.sum(sample_weight * ((y - pred.ravel()) ** 2.0))) def negative_gradient(self, y, pred, **kargs): return y - pred.ravel() def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Least squares does not need to update terminal regions. But it has to update the predictions. """ # update predictions y_pred[:, k] += learning_rate * tree.predict(X).ravel() def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): pass class LeastAbsoluteError(RegressionLossFunction): """Loss function for least absolute deviation (LAD) regression. """ def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.abs(y - pred.ravel()).mean() else: return (1.0 / sample_weight.sum() * np.sum(sample_weight * np.abs(y - pred.ravel()))) def negative_gradient(self, y, pred, **kargs): """1.0 if y - pred > 0.0 else -1.0""" pred = pred.ravel() return 2.0 * (y - pred > 0.0) - 1.0 def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """LAD updates terminal regions to median estimates. """ terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0) tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50) class HuberLossFunction(RegressionLossFunction): """Huber loss function for robust regression. M-Regression proposed in Friedman 2001. References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. 
""" def __init__(self, n_classes, alpha=0.9): super(HuberLossFunction, self).__init__(n_classes) self.alpha = alpha self.gamma = None def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred gamma = self.gamma if gamma is None: if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma if sample_weight is None: sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / y.shape[0] else: sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * sample_weight[~gamma_mask] * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / sample_weight.sum() return loss def negative_gradient(self, y, pred, sample_weight=None, **kargs): pred = pred.ravel() diff = y - pred if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma residual = np.zeros((y.shape[0],), dtype=np.float64) residual[gamma_mask] = diff[gamma_mask] residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask]) self.gamma = gamma return residual def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) gamma = self.gamma diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) median = _weighted_percentile(diff, sample_weight, percentile=50) diff_minus_median = diff - median tree.value[leaf, 0] = median + np.mean( np.sign(diff_minus_median) * np.minimum(np.abs(diff_minus_median), gamma)) class QuantileLossFunction(RegressionLossFunction): """Loss function for quantile regression. Quantile regression allows to estimate the percentiles of the conditional distribution of the target. """ def __init__(self, n_classes, alpha=0.9): super(QuantileLossFunction, self).__init__(n_classes) assert 0 < alpha < 1.0 self.alpha = alpha self.percentile = alpha * 100.0 def init_estimator(self): return QuantileEstimator(self.alpha) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred alpha = self.alpha mask = y > pred if sample_weight is None: loss = (alpha * diff[mask].sum() - (1.0 - alpha) * diff[~mask].sum()) / y.shape[0] else: loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) - (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) / sample_weight.sum()) return loss def negative_gradient(self, y, pred, **kargs): alpha = self.alpha pred = pred.ravel() mask = y > pred return (alpha * mask) - ((1.0 - alpha) * ~mask) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) sample_weight = sample_weight.take(terminal_region, axis=0) val = _weighted_percentile(diff, sample_weight, self.percentile) tree.value[leaf, 0] = val class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for classification loss functions. 
""" def _score_to_proba(self, score): """Template method to convert scores to probabilities. the does not support probabilities raises AttributeError. """ raise TypeError('%s does not support predict_proba' % type(self).__name__) @abstractmethod def _score_to_decision(self, score): """Template method to convert scores to decisions. Returns int arrays. """ class BinomialDeviance(ClassificationLossFunction): """Binomial deviance loss function for binary classification. Binary classification is a special case; here, we only need to fit one tree instead of ``n_classes`` trees. """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(BinomialDeviance, self).__init__(1) def init_estimator(self): return LogOddsEstimator() def __call__(self, y, pred, sample_weight=None): """Compute the deviance (= 2 * negative log-likelihood). """ # logaddexp(0, v) == log(1.0 + exp(v)) pred = pred.ravel() if sample_weight is None: return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred)) else: return (-2.0 / sample_weight.sum() * np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred)))) def negative_gradient(self, y, pred, **kargs): """Compute the residual (= negative gradient). """ return y - expit(pred.ravel()) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. our node estimate is given by: sum(w * (y - prob)) / sum(w * prob * (1 - prob)) we take advantage that: y - prob = residual """ terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual)) # prevents overflow and division by zero if abs(denominator) < 1e-150: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = expit(score.ravel()) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class MultinomialDeviance(ClassificationLossFunction): """Multinomial deviance loss function for multi-class classification. For multi-class classification we need to fit ``n_classes`` trees at each stage. """ is_multi_class = True def __init__(self, n_classes): if n_classes < 3: raise ValueError("{0:s} requires more than 2 classes.".format( self.__class__.__name__)) super(MultinomialDeviance, self).__init__(n_classes) def init_estimator(self): return PriorProbabilityEstimator() def __call__(self, y, pred, sample_weight=None): # create one-hot label encoding Y = np.zeros((y.shape[0], self.K), dtype=np.float64) for k in range(self.K): Y[:, k] = y == k if sample_weight is None: return np.sum(-1 * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) else: return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) def negative_gradient(self, y, pred, k=0, **kwargs): """Compute negative gradient for the ``k``-th class. """ return y - np.nan_to_num(np.exp(pred[:, k] - logsumexp(pred, axis=1))) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. 
""" terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) numerator *= (self.K - 1) / self.K denominator = np.sum(sample_weight * (y - residual) * (1.0 - y + residual)) # prevents overflow and division by zero if abs(denominator) < 1e-150: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): return np.nan_to_num( np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis]))) def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class ExponentialLoss(ClassificationLossFunction): """Exponential loss function for binary classification. Same loss as AdaBoost. References ---------- Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007 """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(ExponentialLoss, self).__init__(1) def init_estimator(self): return ScaledLogOddsEstimator() def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() if sample_weight is None: return np.mean(np.exp(-(2. * y - 1.) * pred)) else: return (1.0 / sample_weight.sum() * np.sum(sample_weight * np.exp(-(2 * y - 1) * pred))) def negative_gradient(self, y, pred, **kargs): y_ = -(2. * y - 1.) return y_ * np.exp(y_ * pred.ravel()) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] pred = pred.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) y_ = 2. * y - 1. numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred)) denominator = np.sum(sample_weight * np.exp(-y_ * pred)) # prevents overflow and division by zero if abs(denominator) < 1e-150: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = expit(2.0 * score.ravel()) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): return (score.ravel() >= 0.0).astype(np.int) LOSS_FUNCTIONS = {'ls': LeastSquaresError, 'lad': LeastAbsoluteError, 'huber': HuberLossFunction, 'quantile': QuantileLossFunction, 'deviance': None, # for both, multinomial and binomial 'exponential': ExponentialLoss, } INIT_ESTIMATORS = {'zero': ZeroEstimator} class VerboseReporter(object): """Reports verbose output to stdout. If ``verbose==1`` output is printed once in a while (when iteration mod verbose_mod is zero).; if larger than 1 then output is printed for each update. """ def __init__(self, verbose): self.verbose = verbose def init(self, est, begin_at_stage=0): # header fields and line format str header_fields = ['Iter', 'Train Loss'] verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}'] # do oob? 
if est.subsample < 1: header_fields.append('OOB Improve') verbose_fmt.append('{oob_impr:>16.4f}') header_fields.append('Remaining Time') verbose_fmt.append('{remaining_time:>16s}') # print the header line print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields)) self.verbose_fmt = ' '.join(verbose_fmt) # plot verbose info each time i % verbose_mod == 0 self.verbose_mod = 1 self.start_time = time() self.begin_at_stage = begin_at_stage def update(self, j, est): """Update reporter with new iteration. """ do_oob = est.subsample < 1 # we need to take into account if we fit additional estimators. i = j - self.begin_at_stage # iteration relative to the start iter if (i + 1) % self.verbose_mod == 0: oob_impr = est.oob_improvement_[j] if do_oob else 0 remaining_time = ((est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)) if remaining_time > 60: remaining_time = '{0:.2f}m'.format(remaining_time / 60.0) else: remaining_time = '{0:.2f}s'.format(remaining_time) print(self.verbose_fmt.format(iter=j + 1, train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time)) if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0): # adjust verbose frequency (powers of 10) self.verbose_mod *= 10 class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)): """Abstract base class for Gradient Boosting. """ @abstractmethod def __init__(self, loss, learning_rate, n_estimators, criterion, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, min_impurity_decrease, min_impurity_split, init, subsample, max_features, random_state, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'): self.n_estimators = n_estimators self.learning_rate = learning_rate self.loss = loss self.criterion = criterion self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.subsample = subsample self.max_features = max_features self.max_depth = max_depth self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split self.init = init self.random_state = random_state self.alpha = alpha self.verbose = verbose self.max_leaf_nodes = max_leaf_nodes self.warm_start = warm_start self.presort = presort def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, random_state, X_idx_sorted, X_csc=None, X_csr=None): """Fit another stage of ``n_classes_`` trees to the boosting model. """ assert sample_mask.dtype == np.bool loss = self.loss_ original_y = y for k in range(loss.K): if loss.is_multi_class: y = np.array(original_y == k, dtype=np.float64) residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight) # induce regression tree on residuals tree = DecisionTreeRegressor( criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, min_impurity_decrease=self.min_impurity_decrease, min_impurity_split=self.min_impurity_split, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, presort=self.presort) if self.subsample < 1.0: # no inplace multiplication! 
sample_weight = sample_weight * sample_mask.astype(np.float64) if X_csc is not None: tree.fit(X_csc, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted) else: tree.fit(X, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted) # update tree leaves if X_csr is not None: loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) else: loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) # add tree to ensemble self.estimators_[i, k] = tree return y_pred def _check_params(self): """Check validity of parameters and raise ValueError if not valid. """ if self.n_estimators <= 0: raise ValueError("n_estimators must be greater than 0 but " "was %r" % self.n_estimators) if self.learning_rate <= 0.0: raise ValueError("learning_rate must be greater than 0 but " "was %r" % self.learning_rate) if (self.loss not in self._SUPPORTED_LOSS or self.loss not in LOSS_FUNCTIONS): raise ValueError("Loss '{0:s}' not supported. ".format(self.loss)) if self.loss == 'deviance': loss_class = (MultinomialDeviance if len(self.classes_) > 2 else BinomialDeviance) else: loss_class = LOSS_FUNCTIONS[self.loss] if self.loss in ('huber', 'quantile'): self.loss_ = loss_class(self.n_classes_, self.alpha) else: self.loss_ = loss_class(self.n_classes_) if not (0.0 < self.subsample <= 1.0): raise ValueError("subsample must be in (0,1] but " "was %r" % self.subsample) if self.init is not None: if isinstance(self.init, six.string_types): if self.init not in INIT_ESTIMATORS: raise ValueError('init="%s" is not supported' % self.init) else: if (not hasattr(self.init, 'fit') or not hasattr(self.init, 'predict')): raise ValueError("init=%r must be valid BaseEstimator " "and support both fit and " "predict" % self.init) if not (0.0 < self.alpha < 1.0): raise ValueError("alpha must be in (0.0, 1.0) but " "was %r" % self.alpha) if isinstance(self.max_features, six.string_types): if self.max_features == "auto": # if is_classification if self.n_classes_ > 1: max_features = max(1, int(np.sqrt(self.n_features_))) else: # is regression max_features = self.n_features_ elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features_))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_))) else: raise ValueError("Invalid value for max_features: %r. " "Allowed string values are 'auto', 'sqrt' " "or 'log2'." % self.max_features) elif self.max_features is None: max_features = self.n_features_ elif isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float if 0. < self.max_features <= 1.: max_features = max(int(self.max_features * self.n_features_), 1) else: raise ValueError("max_features must be in (0, n_features]") self.max_features_ = max_features def _init_state(self): """Initialize model state and allocate model state data structures. """ if self.init is None: self.init_ = self.loss_.init_estimator() elif isinstance(self.init, six.string_types): self.init_ = INIT_ESTIMATORS[self.init]() else: self.init_ = self.init self.estimators_ = np.empty((self.n_estimators, self.loss_.K), dtype=np.object) self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64) # do oob? if self.subsample < 1.0: self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64) def _clear_state(self): """Clear the state of the gradient boosting model. 
""" if hasattr(self, 'estimators_'): self.estimators_ = np.empty((0, 0), dtype=np.object) if hasattr(self, 'train_score_'): del self.train_score_ if hasattr(self, 'oob_improvement_'): del self.oob_improvement_ if hasattr(self, 'init_'): del self.init_ def _resize_state(self): """Add additional ``n_estimators`` entries to all attributes. """ # self.n_estimators is the number of additional est to fit total_n_estimators = self.n_estimators if total_n_estimators < self.estimators_.shape[0]: raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_[0])) self.estimators_.resize((total_n_estimators, self.loss_.K)) self.train_score_.resize(total_n_estimators) if (self.subsample < 1 or hasattr(self, 'oob_improvement_')): # if do oob resize arrays or create new if not available if hasattr(self, 'oob_improvement_'): self.oob_improvement_.resize(total_n_estimators) else: self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64) def _is_initialized(self): return len(getattr(self, 'estimators_', [])) > 0 def _check_initialized(self): """Check that the estimator is initialized, raising an error if not.""" check_is_fitted(self, 'estimators_') @property @deprecated("Attribute n_features was deprecated in version 0.19 and " "will be removed in 0.21.") def n_features(self): return self.n_features_ def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. 
""" # if not warmstart - clear the estimator state if not self.warm_start: self._clear_state() # Check input X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE) n_samples, self.n_features_ = X.shape if sample_weight is None: sample_weight = np.ones(n_samples, dtype=np.float32) else: sample_weight = column_or_1d(sample_weight, warn=True) check_consistent_length(X, y, sample_weight) y = self._validate_y(y) random_state = check_random_state(self.random_state) self._check_params() if not self._is_initialized(): # init state self._init_state() # fit initial model - FIXME make sample_weight optional self.init_.fit(X, y, sample_weight) # init predictions y_pred = self.init_.predict(X) begin_at_stage = 0 else: # add more estimators to fitted model # invariant: warm_start = True if self.n_estimators < self.estimators_.shape[0]: raise ValueError('n_estimators=%d must be larger or equal to ' 'estimators_.shape[0]=%d when ' 'warm_start==True' % (self.n_estimators, self.estimators_.shape[0])) begin_at_stage = self.estimators_.shape[0] y_pred = self._decision_function(X) self._resize_state() X_idx_sorted = None presort = self.presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. if presort == 'auto' and issparse(X): presort = False elif presort == 'auto': presort = True if presort == True: if issparse(X): raise ValueError("Presorting is not supported for sparse matrices.") else: X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0), dtype=np.int32) # fit the boosting stages n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state, begin_at_stage, monitor, X_idx_sorted) # change shape of arrays after fit (early-stopping or additional ests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] return self def _fit_stages(self, X, y, y_pred, sample_weight, random_state, begin_at_stage=0, monitor=None, X_idx_sorted=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. """ n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = np.ones((n_samples, ), dtype=np.bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ # Set min_weight_leaf from min_weight_fraction_leaf if self.min_weight_fraction_leaf != 0. and sample_weight is not None: min_weight_leaf = (self.min_weight_fraction_leaf * np.sum(sample_weight)) else: min_weight_leaf = 0. 
if self.verbose: verbose_reporter = VerboseReporter(self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage old_oob_score = loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, random_state, X_idx_sorted, X_csc, X_csr) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = ( old_oob_score - loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, y_pred, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break return i + 1 def _make_estimator(self, append=True): # we don't need _make_estimator raise NotImplementedError() def _init_decision_function(self, X): """Check input and compute prediction of ``init``. """ self._check_initialized() X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) if X.shape[1] != self.n_features_: raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format( self.n_features_, X.shape[1])) score = self.init_.predict(X).astype(np.float64) return score def _decision_function(self, X): # for use in inner loop, not raveling the output in single-class case, # not doing input validation. score = self._init_decision_function(X) predict_stages(self.estimators_, X, self.learning_rate, score) return score def _staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. """ X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') score = self._init_decision_function(X) for i in range(self.estimators_.shape[0]): predict_stage(self.estimators_, i, X, self.learning_rate, score) yield score.copy() @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). Returns ------- feature_importances_ : array, shape = [n_features] """ self._check_initialized() total_sum = np.zeros((self.n_features_, ), dtype=np.float64) for stage in self.estimators_: stage_sum = sum(tree.feature_importances_ for tree in stage) / len(stage) total_sum += stage_sum importances = total_sum / len(self.estimators_) return importances def _validate_y(self, y): self.n_classes_ = 1 if y.dtype.kind == 'O': y = y.astype(np.float64) # Default implementation return y def apply(self, X): """Apply trees in the ensemble to X, return leaf indices. .. 
versionadded:: 0.17 Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted to a sparse ``csr_matrix``. Returns ------- X_leaves : array_like, shape = [n_samples, n_estimators, n_classes] For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. In the case of binary classification n_classes is 1. """ self._check_initialized() X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) # n_classes will be equal to 1 in the binary classification or the # regression case. n_estimators, n_classes = self.estimators_.shape leaves = np.zeros((X.shape[0], n_estimators, n_classes)) for i in range(n_estimators): for j in range(n_classes): estimator = self.estimators_[i, j] leaves[:, i, j] = estimator.apply(X, check_input=False) return leaves class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin): """Gradient Boosting for classification. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage ``n_classes_`` regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function. Binary classification is a special case where only a single regression tree is induced. Read more in the :ref:`User Guide <gradient_boosting>`. Parameters ---------- loss : {'deviance', 'exponential'}, optional (default='deviance') loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recovers the AdaBoost algorithm. learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. criterion : string, optional (default="friedman_mse") The function to measure the quality of a split. Supported criteria are "friedman_mse" for the mean squared error with improvement score by Friedman, "mse" for mean squared error, and "mae" for the mean absolute error. The default value of "friedman_mse" is generally the best as it can provide a better approximation in some cases. .. versionadded:: 0.18 min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. 
versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
presort : bool or 'auto', optional (default='auto') Whether to presort the data to speed up the finding of best splits in fitting. Auto mode by default will use presorting on dense data and default to normal sorting on sparse data. Setting presort to true on sparse data will raise an error. .. versionadded:: 0.17 *presort* parameter. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. train_score_ : array, shape = [n_estimators] The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. init : BaseEstimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``] The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary classification, otherwise n_classes. Notes ----- The features are always randomly permuted at each split. Therefore, the best found split may vary, even with the same training data and ``max_features=n_features``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. See also -------- sklearn.tree.DecisionTreeClassifier, RandomForestClassifier AdaBoostClassifier References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('deviance', 'exponential') def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, min_impurity_decrease=0., min_impurity_split=None, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'): super(GradientBoostingClassifier, self).__init__( loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, criterion=criterion, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_depth=max_depth, init=init, subsample=subsample, max_features=max_features, random_state=random_state, verbose=verbose, max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, warm_start=warm_start, presort=presort) def _validate_y(self, y): check_classification_targets(y) self.classes_, y = np.unique(y, return_inverse=True) self.n_classes_ = len(self.classes_) return y def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. 
Returns ------- score : array, shape = [n_samples, n_classes] or [n_samples] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification produce an array of shape [n_samples]. """ X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') score = self._decision_function(X) if score.shape[1] == 1: return score.ravel() return score def staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. """ for dec in self._staged_decision_function(X): # no yield from in Python2.X yield dec def predict(self, X): """Predict class for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array of shape = [n_samples] The predicted values. """ score = self.decision_function(X) decisions = self.loss_._score_to_decision(score) return self.classes_.take(decisions, axis=0) def staged_predict(self, X): """Predict class at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of array of shape = [n_samples] The predicted value of the input samples. """ for score in self._staged_decision_function(X): decisions = self.loss_._score_to_decision(score) yield self.classes_.take(decisions, axis=0) def predict_proba(self, X): """Predict class probabilities for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Raises ------ AttributeError If the ``loss`` does not support probabilities. Returns ------- p : array of shape = [n_samples] The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ score = self.decision_function(X) try: return self.loss_._score_to_proba(score) except NotFittedError: raise except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) def predict_log_proba(self, X): """Predict class log-probabilities for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Raises ------ AttributeError If the ``loss`` does not support probabilities. Returns ------- p : array of shape = [n_samples] The class log-probabilities of the input samples. 
The order of the classes corresponds to that in the attribute `classes_`. """ proba = self.predict_proba(X) return np.log(proba) def staged_predict_proba(self, X): """Predict class probabilities at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of array of shape = [n_samples] The predicted value of the input samples. """ try: for score in self._staged_decision_function(X): yield self.loss_._score_to_proba(score) except NotFittedError: raise except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin): """Gradient Boosting for regression. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage a regression tree is fit on the negative gradient of the given loss function. Read more in the :ref:`User Guide <gradient_boosting>`. Parameters ---------- loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls') loss function to be optimized. 'ls' refers to least squares regression. 'lad' (least absolute deviation) is a highly robust loss function solely based on order information of the input variables. 'huber' is a combination of the two. 'quantile' allows quantile regression (use `alpha` to specify the quantile). learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. criterion : string, optional (default="friedman_mse") The function to measure the quality of a split. Supported criteria are "friedman_mse" for the mean squared error with improvement score by Friedman, "mse" for mean squared error, and "mae" for the mean absolute error. The default value of "friedman_mse" is generally the best as it can provide a better approximation in some cases. .. versionadded:: 0.18 min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) 
The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 alpha : float (default=0.9) The alpha-quantile of the huber loss function and the quantile loss function. Only if ``loss='huber'`` or ``loss='quantile'``. init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. presort : bool or 'auto', optional (default='auto') Whether to presort the data to speed up the finding of best splits in fitting. Auto mode by default will use presorting on dense data and default to normal sorting on sparse data. Setting presort to true on sparse data will raise an error. .. versionadded:: 0.17 optional parameter *presort*. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. train_score_ : array, shape = [n_estimators] The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. init : BaseEstimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1] The collection of fitted sub-estimators. Notes ----- The features are always randomly permuted at each split. Therefore, the best found split may vary, even with the same training data and ``max_features=n_features``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. See also -------- DecisionTreeRegressor, RandomForestRegressor References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile') def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, min_impurity_decrease=0., min_impurity_split=None, init=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'): super(GradientBoostingRegressor, self).__init__( loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, criterion=criterion, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_depth=max_depth, init=init, subsample=subsample, max_features=max_features, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, random_state=random_state, alpha=alpha, verbose=verbose, max_leaf_nodes=max_leaf_nodes, warm_start=warm_start, presort=presort) def predict(self, X): """Predict regression target for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. 
Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array of shape = [n_samples] The predicted values. """ X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') return self._decision_function(X).ravel() def staged_predict(self, X): """Predict regression target at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of array of shape = [n_samples] The predicted value of the input samples. """ for y in self._staged_decision_function(X): yield y.ravel() def apply(self, X): """Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted to a sparse ``csr_matrix``. Returns ------- X_leaves : array_like, shape = [n_samples, n_estimators] For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. """ leaves = super(GradientBoostingRegressor, self).apply(X) leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0]) return leaves
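# A minimal, illustrative usage sketch of the GradientBoostingClassifier
# defined above (not part of the dumped sklearn source; the dataset, split
# sizes and hyperparameter values are arbitrary assumptions):
import numpy as np
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_hastie_10_2(n_samples=1000, random_state=0)
X_train, y_train = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]

clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                 max_depth=3, subsample=0.8, random_state=0)
clf.fit(X_train, y_train)

# staged_predict yields one prediction array per boosting stage, mirroring
# the per-iteration loop in BaseGradientBoosting.fit shown earlier.
staged_acc = [np.mean(pred == y_test) for pred in clf.staged_predict(X_test)]
print(len(staged_acc))             # 50, one entry per stage
print(clf.feature_importances_)    # importances averaged over all stages
# Because subsample < 1.0, the per-stage out-of-bag improvement was recorded.
print(clf.oob_improvement_.shape)  # (50,)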
76292
38.387197
91
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/bagging.py
"""Bagging meta-estimator.""" # Author: Gilles Louppe <[email protected]> # License: BSD 3 clause from __future__ import division import itertools import numbers import numpy as np from warnings import warn from abc import ABCMeta, abstractmethod from ..base import ClassifierMixin, RegressorMixin from ..externals.joblib import Parallel, delayed from ..externals.six import with_metaclass from ..externals.six.moves import zip from ..metrics import r2_score, accuracy_score from ..tree import DecisionTreeClassifier, DecisionTreeRegressor from ..utils import check_random_state, check_X_y, check_array, column_or_1d from ..utils.random import sample_without_replacement from ..utils.validation import has_fit_parameter, check_is_fitted from ..utils import indices_to_mask, check_consistent_length from ..utils.metaestimators import if_delegate_has_method from ..utils.multiclass import check_classification_targets from .base import BaseEnsemble, _partition_estimators __all__ = ["BaggingClassifier", "BaggingRegressor"] MAX_INT = np.iinfo(np.int32).max def _generate_indices(random_state, bootstrap, n_population, n_samples): """Draw randomly sampled indices.""" # Draw sample indices if bootstrap: indices = random_state.randint(0, n_population, n_samples) else: indices = sample_without_replacement(n_population, n_samples, random_state=random_state) return indices def _generate_bagging_indices(random_state, bootstrap_features, bootstrap_samples, n_features, n_samples, max_features, max_samples): """Randomly draw feature and sample indices.""" # Get valid random state random_state = check_random_state(random_state) # Draw indices feature_indices = _generate_indices(random_state, bootstrap_features, n_features, max_features) sample_indices = _generate_indices(random_state, bootstrap_samples, n_samples, max_samples) return feature_indices, sample_indices def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight, seeds, total_n_estimators, verbose): """Private function used to build a batch of estimators within a job.""" # Retrieve settings n_samples, n_features = X.shape max_features = ensemble._max_features max_samples = ensemble._max_samples bootstrap = ensemble.bootstrap bootstrap_features = ensemble.bootstrap_features support_sample_weight = has_fit_parameter(ensemble.base_estimator_, "sample_weight") if not support_sample_weight and sample_weight is not None: raise ValueError("The base estimator doesn't support sample weight") # Build estimators estimators = [] estimators_features = [] for i in range(n_estimators): if verbose > 1: print("Building estimator %d of %d for this parallel run " "(total %d)..." 
% (i + 1, n_estimators, total_n_estimators)) random_state = np.random.RandomState(seeds[i]) estimator = ensemble._make_estimator(append=False, random_state=random_state) # Draw random feature, sample indices features, indices = _generate_bagging_indices(random_state, bootstrap_features, bootstrap, n_features, n_samples, max_features, max_samples) # Draw samples, using sample weights, and then fit if support_sample_weight: if sample_weight is None: curr_sample_weight = np.ones((n_samples,)) else: curr_sample_weight = sample_weight.copy() if bootstrap: sample_counts = np.bincount(indices, minlength=n_samples) curr_sample_weight *= sample_counts else: not_indices_mask = ~indices_to_mask(indices, n_samples) curr_sample_weight[not_indices_mask] = 0 estimator.fit(X[:, features], y, sample_weight=curr_sample_weight) # Draw samples, using a mask, and then fit else: estimator.fit((X[indices])[:, features], y[indices]) estimators.append(estimator) estimators_features.append(features) return estimators, estimators_features def _parallel_predict_proba(estimators, estimators_features, X, n_classes): """Private function used to compute (proba-)predictions within a job.""" n_samples = X.shape[0] proba = np.zeros((n_samples, n_classes)) for estimator, features in zip(estimators, estimators_features): if hasattr(estimator, "predict_proba"): proba_estimator = estimator.predict_proba(X[:, features]) if n_classes == len(estimator.classes_): proba += proba_estimator else: proba[:, estimator.classes_] += \ proba_estimator[:, range(len(estimator.classes_))] else: # Resort to voting predictions = estimator.predict(X[:, features]) for i in range(n_samples): proba[i, predictions[i]] += 1 return proba def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes): """Private function used to compute log probabilities within a job.""" n_samples = X.shape[0] log_proba = np.empty((n_samples, n_classes)) log_proba.fill(-np.inf) all_classes = np.arange(n_classes, dtype=np.int) for estimator, features in zip(estimators, estimators_features): log_proba_estimator = estimator.predict_log_proba(X[:, features]) if n_classes == len(estimator.classes_): log_proba = np.logaddexp(log_proba, log_proba_estimator) else: log_proba[:, estimator.classes_] = np.logaddexp( log_proba[:, estimator.classes_], log_proba_estimator[:, range(len(estimator.classes_))]) missing = np.setdiff1d(all_classes, estimator.classes_) log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf) return log_proba def _parallel_decision_function(estimators, estimators_features, X): """Private function used to compute decisions within a job.""" return sum(estimator.decision_function(X[:, features]) for estimator, features in zip(estimators, estimators_features)) def _parallel_predict_regression(estimators, estimators_features, X): """Private function used to compute predictions within a job.""" return sum(estimator.predict(X[:, features]) for estimator, features in zip(estimators, estimators_features)) class BaseBagging(with_metaclass(ABCMeta, BaseEnsemble)): """Base class for Bagging meta-estimator. Warning: This class should not be used directly. Use derived classes instead. 
""" @abstractmethod def __init__(self, base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0): super(BaseBagging, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators) self.max_samples = max_samples self.max_features = max_features self.bootstrap = bootstrap self.bootstrap_features = bootstrap_features self.oob_score = oob_score self.warm_start = warm_start self.n_jobs = n_jobs self.random_state = random_state self.verbose = verbose def fit(self, X, y, sample_weight=None): """Build a Bagging ensemble of estimators from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. y : array-like, shape = [n_samples] The target values (class labels in classification, real numbers in regression). sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Note that this is supported only if the base estimator supports sample weighting. Returns ------- self : object Returns self. """ return self._fit(X, y, self.max_samples, sample_weight=sample_weight) def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None): """Build a Bagging ensemble of estimators from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. y : array-like, shape = [n_samples] The target values (class labels in classification, real numbers in regression). max_samples : int or float, optional (default=None) Argument to use instead of self.max_samples. max_depth : int, optional (default=None) Override value used when constructing base estimator. Only supported if the base estimator has a max_depth parameter. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Note that this is supported only if the base estimator supports sample weighting. Returns ------- self : object Returns self. 
""" random_state = check_random_state(self.random_state) # Convert data X, y = check_X_y(X, y, ['csr', 'csc']) if sample_weight is not None: sample_weight = check_array(sample_weight, ensure_2d=False) check_consistent_length(y, sample_weight) # Remap output n_samples, self.n_features_ = X.shape self._n_samples = n_samples y = self._validate_y(y) # Check parameters self._validate_estimator() if max_depth is not None: self.base_estimator_.max_depth = max_depth # Validate max_samples if max_samples is None: max_samples = self.max_samples elif not isinstance(max_samples, (numbers.Integral, np.integer)): max_samples = int(max_samples * X.shape[0]) if not (0 < max_samples <= X.shape[0]): raise ValueError("max_samples must be in (0, n_samples]") # Store validated integer row sampling value self._max_samples = max_samples # Validate max_features if isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float max_features = int(self.max_features * self.n_features_) if not (0 < max_features <= self.n_features_): raise ValueError("max_features must be in (0, n_features]") # Store validated integer feature sampling value self._max_features = max_features # Other checks if not self.bootstrap and self.oob_score: raise ValueError("Out of bag estimation only available" " if bootstrap=True") if self.warm_start and self.oob_score: raise ValueError("Out of bag estimate only available" " if warm_start=False") if hasattr(self, "oob_score_") and self.warm_start: del self.oob_score_ if not self.warm_start or not hasattr(self, 'estimators_'): # Free allocated memory, if any self.estimators_ = [] self.estimators_features_ = [] n_more_estimators = self.n_estimators - len(self.estimators_) if n_more_estimators < 0: raise ValueError('n_estimators=%d must be larger or equal to ' 'len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_))) elif n_more_estimators == 0: warn("Warm-start fitting without increasing n_estimators does not " "fit new trees.") return self # Parallel loop n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators, self.n_jobs) total_n_estimators = sum(n_estimators) # Advance random state to state after training # the first n_estimators if self.warm_start and len(self.estimators_) > 0: random_state.randint(MAX_INT, size=len(self.estimators_)) seeds = random_state.randint(MAX_INT, size=n_more_estimators) self._seeds = seeds all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)( delayed(_parallel_build_estimators)( n_estimators[i], self, X, y, sample_weight, seeds[starts[i]:starts[i + 1]], total_n_estimators, verbose=self.verbose) for i in range(n_jobs)) # Reduce self.estimators_ += list(itertools.chain.from_iterable( t[0] for t in all_results)) self.estimators_features_ += list(itertools.chain.from_iterable( t[1] for t in all_results)) if self.oob_score: self._set_oob_score(X, y) return self @abstractmethod def _set_oob_score(self, X, y): """Calculate out of bag predictions and score.""" def _validate_y(self, y): # Default implementation return column_or_1d(y, warn=True) def _get_estimators_indices(self): # Get drawn indices along both sample and feature axes for seed in self._seeds: # Operations accessing random_state must be performed identically # to those in `_parallel_build_estimators()` random_state = np.random.RandomState(seed) feature_indices, sample_indices = _generate_bagging_indices( random_state, self.bootstrap_features, self.bootstrap, self.n_features_, self._n_samples, self._max_features, 
self._max_samples) yield feature_indices, sample_indices @property def estimators_samples_(self): """The subset of drawn samples for each base estimator. Returns a dynamically generated list of boolean masks identifying the samples used for fitting each member of the ensemble, i.e., the in-bag samples. Note: the list is re-created at each call to the property in order to reduce the object memory footprint by not storing the sampling data. Thus fetching the property may be slower than expected. """ sample_masks = [] for _, sample_indices in self._get_estimators_indices(): mask = indices_to_mask(sample_indices, self._n_samples) sample_masks.append(mask) return sample_masks class BaggingClassifier(BaseBagging, ClassifierMixin): """A Bagging classifier. A Bagging classifier is an ensemble meta-estimator that fits base classifiers each on random subsets of the original dataset and then aggregate their individual predictions (either by voting or by averaging) to form a final prediction. Such a meta-estimator can typically be used as a way to reduce the variance of a black-box estimator (e.g., a decision tree), by introducing randomization into its construction procedure and then making an ensemble out of it. This algorithm encompasses several works from the literature. When random subsets of the dataset are drawn as random subsets of the samples, then this algorithm is known as Pasting [1]_. If samples are drawn with replacement, then the method is known as Bagging [2]_. When random subsets of the dataset are drawn as random subsets of the features, then the method is known as Random Subspaces [3]_. Finally, when base estimators are built on subsets of both samples and features, then the method is known as Random Patches [4]_. Read more in the :ref:`User Guide <bagging>`. Parameters ---------- base_estimator : object or None, optional (default=None) The base estimator to fit on random subsets of the dataset. If None, then the base estimator is a decision tree. n_estimators : int, optional (default=10) The number of base estimators in the ensemble. max_samples : int or float, optional (default=1.0) The number of samples to draw from X to train each base estimator. - If int, then draw `max_samples` samples. - If float, then draw `max_samples * X.shape[0]` samples. max_features : int or float, optional (default=1.0) The number of features to draw from X to train each base estimator. - If int, then draw `max_features` features. - If float, then draw `max_features * X.shape[1]` features. bootstrap : boolean, optional (default=True) Whether samples are drawn with replacement. bootstrap_features : boolean, optional (default=False) Whether features are drawn with replacement. oob_score : bool Whether to use out-of-bag samples to estimate the generalization error. warm_start : bool, optional (default=False) When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new ensemble. .. versionadded:: 0.17 *warm_start* constructor parameter. n_jobs : int, optional (default=1) The number of jobs to run in parallel for both `fit` and `predict`. If -1, then the number of jobs is set to the number of cores. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
verbose : int, optional (default=0) Controls the verbosity of the building process. Attributes ---------- base_estimator_ : estimator The base estimator from which the ensemble is grown. estimators_ : list of estimators The collection of fitted base estimators. estimators_samples_ : list of arrays The subset of drawn samples (i.e., the in-bag samples) for each base estimator. Each subset is defined by a boolean mask. estimators_features_ : list of arrays The subset of drawn features for each base estimator. classes_ : array of shape = [n_classes] The classes labels. n_classes_ : int or list The number of classes. oob_score_ : float Score of the training dataset obtained using an out-of-bag estimate. oob_decision_function_ : array of shape = [n_samples, n_classes] Decision function computed with out-of-bag estimate on the training set. If n_estimators is small it might be possible that a data point was never left out during the bootstrap. In this case, `oob_decision_function_` might contain NaN. References ---------- .. [1] L. Breiman, "Pasting small votes for classification in large databases and on-line", Machine Learning, 36(1), 85-103, 1999. .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140, 1996. .. [3] T. Ho, "The random subspace method for constructing decision forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844, 1998. .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine Learning and Knowledge Discovery in Databases, 346-361, 2012. """ def __init__(self, base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0): super(BaggingClassifier, self).__init__( base_estimator, n_estimators=n_estimators, max_samples=max_samples, max_features=max_features, bootstrap=bootstrap, bootstrap_features=bootstrap_features, oob_score=oob_score, warm_start=warm_start, n_jobs=n_jobs, random_state=random_state, verbose=verbose) def _validate_estimator(self): """Check the estimator and set the base_estimator_ attribute.""" super(BaggingClassifier, self)._validate_estimator( default=DecisionTreeClassifier()) def _set_oob_score(self, X, y): n_samples = y.shape[0] n_classes_ = self.n_classes_ classes_ = self.classes_ predictions = np.zeros((n_samples, n_classes_)) for estimator, samples, features in zip(self.estimators_, self.estimators_samples_, self.estimators_features_): # Create mask for OOB samples mask = ~samples if hasattr(estimator, "predict_proba"): predictions[mask, :] += estimator.predict_proba( (X[mask, :])[:, features]) else: p = estimator.predict((X[mask, :])[:, features]) j = 0 for i in range(n_samples): if mask[i]: predictions[i, p[j]] += 1 j += 1 if (predictions.sum(axis=1) == 0).any(): warn("Some inputs do not have OOB scores. " "This probably means too few estimators were used " "to compute any reliable oob estimates.") oob_decision_function = (predictions / predictions.sum(axis=1)[:, np.newaxis]) oob_score = accuracy_score(y, np.argmax(predictions, axis=1)) self.oob_decision_function_ = oob_decision_function self.oob_score_ = oob_score def _validate_y(self, y): y = column_or_1d(y, warn=True) check_classification_targets(y) self.classes_, y = np.unique(y, return_inverse=True) self.n_classes_ = len(self.classes_) return y def predict(self, X): """Predict class for X. The predicted class of an input sample is computed as the class with the highest mean predicted probability. 
If base estimators do not implement a ``predict_proba``
        method, then it resorts to voting.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        predicted_probability = self.predict_proba(X)
        return self.classes_.take((np.argmax(predicted_probability, axis=1)),
                                  axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the base estimators in the
        ensemble. If base estimators do not implement a ``predict_proba``
        method, then it resorts to voting and the predicted class
        probabilities of an input sample represent the proportion of
        estimators predicting each class.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1}."
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_)
            for i in range(n_jobs))

        # Reduce
        proba = sum(all_proba) / self.n_estimators

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample are computed
        as the log of the mean predicted class probabilities of the base
        estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        if hasattr(self.base_estimator_, "predict_log_proba"):
            # Check data
            X = check_array(X, accept_sparse=['csr', 'csc'])

            if self.n_features_ != X.shape[1]:
                raise ValueError("Number of features of the model must "
                                 "match the input. 
Model n_features is {0} " "and input n_features is {1} " "".format(self.n_features_, X.shape[1])) # Parallel loop n_jobs, n_estimators, starts = _partition_estimators( self.n_estimators, self.n_jobs) all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)( delayed(_parallel_predict_log_proba)( self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, self.n_classes_) for i in range(n_jobs)) # Reduce log_proba = all_log_proba[0] for j in range(1, len(all_log_proba)): log_proba = np.logaddexp(log_proba, all_log_proba[j]) log_proba -= np.log(self.n_estimators) return log_proba else: return np.log(self.predict_proba(X)) @if_delegate_has_method(delegate='base_estimator') def decision_function(self, X): """Average of the decision functions of the base classifiers. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- score : array, shape = [n_samples, k] The decision function of the input samples. The columns correspond to the classes in sorted order, as they appear in the attribute ``classes_``. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. """ check_is_fitted(self, "classes_") # Check data X = check_array(X, accept_sparse=['csr', 'csc']) if self.n_features_ != X.shape[1]: raise ValueError("Number of features of the model must " "match the input. Model n_features is {0} and " "input n_features is {1} " "".format(self.n_features_, X.shape[1])) # Parallel loop n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs) all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)( delayed(_parallel_decision_function)( self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X) for i in range(n_jobs)) # Reduce decisions = sum(all_decisions) / self.n_estimators return decisions class BaggingRegressor(BaseBagging, RegressorMixin): """A Bagging regressor. A Bagging regressor is an ensemble meta-estimator that fits base regressors each on random subsets of the original dataset and then aggregate their individual predictions (either by voting or by averaging) to form a final prediction. Such a meta-estimator can typically be used as a way to reduce the variance of a black-box estimator (e.g., a decision tree), by introducing randomization into its construction procedure and then making an ensemble out of it. This algorithm encompasses several works from the literature. When random subsets of the dataset are drawn as random subsets of the samples, then this algorithm is known as Pasting [1]_. If samples are drawn with replacement, then the method is known as Bagging [2]_. When random subsets of the dataset are drawn as random subsets of the features, then the method is known as Random Subspaces [3]_. Finally, when base estimators are built on subsets of both samples and features, then the method is known as Random Patches [4]_. Read more in the :ref:`User Guide <bagging>`. Parameters ---------- base_estimator : object or None, optional (default=None) The base estimator to fit on random subsets of the dataset. If None, then the base estimator is a decision tree. n_estimators : int, optional (default=10) The number of base estimators in the ensemble. max_samples : int or float, optional (default=1.0) The number of samples to draw from X to train each base estimator. 
- If int, then draw `max_samples` samples. - If float, then draw `max_samples * X.shape[0]` samples. max_features : int or float, optional (default=1.0) The number of features to draw from X to train each base estimator. - If int, then draw `max_features` features. - If float, then draw `max_features * X.shape[1]` features. bootstrap : boolean, optional (default=True) Whether samples are drawn with replacement. bootstrap_features : boolean, optional (default=False) Whether features are drawn with replacement. oob_score : bool Whether to use out-of-bag samples to estimate the generalization error. warm_start : bool, optional (default=False) When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new ensemble. n_jobs : int, optional (default=1) The number of jobs to run in parallel for both `fit` and `predict`. If -1, then the number of jobs is set to the number of cores. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. verbose : int, optional (default=0) Controls the verbosity of the building process. Attributes ---------- estimators_ : list of estimators The collection of fitted sub-estimators. estimators_samples_ : list of arrays The subset of drawn samples (i.e., the in-bag samples) for each base estimator. Each subset is defined by a boolean mask. estimators_features_ : list of arrays The subset of drawn features for each base estimator. oob_score_ : float Score of the training dataset obtained using an out-of-bag estimate. oob_prediction_ : array of shape = [n_samples] Prediction computed with out-of-bag estimate on the training set. If n_estimators is small it might be possible that a data point was never left out during the bootstrap. In this case, `oob_prediction_` might contain NaN. References ---------- .. [1] L. Breiman, "Pasting small votes for classification in large databases and on-line", Machine Learning, 36(1), 85-103, 1999. .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140, 1996. .. [3] T. Ho, "The random subspace method for constructing decision forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844, 1998. .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine Learning and Knowledge Discovery in Databases, 346-361, 2012. """ def __init__(self, base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0): super(BaggingRegressor, self).__init__( base_estimator, n_estimators=n_estimators, max_samples=max_samples, max_features=max_features, bootstrap=bootstrap, bootstrap_features=bootstrap_features, oob_score=oob_score, warm_start=warm_start, n_jobs=n_jobs, random_state=random_state, verbose=verbose) def predict(self, X): """Predict regression target for X. The predicted regression target of an input sample is computed as the mean predicted regression targets of the estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- y : array of shape = [n_samples] The predicted values. 
""" check_is_fitted(self, "estimators_features_") # Check data X = check_array(X, accept_sparse=['csr', 'csc']) # Parallel loop n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs) all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)( delayed(_parallel_predict_regression)( self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X) for i in range(n_jobs)) # Reduce y_hat = sum(all_y_hat) / self.n_estimators return y_hat def _validate_estimator(self): """Check the estimator and set the base_estimator_ attribute.""" super(BaggingRegressor, self)._validate_estimator( default=DecisionTreeRegressor()) def _set_oob_score(self, X, y): n_samples = y.shape[0] predictions = np.zeros((n_samples,)) n_predictions = np.zeros((n_samples,)) for estimator, samples, features in zip(self.estimators_, self.estimators_samples_, self.estimators_features_): # Create mask for OOB samples mask = ~samples predictions[mask] += estimator.predict((X[mask, :])[:, features]) n_predictions[mask] += 1 if (n_predictions == 0).any(): warn("Some inputs do not have OOB scores. " "This probably means too few estimators were used " "to compute any reliable oob estimates.") n_predictions[n_predictions == 0] = 1 predictions /= n_predictions self.oob_prediction_ = predictions self.oob_score_ = r2_score(y, predictions)
38286
37.440763
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py
""" Testing for the gradient boosting loss functions and initial estimators. """ import numpy as np from numpy.testing import assert_array_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_equal from sklearn.utils import check_random_state from sklearn.utils.testing import assert_raises from sklearn.ensemble.gradient_boosting import BinomialDeviance from sklearn.ensemble.gradient_boosting import LogOddsEstimator from sklearn.ensemble.gradient_boosting import LeastSquaresError from sklearn.ensemble.gradient_boosting import RegressionLossFunction from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS from sklearn.ensemble.gradient_boosting import _weighted_percentile from sklearn.ensemble.gradient_boosting import QuantileLossFunction def test_binomial_deviance(): # Check binomial deviance loss. # Check against alternative definitions in ESLII. bd = BinomialDeviance(2) # pred has the same BD for y in {0, 1} assert_equal(bd(np.array([0.0]), np.array([0.0])), bd(np.array([1.0]), np.array([0.0]))) assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])), 0.0) assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]), np.array([100.0, -100.0, -100.0])), 0) # check if same results as alternative definition of deviance (from ESLII) alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1) * pred)) test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])), (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])), (np.array([0.0, 0.0, 0.0]), np.array([-100.0, -100.0, -100.0])), (np.array([1.0, 1.0, 1.0]), np.array([-100.0, -100.0, -100.0]))] for datum in test_data: assert_almost_equal(bd(*datum), alt_dev(*datum)) # check the gradient against the alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred)) for datum in test_data: assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum)) def test_log_odds_estimator(): # Check log odds estimator. est = LogOddsEstimator() assert_raises(ValueError, est.fit, None, np.array([1])) est.fit(None, np.array([1.0, 0.0])) assert_equal(est.prior, 0.0) assert_array_equal(est.predict(np.array([[1.0], [1.0]])), np.array([[0.0], [0.0]])) def test_sample_weight_smoke(): rng = check_random_state(13) y = rng.rand(100) pred = rng.rand(100) # least squares loss = LeastSquaresError(1) loss_wo_sw = loss(y, pred) loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32)) assert_almost_equal(loss_wo_sw, loss_w_sw) def test_sample_weight_init_estimators(): # Smoke test for init estimators with sample weights. 
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)

    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
        else:
            k = 2
            y = clf_y

            if Loss.is_multi_class:
                # skip multiclass
                continue

        loss = Loss(k)
        init_est = loss.init_estimator()
        init_est.fit(X, y)
        out = init_est.predict(X)
        assert_equal(out.shape, (y.shape[0], 1))

        sw_init_est = loss.init_estimator()
        sw_init_est.fit(X, y, sample_weight=sample_weight)
        sw_out = sw_init_est.predict(X)
        assert_equal(sw_out.shape, (y.shape[0], 1))

        # check that fitting with unit sample weights reproduces the
        # unweighted predictions (up to small percentile-interpolation
        # differences in the weighted code path)
        np.testing.assert_allclose(out, sw_out, rtol=1e-2)


def test_weighted_percentile():
    y = np.empty(102, dtype=np.float64)
    y[:50] = 0
    y[-51:] = 2
    y[-1] = 100000
    y[50] = 1
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 1


def test_weighted_percentile_equal():
    y = np.empty(102, dtype=np.float64)
    y.fill(0.0)
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 0


def test_weighted_percentile_zero_weight():
    y = np.empty(102, dtype=np.float64)
    y.fill(1.0)
    sw = np.ones(102, dtype=np.float64)
    sw.fill(0.0)
    score = _weighted_percentile(y, sw, 50)
    assert score == 1.0


def test_quantile_loss_function():
    # Non-regression test for the QuantileLossFunction object:
    # there was a sign problem when evaluating the function
    # for negative values of 'ytrue - ypred'
    x = np.asarray([-1.0, 0.0, 1.0])
    y_found = QuantileLossFunction(1, 0.9)(x, np.zeros_like(x))
    y_expected = np.asarray([0.1, 0.0, 0.9]).mean()
    np.testing.assert_allclose(y_found, y_expected)


def test_sample_weight_deviance():
    # Test if deviance supports sample weights.
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    mclf_y = rng.randint(0, 3, size=100)

    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
            p = reg_y
        else:
            k = 2
            y = clf_y
            p = clf_y

            if Loss.is_multi_class:
                k = 3
                y = mclf_y
                # one-hot encoding
                p = np.zeros((y.shape[0], k), dtype=np.float64)
                for i in range(k):
                    p[:, i] = y == i

        loss = Loss(k)
        deviance_w_w = loss(y, p, sample_weight)
        deviance_wo_w = loss(y, p)
        assert deviance_wo_w == deviance_w_w
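# A small standalone check in the spirit of test_quantile_loss_function
# above (illustrative only; the import path is the private, version-specific
# module that these tests themselves use):
import numpy as np
from sklearn.ensemble.gradient_boosting import QuantileLossFunction

# For quantile alpha, each unit of under-prediction costs alpha and each
# unit of over-prediction costs (1 - alpha); the loss is the mean cost.
loss = QuantileLossFunction(1, alpha=0.9)
x = np.asarray([-1.0, 0.0, 1.0])
# residuals y - pred = (-1, 0, 1) -> per-sample costs (0.1, 0.0, 0.9)
print(loss(x, np.zeros_like(x)))  # mean of (0.1, 0.0, 0.9) ~= 0.3333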
6,016
31.176471
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_bagging.py
""" Testing for the bagging ensemble module (sklearn.ensemble.bagging). """ # Author: Gilles Louppe # License: BSD 3 clause import numpy as np from sklearn.base import BaseEstimator from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_raise_message from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.model_selection import GridSearchCV, ParameterGrid from sklearn.ensemble import BaggingClassifier, BaggingRegressor from sklearn.linear_model import Perceptron, LogisticRegression from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.svm import SVC, SVR from sklearn.pipeline import make_pipeline from sklearn.feature_selection import SelectKBest from sklearn.model_selection import train_test_split from sklearn.datasets import load_boston, load_iris, make_hastie_10_2 from sklearn.utils import check_random_state from scipy.sparse import csc_matrix, csr_matrix rng = check_random_state(0) # also load the iris dataset # and randomly permute it iris = load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] def test_classification(): # Check classification for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [1, 2, 4], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyClassifier(), Perceptron(tol=1e-3), DecisionTreeClassifier(), KNeighborsClassifier(), SVC()]: for params in grid: BaggingClassifier(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_classification(): # Check classification for various parameter settings on sparse input. 
class CustomSVC(SVC): """SVC variant that records the nature of the training set""" def fit(self, X, y): super(CustomSVC, self).fit(X, y) self.data_type_ = type(X) return self rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) parameter_sets = [ {"max_samples": 0.5, "max_features": 2, "bootstrap": True, "bootstrap_features": True}, {"max_samples": 1.0, "max_features": 4, "bootstrap": True, "bootstrap_features": True}, {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, ] for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in parameter_sets: for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']: # Trained on sparse format sparse_classifier = BaggingClassifier( base_estimator=CustomSVC(decision_function_shape='ovr'), random_state=1, **params ).fit(X_train_sparse, y_train) sparse_results = getattr(sparse_classifier, f)(X_test_sparse) # Trained on dense format dense_classifier = BaggingClassifier( base_estimator=CustomSVC(decision_function_shape='ovr'), random_state=1, **params ).fit(X_train, y_train) dense_results = getattr(dense_classifier, f)(X_test) assert_array_equal(sparse_results, dense_results) sparse_type = type(X_train_sparse) types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([t == sparse_type for t in types]) def test_regression(): # Check regression for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [0.5, 1.0], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyRegressor(), DecisionTreeRegressor(), KNeighborsRegressor(), SVR()]: for params in grid: BaggingRegressor(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_regression(): # Check regression for various parameter settings on sparse input. 
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set"""
        def fit(self, X, y):
            super(CustomSVR, self).fit(X, y)
            self.data_type_ = type(X)
            return self

    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]

    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:

            # Trained on sparse format
            sparse_classifier = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)

            # Trained on dense format
            dense_results = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)

            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]

            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])


def test_bootstrap_samples():
    # Test that bootstrapping samples generate non-perfect base estimators.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)

    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)

    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))

    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)

    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))


def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=False,
                                random_state=rng).fit(X_train, y_train)

    for features in ensemble.estimators_features_:
        assert_equal(boston.data.shape[1], np.unique(features).shape[0])

    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=True,
                                random_state=rng).fit(X_train, y_train)

    for features in ensemble.estimators_features_:
        assert_greater(boston.data.shape[1], np.unique(features).shape[0])


def test_probability():
    # Predict probabilities.
rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) with np.errstate(divide="ignore", invalid="ignore"): # Normal case ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(), random_state=rng).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) # Degenerate case, where some classes are missing ensemble = BaggingClassifier(base_estimator=LogisticRegression(), random_state=rng, max_samples=5).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) for base_estimator in [DecisionTreeClassifier(), SVC()]: clf = BaggingClassifier(base_estimator=base_estimator, n_estimators=100, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingClassifier(base_estimator=base_estimator, n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_oob_score_regression(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=50, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_single_estimator(): # Check singleton ensembles. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(), n_estimators=1, bootstrap=False, bootstrap_features=False, random_state=rng).fit(X_train, y_train) clf2 = KNeighborsRegressor().fit(X_train, y_train) assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) def test_error(): # Test that it gives proper exception on deficient input. 
X, y = iris.data, iris.target base = DecisionTreeClassifier() # Test max_samples assert_raises(ValueError, BaggingClassifier(base, max_samples=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=1000).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples="foobar").fit, X, y) # Test max_features assert_raises(ValueError, BaggingClassifier(base, max_features=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=5).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features="foobar").fit, X, y) # Test support of decision_function assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function')) def test_parallel_classification(): # Check parallel classification. rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) # predict_proba ensemble.set_params(n_jobs=1) y1 = ensemble.predict_proba(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y3) # decision_function ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) decisions1 = ensemble.decision_function(X_test) ensemble.set_params(n_jobs=2) decisions2 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions2) X_err = np.hstack((X_test, np.zeros((X_test.shape[0], 1)))) assert_raise_message(ValueError, "Number of features of the model " "must match the input. Model n_features is {0} " "and input n_features is {1} " "".format(X_test.shape[1], X_err.shape[1]), ensemble.decision_function, X_err) ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'), n_jobs=1, random_state=0).fit(X_train, y_train) decisions3 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions3) def test_parallel_regression(): # Check parallel regression. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) y1 = ensemble.predict(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict(X_test) assert_array_almost_equal(y1, y3) def test_gridsearch(): # Check that bagging ensembles can be grid-searched. 
# Transform iris into a binary classification task X, y = iris.data, iris.target y[y == 2] = 1 # Grid search with scoring based on decision_function parameters = {'n_estimators': (1, 2), 'base_estimator__C': (1, 2)} GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y) def test_base_estimator(): # Check base_estimator and its default values. rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(Perceptron(tol=1e-3), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, Perceptron)) # Regression X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, SVR)) def test_bagging_with_pipeline(): estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2) estimator.fit(iris.data, iris.target) assert_true(isinstance(estimator[0].steps[-1][1].random_state, int)) class DummyZeroEstimator(BaseEstimator): def fit(self, X, y): self.classes_ = np.unique(y) return self def predict(self, X): return self.classes_[np.zeros(X.shape[0], dtype=int)] def test_bagging_sample_weight_unsupported_but_passed(): estimator = BaggingClassifier(DummyZeroEstimator()) rng = check_random_state(0) estimator.fit(iris.data, iris.target).predict(iris.data) assert_raises(ValueError, estimator.fit, iris.data, iris.target, sample_weight=rng.randint(10, size=(iris.data.shape[0]))) def test_warm_start(random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = make_hastie_10_2(n_samples=20, random_state=1) clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = BaggingClassifier(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) def test_warm_start_smaller_n_estimators(): # Test if warm start'ed second fit with smaller n_estimators raises error. 
X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_equal_n_estimators(): # Test that nothing happens when fitting without increasing n_estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # modify X to nonsense values, this should not change anything X_train += 1. assert_warns_message(UserWarning, "Warm-start fitting without increasing n_estimators does not", clf.fit, X_train, y_train) assert_array_equal(y_pred, clf.predict(X_test)) def test_warm_start_equivalence(): # warm started classifier with 5+5 estimators should be equivalent to # one classifier with 10 estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141) clf_ws.fit(X_train, y_train) clf_ws.set_params(n_estimators=10) clf_ws.fit(X_train, y_train) y1 = clf_ws.predict(X_test) clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141) clf.fit(X_train, y_train) y2 = clf.predict(X_test) assert_array_almost_equal(y1, y2) def test_warm_start_with_oob_score_fails(): # Check using oob_score and warm_start simultaneously fails X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True) assert_raises(ValueError, clf.fit, X, y) def test_oob_score_removed_on_warm_start(): X, y = make_hastie_10_2(n_samples=2000, random_state=1) clf = BaggingClassifier(n_estimators=50, oob_score=True) clf.fit(X, y) clf.set_params(warm_start=True, oob_score=False, n_estimators=100) clf.fit(X, y) assert_raises(AttributeError, getattr, clf, "oob_score_") def test_oob_score_consistency(): # Make sure OOB scores are identical when random_state, estimator, and # training data are fixed and fitting is done twice X, y = make_hastie_10_2(n_samples=200, random_state=1) bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5, max_features=0.5, oob_score=True, random_state=1) assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_) def test_estimators_samples(): # Check that format of estimators_samples_ is correct and that results # generated at fit time can be identically reproduced at a later time # using data saved in object attributes. 
X, y = make_hastie_10_2(n_samples=200, random_state=1) bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5, max_features=0.5, random_state=1, bootstrap=False) bagging.fit(X, y) # Get relevant attributes estimators_samples = bagging.estimators_samples_ estimators_features = bagging.estimators_features_ estimators = bagging.estimators_ # Test for correct formatting assert_equal(len(estimators_samples), len(estimators)) assert_equal(len(estimators_samples[0]), len(X)) assert_equal(estimators_samples[0].dtype.kind, 'b') # Re-fit single estimator to test for consistent sampling estimator_index = 0 estimator_samples = estimators_samples[estimator_index] estimator_features = estimators_features[estimator_index] estimator = estimators[estimator_index] X_train = (X[estimator_samples])[:, estimator_features] y_train = y[estimator_samples] orig_coefs = estimator.coef_ estimator.fit(X_train, y_train) new_coefs = estimator.coef_ assert_array_almost_equal(orig_coefs, new_coefs) def test_max_samples_consistency(): # Make sure validated max_samples and original max_samples are identical # when valid integer max_samples supplied by user max_samples = 100 X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1) bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=max_samples, max_features=0.5, random_state=1) bagging.fit(X, y) assert_equal(bagging._max_samples, max_samples) def test_set_oob_score_label_encoding(): # Make sure the oob_score doesn't change when the labels change # See: https://github.com/scikit-learn/scikit-learn/issues/8933 random_state = 5 X = [[-1], [0], [1]] * 5 Y1 = ['A', 'B', 'C'] * 5 Y2 = [-1, 0, 1] * 5 Y3 = [0, 1, 2] * 5 x1 = BaggingClassifier(oob_score=True, random_state=random_state).fit(X, Y1).oob_score_ x2 = BaggingClassifier(oob_score=True, random_state=random_state).fit(X, Y2).oob_score_ x3 = BaggingClassifier(oob_score=True, random_state=random_state).fit(X, Y3).oob_score_ assert_equal([x1, x2], [x3, x3])
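

# A small added sketch (not from the original test file) of how the attributes
# exercised by test_estimators_samples() fit together: with bootstrap=False,
# estimators_samples_ holds a boolean row mask and estimators_features_ the
# drawn column indices, so together they recover the exact training slice of a
# base estimator. The dataset and parameters below are arbitrary choices for
# the example and reuse the imports at the top of this file.
def demo_rebuild_estimator_training_set():
    X, y = make_hastie_10_2(n_samples=100, random_state=1)
    bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5,
                                max_features=0.5, bootstrap=False,
                                random_state=1).fit(X, y)
    samples = bagging.estimators_samples_[0]    # boolean mask over rows
    features = bagging.estimators_features_[0]  # sampled column indices
    # The exact (X, y) slice the first base estimator was fit on:
    return X[samples][:, features], y[samples]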
29,340
38.069241
92
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_forest.py
""" Testing for the forest module (sklearn.ensemble.forest). """ # Authors: Gilles Louppe, # Brian Holt, # Andreas Mueller, # Arnaud Joly # License: BSD 3 clause import pickle from collections import defaultdict from itertools import combinations from itertools import product import numpy as np from scipy.sparse import csr_matrix from scipy.sparse import csc_matrix from scipy.sparse import coo_matrix from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_less, assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn import datasets from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomTreesEmbedding from sklearn.model_selection import GridSearchCV from sklearn.svm import LinearSVC from sklearn.utils.validation import check_random_state from sklearn.utils.fixes import comb from sklearn.tree.tree import SPARSE_SPLITTERS # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # Larger classification sample used for testing feature importances X_large, y_large = datasets.make_classification( n_samples=500, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = check_random_state(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] # also make a hastie_10_2 dataset hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1) hastie_X = hastie_X.astype(np.float32) FOREST_CLASSIFIERS = { "ExtraTreesClassifier": ExtraTreesClassifier, "RandomForestClassifier": RandomForestClassifier, } FOREST_REGRESSORS = { "ExtraTreesRegressor": ExtraTreesRegressor, "RandomForestRegressor": RandomForestRegressor, } FOREST_TRANSFORMERS = { "RandomTreesEmbedding": RandomTreesEmbedding, } FOREST_ESTIMATORS = dict() FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) FOREST_ESTIMATORS.update(FOREST_REGRESSORS) FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) def check_classification_toy(name): """Check classification on a toy dataset.""" ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) # also test apply leaf_indices = clf.apply(X) assert_equal(leaf_indices.shape, (len(X), clf.n_estimators)) def test_classification_toy(): for name in 
FOREST_CLASSIFIERS: yield check_classification_toy, name def check_iris_criterion(name, criterion): # Check consistency on dataset iris. ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.9, "Failed with criterion %s and score = %f" % (criterion, score)) clf = ForestClassifier(n_estimators=10, criterion=criterion, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.5, "Failed with criterion %s and score = %f" % (criterion, score)) def test_iris(): for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")): yield check_iris_criterion, name, criterion def check_boston_criterion(name, criterion): # Check consistency on dataset boston house prices. ForestRegressor = FOREST_REGRESSORS[name] clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.94, "Failed with max_features=None, criterion %s " "and score = %f" % (criterion, score)) clf = ForestRegressor(n_estimators=5, criterion=criterion, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=6, criterion %s " "and score = %f" % (criterion, score)) def test_boston(): for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")): yield check_boston_criterion, name, criterion def check_regressor_attributes(name): # Regression models should not have a classes_ attribute. r = FOREST_REGRESSORS[name](random_state=0) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) def test_regressor_attributes(): for name in FOREST_REGRESSORS: yield check_regressor_attributes, name def check_probability(name): # Predict probabilities. 
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1,
                               max_features=1, max_depth=1)
        clf.fit(iris.data, iris.target)
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data),
                                         axis=1),
                                  np.ones(iris.data.shape[0]))
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))


def test_probability():
    for name in FOREST_CLASSIFIERS:
        yield check_probability, name


def check_importances(name, criterion, dtype, tolerance):
    # cast as dtype
    X = X_large.astype(dtype, copy=False)
    y = y_large.astype(dtype, copy=False)

    ForestEstimator = FOREST_ESTIMATORS[name]

    est = ForestEstimator(n_estimators=10, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_

    # The forest estimator can detect that only the first 3 features of the
    # dataset are informative:
    n_important = np.sum(importances > 0.1)
    assert_equal(importances.shape[0], 10)
    assert_equal(n_important, 3)
    assert np.all(importances[:3] > 0.1)

    # Check with parallel
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)

    # Check with sample weights
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=10, random_state=0,
                          criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert_true(np.all(importances >= 0.0))

    for scale in [0.5, 100]:
        est = ForestEstimator(n_estimators=10, random_state=0,
                              criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert_less(np.abs(importances - importances_bis).mean(), tolerance)


def test_importances():
    for dtype in (np.float64, np.float32):
        tolerance = 0.01
        for name, criterion in product(FOREST_CLASSIFIERS,
                                       ["gini", "entropy"]):
            yield check_importances, name, criterion, dtype, tolerance

        for name, criterion in product(FOREST_REGRESSORS,
                                       ["mse", "friedman_mse", "mae"]):
            tolerance = 0.05 if criterion == "mae" else 0.01
            yield check_importances, name, criterion, dtype, tolerance


def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).

    def binomial(k, n):
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)

    def entropy(samples):
        n_samples = len(samples)
        entropy = 0.

        for count in np.bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)

        return entropy

    def mdi_importance(X_m, X, y):
        n_samples, n_features = X.shape

        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]

        imp = 0.

        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))

            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    mask_b = np.ones(n_samples, dtype=np.bool)

                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]

                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)

                    if n_samples_b > 0:
                        children = []

                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])

                        imp += (coef * (1.
* n_samples_b / n_samples) # P(B=b) * (entropy(y_) - sum([entropy(c) * len(c) / n_samples_b for c in children]))) return imp data = np.array([[0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 1, 1, 1, 0, 1, 2], [1, 0, 1, 1, 0, 1, 1, 3], [0, 1, 1, 1, 0, 1, 0, 4], [1, 1, 0, 1, 0, 1, 1, 5], [1, 1, 0, 1, 1, 1, 1, 6], [1, 0, 1, 0, 0, 1, 0, 7], [1, 1, 1, 1, 1, 1, 1, 8], [1, 1, 1, 1, 0, 1, 1, 9], [1, 1, 1, 0, 1, 1, 1, 0]]) X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7] n_features = X.shape[1] # Compute true importances true_importances = np.zeros(n_features) for i in range(n_features): true_importances[i] = mdi_importance(i, X, y) # Estimate importances with totally randomized trees clf = ExtraTreesClassifier(n_estimators=500, max_features=1, criterion="entropy", random_state=0).fit(X, y) importances = sum(tree.tree_.compute_feature_importances(normalize=False) for tree in clf.estimators_) / clf.n_estimators # Check correctness assert_almost_equal(entropy(y), sum(importances)) assert_less(np.abs(true_importances - importances).mean(), 0.01) def check_unfitted_feature_importances(name): assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0), "feature_importances_") def test_unfitted_feature_importances(): for name in FOREST_ESTIMATORS: yield check_unfitted_feature_importances, name def check_oob_score(name, X, y, n_estimators=20): # Check that oob prediction is a good estimation of the generalization # error. # Proper behavior est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=n_estimators, bootstrap=True) n_samples = X.shape[0] est.fit(X[:n_samples // 2, :], y[:n_samples // 2]) test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:]) if name in FOREST_CLASSIFIERS: assert_less(abs(test_score - est.oob_score_), 0.1) else: assert_greater(test_score, est.oob_score_) assert_greater(est.oob_score_, .8) # Check warning if not enough estimators with np.errstate(divide="ignore", invalid="ignore"): est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=1, bootstrap=True) assert_warns(UserWarning, est.fit, X, y) def test_oob_score(): for name in FOREST_CLASSIFIERS: yield check_oob_score, name, iris.data, iris.target # csc matrix yield check_oob_score, name, csc_matrix(iris.data), iris.target # non-contiguous targets in classification yield check_oob_score, name, iris.data, iris.target * 2 + 1 for name in FOREST_REGRESSORS: yield check_oob_score, name, boston.data, boston.target, 50 # csc matrix yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50 def check_oob_score_raise_error(name): ForestEstimator = FOREST_ESTIMATORS[name] if name in FOREST_TRANSFORMERS: for oob_score in [True, False]: assert_raises(TypeError, ForestEstimator, oob_score=oob_score) assert_raises(NotImplementedError, ForestEstimator()._set_oob_score, X, y) else: # Unfitted / no bootstrap / no oob_score for oob_score, bootstrap in [(True, False), (False, True), (False, False)]: est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap, random_state=0) assert_false(hasattr(est, "oob_score_")) # No bootstrap assert_raises(ValueError, ForestEstimator(oob_score=True, bootstrap=False).fit, X, y) def test_oob_score_raise_error(): for name in FOREST_ESTIMATORS: yield check_oob_score_raise_error, name def check_gridsearch(name): forest = FOREST_CLASSIFIERS[name]() clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)}) clf.fit(iris.data, iris.target) def test_gridsearch(): # Check that base trees can be grid-searched. 
for name in FOREST_CLASSIFIERS: yield check_gridsearch, name def check_parallel(name, X, y): """Check parallel computations in classification""" ForestEstimator = FOREST_ESTIMATORS[name] forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) forest.fit(X, y) assert_equal(len(forest), 10) forest.set_params(n_jobs=1) y1 = forest.predict(X) forest.set_params(n_jobs=2) y2 = forest.predict(X) assert_array_almost_equal(y1, y2, 3) def test_parallel(): for name in FOREST_CLASSIFIERS: yield check_parallel, name, iris.data, iris.target for name in FOREST_REGRESSORS: yield check_parallel, name, boston.data, boston.target def check_pickle(name, X, y): # Check pickability. ForestEstimator = FOREST_ESTIMATORS[name] obj = ForestEstimator(random_state=0) obj.fit(X, y) score = obj.score(X, y) pickle_object = pickle.dumps(obj) obj2 = pickle.loads(pickle_object) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(X, y) assert_equal(score, score2) def test_pickle(): for name in FOREST_CLASSIFIERS: yield check_pickle, name, iris.data[::2], iris.target[::2] for name in FOREST_REGRESSORS: yield check_pickle, name, boston.data[::2], boston.target[::2] def check_multioutput(name): # Check estimators on multi-output problems. X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]] est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) y_pred = est.fit(X_train, y_train).predict(X_test) assert_array_almost_equal(y_pred, y_test) if name in FOREST_CLASSIFIERS: with np.errstate(divide="ignore"): proba = est.predict_proba(X_test) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = est.predict_log_proba(X_test) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) def test_multioutput(): for name in FOREST_CLASSIFIERS: yield check_multioutput, name for name in FOREST_REGRESSORS: yield check_multioutput, name def check_classes_shape(name): # Test that n_classes_ and classes_ have proper shape. ForestClassifier = FOREST_CLASSIFIERS[name] # Classification, single output clf = ForestClassifier(random_state=0).fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(random_state=0).fit(X, _y) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_classes_shape(): for name in FOREST_CLASSIFIERS: yield check_classes_shape, name def test_random_trees_dense_type(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning a dense array. # Create the RTE with sparse=False hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # Assert that type is ndarray, not scipy.sparse.csr.csr_matrix assert_equal(type(X_transformed), np.ndarray) def test_random_trees_dense_equal(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning the same array for both argument values. 
# Create the RTEs hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False, random_state=0) hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True, random_state=0) X, y = datasets.make_circles(factor=0.5) X_transformed_dense = hasher_dense.fit_transform(X) X_transformed_sparse = hasher_sparse.fit_transform(X) # Assert that dense and sparse hashers have same array. assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) # Ignore warnings from switching to more power iterations in randomized_svd @ignore_warnings def test_random_hasher(): # test random forest hashing on circles dataset # make sure that it is linearly separable. # even after projected to two SVD dimensions # Note: Not all random_states produce perfect results. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # test fit and transform: hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) # one leaf active per data point per forest assert_equal(X_transformed.shape[0], X.shape[0]) assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) svd = TruncatedSVD(n_components=2) X_reduced = svd.fit_transform(X_transformed) linear_clf = LinearSVC() linear_clf.fit(X_reduced, y) assert_equal(linear_clf.score(X_reduced, y), 1.) def test_random_hasher_sparse_data(): X, y = datasets.make_multilabel_classification(random_state=0) hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X_transformed = hasher.fit_transform(X) X_transformed_sparse = hasher.fit_transform(csc_matrix(X)) assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) def test_parallel_train(): rng = check_random_state(12321) n_samples, n_features = 80, 30 X_train = rng.randn(n_samples, n_features) y_train = rng.randint(0, 2, n_samples) clfs = [ RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(X_train, y_train) for n_jobs in [1, 2, 3, 8, 16, 32] ] X_test = rng.randn(n_samples, n_features) probas = [clf.predict_proba(X_test) for clf in clfs] for proba1, proba2 in zip(probas, probas[1:]): assert_array_almost_equal(proba1, proba2) def test_distribution(): rng = check_random_state(12321) # Single variable with 4 values X = rng.randint(0, 4, size=(1000, 1)) y = rng.rand(1000) n_trees = 500 clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = sorted([(1. * count / n_trees, tree) for tree, count in uniques.items()]) # On a single variable problem where X_0 has 4 equiprobable values, there # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of # them has probability 1/3 while the 4 others have probability 1/6. assert_equal(len(uniques), 5) assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6. 
assert_greater(0.20, uniques[1][0]) assert_greater(0.20, uniques[2][0]) assert_greater(0.20, uniques[3][0]) assert_greater(uniques[4][0], 0.3) assert_equal(uniques[4][1], "0,1/0,0/--0,2/--") # Two variables, one with 2 values, one with 3 values X = np.empty((1000, 2)) X[:, 0] = np.random.randint(0, 2, 1000) X[:, 1] = np.random.randint(0, 3, 1000) y = rng.rand(1000) clf = ExtraTreesRegressor(n_estimators=100, max_features=1, random_state=1).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = [(count, tree) for tree, count in uniques.items()] assert_equal(len(uniques), 8) def check_max_leaf_nodes_max_depth(name): X, y = hastie_X, hastie_y # Test precedence of max_leaf_nodes over max_depth. ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0).fit(X, y) assert_greater(est.estimators_[0].tree_.max_depth, 1) est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y) assert_equal(est.estimators_[0].tree_.max_depth, 1) def test_max_leaf_nodes_max_depth(): for name in FOREST_ESTIMATORS: yield check_max_leaf_nodes_max_depth, name def check_min_samples_split(name): X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] # test boundary value assert_raises(ValueError, ForestEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, ForestEstimator(min_samples_split=0).fit, X, y) assert_raises(ValueError, ForestEstimator(min_samples_split=1.1).fit, X, y) est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0) est.fit(X, y) node_idx = est.estimators_[0].tree_.children_left != -1 node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] assert_greater(np.min(node_samples), len(X) * 0.5 - 1, "Failed with {0}".format(name)) est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0) est.fit(X, y) node_idx = est.estimators_[0].tree_.children_left != -1 node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] assert_greater(np.min(node_samples), len(X) * 0.5 - 1, "Failed with {0}".format(name)) def test_min_samples_split(): for name in FOREST_ESTIMATORS: yield check_min_samples_split, name def check_min_samples_leaf(name): X, y = hastie_X, hastie_y # Test if leaves contain more than leaf_count training examples ForestEstimator = FOREST_ESTIMATORS[name] # test boundary value assert_raises(ValueError, ForestEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, ForestEstimator(min_samples_leaf=0).fit, X, y) est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), len(X) * 0.25 - 1, "Failed with {0}".format(name)) def test_min_samples_leaf(): for name in FOREST_ESTIMATORS: yield check_min_samples_leaf, name def check_min_weight_fraction_leaf(name): X, y = hastie_X, hastie_y # Test if leaves contain at least min_weight_fraction_leaf of the # training set ForestEstimator = FOREST_ESTIMATORS[name] 
rng = np.random.RandomState(0) weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for frac in np.linspace(0, 0.5, 6): est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1, random_state=0) if "RandomForest" in name: est.bootstrap = False est.fit(X, y, sample_weight=weights) out = est.estimators_[0].tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): for name in FOREST_ESTIMATORS: yield check_min_weight_fraction_leaf, name def check_sparse_input(name, X, X_sparse, y): ForestEstimator = FOREST_ESTIMATORS[name] dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y) assert_array_almost_equal(sparse.apply(X), dense.apply(X)) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_array_almost_equal(sparse.predict(X), dense.predict(X)) assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) if name in FOREST_CLASSIFIERS: assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) assert_array_almost_equal(sparse.predict_log_proba(X), dense.predict_log_proba(X)) if name in FOREST_TRANSFORMERS: assert_array_almost_equal(sparse.transform(X).toarray(), dense.transform(X).toarray()) assert_array_almost_equal(sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()) def test_sparse_input(): X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50) for name, sparse_matrix in product(FOREST_ESTIMATORS, (csr_matrix, csc_matrix, coo_matrix)): yield check_sparse_input, name, X, sparse_matrix(X), y def check_memory_layout(name, dtype): # Check that it works no matter the memory layout est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.base_estimator.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # coo_matrix X = coo_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_memory_layout(): for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]): yield check_memory_layout, name, dtype for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]): yield check_memory_layout, name, dtype @ignore_warnings def check_1d_input(name, X, X_2d, y): ForestEstimator = FOREST_ESTIMATORS[name] assert_raises(ValueError, ForestEstimator(n_estimators=1, 
                                                 random_state=0).fit, X, y)

    est = ForestEstimator(random_state=0)
    est.fit(X_2d, y)

    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, est.predict, X)


@ignore_warnings
def test_1d_input():
    X = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target

    for name in FOREST_ESTIMATORS:
        yield check_1d_input, name, X, X_2d, y


def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)

    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Using a Python 2.x list as the sample_weight parameter used to raise
    # an exception. This test makes sure such code will now run correctly.
    clf = ForestClassifier()
    sample_weight = [1.] * len(iris.data)
    clf.fit(iris.data, iris.target, sample_weight=sample_weight)


def test_class_weights():
    for name in FOREST_CLASSIFIERS:
        yield check_class_weights, name


def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(class_weight='balanced', random_state=0)
    clf.fit(X, _y)
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
                           random_state=0)
    clf.fit(X, _y)
    # smoke test for balanced subsample
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)


def test_class_weight_balanced_and_bootstrap_multi_output():
    for name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, name


def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = ForestClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Warning warm_start with preset clf = ForestClassifier(class_weight='balanced', warm_start=True, random_state=0) assert_warns(UserWarning, clf.fit, X, y) assert_warns(UserWarning, clf.fit, X, _y) # Not a list or preset for multi-output clf = ForestClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in FOREST_CLASSIFIERS: yield check_class_weight_errors, name def check_warm_start(name, random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = ForestEstimator(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X), err_msg="Failed with {0}".format(name)) def test_warm_start(): for name in FOREST_ESTIMATORS: yield check_warm_start, name def check_warm_start_clear(name): # Test if fit clears state and grows a new forest when warm_start==False. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True, random_state=2) clf_2.fit(X, y) # inits state clf_2.set_params(warm_start=False, random_state=1) clf_2.fit(X, y) # clears old state and equals clf assert_array_almost_equal(clf_2.apply(X), clf.apply(X)) def test_warm_start_clear(): for name in FOREST_ESTIMATORS: yield check_warm_start_clear, name def check_warm_start_smaller_n_estimators(name): # Test if warm start second fit with smaller n_estimators raises error. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_smaller_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_smaller_n_estimators, name def check_warm_start_equal_n_estimators(name): # Test if warm start with equal n_estimators does nothing and returns the # same forest and raises a warning. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf_2.fit(X, y) # Now clf_2 equals clf. clf_2.set_params(random_state=2) assert_warns(UserWarning, clf_2.fit, X, y) # If we had fit the trees again we would have got a different forest as we # changed the random state. 
assert_array_equal(clf.apply(X), clf_2.apply(X)) def test_warm_start_equal_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_equal_n_estimators, name def check_warm_start_oob(name): # Test that the warm start computes oob score when asked. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=True) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=False) clf_2.fit(X, y) clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15) clf_2.fit(X, y) assert_true(hasattr(clf_2, 'oob_score_')) assert_equal(clf.oob_score_, clf_2.oob_score_) # Test that oob_score is computed even if we don't need to train # additional trees. clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True, random_state=1, bootstrap=True, oob_score=False) clf_3.fit(X, y) assert_true(not(hasattr(clf_3, 'oob_score_'))) clf_3.set_params(oob_score=True) ignore_warnings(clf_3.fit)(X, y) assert_equal(clf.oob_score_, clf_3.oob_score_) def test_warm_start_oob(): for name in FOREST_CLASSIFIERS: yield check_warm_start_oob, name for name in FOREST_REGRESSORS: yield check_warm_start_oob, name def test_dtype_convert(n_classes=15): classifier = RandomForestClassifier(random_state=0, bootstrap=False) X = np.eye(n_classes) y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]] result = classifier.fit(X, y).predict(X) assert_array_equal(classifier.classes_, y) assert_array_equal(result, y) def check_decision_path(name): X, y = hastie_X, hastie_y n_samples = X.shape[0] ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) est.fit(X, y) indicator, n_nodes_ptr = est.decision_path(X) assert_equal(indicator.shape[1], n_nodes_ptr[-1]) assert_equal(indicator.shape[0], n_samples) assert_array_equal(np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_]) # Assert that leaves index are correct leaves = est.apply(X) for est_id in range(leaves.shape[1]): leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j] for i, j in enumerate(leaves[:, est_id])] assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) def test_decision_path(): for name in FOREST_CLASSIFIERS: yield check_decision_path, name for name in FOREST_REGRESSORS: yield check_decision_path, name def test_min_impurity_split(): # Test if min_impurity_split of base estimators is set # Regression test for #8006 X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) all_estimators = [RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor] for Estimator in all_estimators: est = Estimator(min_impurity_split=0.1) est = assert_warns_message(DeprecationWarning, "min_impurity_decrease", est.fit, X, y) for tree in est.estimators_: assert_equal(tree.min_impurity_split, 0.1) def test_min_impurity_decrease(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) all_estimators = [RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor] for Estimator in all_estimators: est = Estimator(min_impurity_decrease=0.1) est.fit(X, y) for tree in est.estimators_: # Simply check if the parameter is passed on correctly. Tree tests # will suffice for the actual working of this param assert_equal(tree.min_impurity_decrease, 0.1)
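

# A compact added sketch (not part of the original test file) of the block
# layout that check_decision_path() asserts: decision_path() horizontally
# stacks one node-indicator matrix per tree, and n_nodes_ptr gives each
# tree's column offset, so offsetting apply()'s leaf ids by n_nodes_ptr[t]
# selects the indicator column of the leaf reached in tree t. The estimator
# and its parameters are arbitrary choices and reuse this file's test data.
def demo_decision_path_layout():
    est = RandomForestClassifier(n_estimators=3, max_depth=2,
                                 random_state=0).fit(hastie_X, hastie_y)
    indicator, n_nodes_ptr = est.decision_path(hastie_X)
    leaves = est.apply(hastie_X)
    for t in range(len(est.estimators_)):
        cols = n_nodes_ptr[t] + leaves[:, t]
        # Every sample's leaf column must be flagged in tree t's block.
        assert all(indicator[i, c] == 1 for i, c in enumerate(cols))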
43,434
34.486111
86
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py
""" Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting). """ import warnings import numpy as np from itertools import product from scipy.sparse import csr_matrix from scipy.sparse import csc_matrix from scipy.sparse import coo_matrix from sklearn import datasets from sklearn.base import clone from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble.gradient_boosting import ZeroEstimator from sklearn.metrics import mean_squared_error from sklearn.utils import check_random_state, tosequence from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import skip_if_32bit from sklearn.exceptions import DataConversionWarning from sklearn.exceptions import NotFittedError # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] rng = np.random.RandomState(0) # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def check_classification_toy(presort, loss): # Check classification on a toy dataset. clf = GradientBoostingClassifier(loss=loss, n_estimators=10, random_state=1, presort=presort) assert_raises(ValueError, clf.predict, T) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf.estimators_)) deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:]) assert_true(np.any(deviance_decrease >= 0.0)) leaves = clf.apply(X) assert_equal(leaves.shape, (6, 10, 1)) def test_classification_toy(): for presort, loss in product(('auto', True, False), ('deviance', 'exponential')): yield check_classification_toy, presort, loss def test_parameter_checks(): # Check input parameter validation. 
assert_raises(ValueError, GradientBoostingClassifier(n_estimators=0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(n_estimators=-1).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(learning_rate=0.0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(learning_rate=-1.0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(loss='foobar').fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(min_samples_split=0.0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(min_samples_split=1.1).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(min_samples_leaf=0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(subsample=0.0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(subsample=1.1).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(subsample=-0.1).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(max_depth=-0.1).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(max_depth=0).fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(init={}).fit, X, y) # test fit before feature importance assert_raises(ValueError, lambda: GradientBoostingClassifier().feature_importances_) # deviance requires ``n_classes >= 2``. assert_raises(ValueError, lambda X, y: GradientBoostingClassifier( loss='deviance').fit(X, y), X, [0, 0, 0, 0]) def test_loss_function(): assert_raises(ValueError, GradientBoostingClassifier(loss='ls').fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(loss='lad').fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(loss='quantile').fit, X, y) assert_raises(ValueError, GradientBoostingClassifier(loss='huber').fit, X, y) assert_raises(ValueError, GradientBoostingRegressor(loss='deviance').fit, X, y) assert_raises(ValueError, GradientBoostingRegressor(loss='exponential').fit, X, y) def check_classification_synthetic(presort, loss): # Test GradientBoostingClassifier on synthetic dataset used by # Hastie et al. in ESLII Example 12.7. X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) X_train, X_test = X[:2000], X[2000:] y_train, y_test = y[:2000], y[2000:] gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2, max_depth=1, loss=loss, learning_rate=1.0, random_state=0) gbrt.fit(X_train, y_train) error_rate = (1.0 - gbrt.score(X_test, y_test)) assert_less(error_rate, 0.09) gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2, max_depth=1, loss=loss, learning_rate=1.0, subsample=0.5, random_state=0, presort=presort) gbrt.fit(X_train, y_train) error_rate = (1.0 - gbrt.score(X_test, y_test)) assert_less(error_rate, 0.08) def test_classification_synthetic(): for presort, loss in product(('auto', True, False), ('deviance', 'exponential')): yield check_classification_synthetic, presort, loss def check_boston(presort, loss, subsample): # Check consistency on dataset boston house prices with least squares # and least absolute deviation. 
    ones = np.ones(len(boston.target))
    last_y_pred = None
    for sample_weight in None, ones, 2 * ones:
        clf = GradientBoostingRegressor(n_estimators=100,
                                        loss=loss,
                                        max_depth=4,
                                        subsample=subsample,
                                        min_samples_split=2,
                                        random_state=1,
                                        presort=presort)

        assert_raises(ValueError, clf.predict, boston.data)
        clf.fit(boston.data, boston.target,
                sample_weight=sample_weight)
        leaves = clf.apply(boston.data)
        assert_equal(leaves.shape, (506, 100))

        y_pred = clf.predict(boston.data)
        mse = mean_squared_error(boston.target, y_pred)
        assert_less(mse, 6.0)

        if last_y_pred is not None:
            assert_array_almost_equal(last_y_pred, y_pred)

        last_y_pred = y_pred


def test_boston():
    for presort, loss, subsample in product(('auto', True, False),
                                            ('ls', 'lad', 'huber'),
                                            (1.0, 0.5)):
        yield check_boston, presort, loss, subsample


def check_iris(presort, subsample, sample_weight):
    # Check consistency on dataset iris.
    clf = GradientBoostingClassifier(n_estimators=100,
                                     loss='deviance',
                                     random_state=1,
                                     subsample=subsample,
                                     presort=presort)
    clf.fit(iris.data, iris.target, sample_weight=sample_weight)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.9)

    leaves = clf.apply(iris.data)
    assert_equal(leaves.shape, (150, 100, 3))


def test_iris():
    ones = np.ones(len(iris.target))
    for presort, subsample, sample_weight in product(('auto', True, False),
                                                     (1.0, 0.5),
                                                     (None, ones)):
        yield check_iris, presort, subsample, sample_weight


def test_regression_synthetic():
    # Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors'. Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 2, 'learning_rate': 0.1,
                         'loss': 'ls'}

    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state,
                                   noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]

    for presort in True, False:
        clf = GradientBoostingRegressor(presort=presort)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 5.0)

    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]

    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 1700.0)

    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]

    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 0.015)


def test_feature_importances():
    X = np.array(boston.data, dtype=np.float32)
    y = np.array(boston.target, dtype=np.float32)

    for presort in True, False:
        clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                        min_samples_split=2, random_state=1,
                                        presort=presort)
        clf.fit(X, y)
        assert_true(hasattr(clf, 'feature_importances_'))


def test_probability_log():
    # Predict probabilities.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T) assert_true(np.all(y_proba >= 0.0)) assert_true(np.all(y_proba <= 1.0)) # derive predictions from probabilities y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0) assert_array_equal(y_pred, true_result) def test_check_inputs(): # Test input checks (shape and type of X and y). clf = GradientBoostingClassifier(n_estimators=100, random_state=1) assert_raises(ValueError, clf.fit, X, y + [0, 1]) clf = GradientBoostingClassifier(n_estimators=100, random_state=1) assert_raises(ValueError, clf.fit, X, y, sample_weight=([1] * len(y)) + [0, 1]) def test_check_inputs_predict(): # X has wrong shape clf = GradientBoostingClassifier(n_estimators=100, random_state=1) clf.fit(X, y) x = np.array([1.0, 2.0])[:, np.newaxis] assert_raises(ValueError, clf.predict, x) x = np.array([[]]) assert_raises(ValueError, clf.predict, x) x = np.array([1.0, 2.0, 3.0])[:, np.newaxis] assert_raises(ValueError, clf.predict, x) clf = GradientBoostingRegressor(n_estimators=100, random_state=1) clf.fit(X, rng.rand(len(X))) x = np.array([1.0, 2.0])[:, np.newaxis] assert_raises(ValueError, clf.predict, x) x = np.array([[]]) assert_raises(ValueError, clf.predict, x) x = np.array([1.0, 2.0, 3.0])[:, np.newaxis] assert_raises(ValueError, clf.predict, x) def test_check_max_features(): # test if max_features is valid. clf = GradientBoostingRegressor(n_estimators=100, random_state=1, max_features=0) assert_raises(ValueError, clf.fit, X, y) clf = GradientBoostingRegressor(n_estimators=100, random_state=1, max_features=(len(X[0]) + 1)) assert_raises(ValueError, clf.fit, X, y) clf = GradientBoostingRegressor(n_estimators=100, random_state=1, max_features=-0.1) assert_raises(ValueError, clf.fit, X, y) def test_max_feature_regression(): # Test to make sure random state is set properly. X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) X_train, X_test = X[:2000], X[2000:] y_train, y_test = y[:2000], y[2000:] gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5, max_depth=2, learning_rate=.1, max_features=2, random_state=1) gbrt.fit(X_train, y_train) deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test)) assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance) def test_max_feature_auto(): # Test if max features is set properly for floats and str. X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) _, n_features = X.shape X_train = X[:2000] y_train = y[:2000] gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto') gbrt.fit(X_train, y_train) assert_equal(gbrt.max_features_, int(np.sqrt(n_features))) gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto') gbrt.fit(X_train, y_train) assert_equal(gbrt.max_features_, n_features) gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3) gbrt.fit(X_train, y_train) assert_equal(gbrt.max_features_, int(n_features * 0.3)) gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt') gbrt.fit(X_train, y_train) assert_equal(gbrt.max_features_, int(np.sqrt(n_features))) gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2') gbrt.fit(X_train, y_train) assert_equal(gbrt.max_features_, int(np.log2(n_features))) gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.01 / X.shape[1]) gbrt.fit(X_train, y_train) assert_equal(gbrt.max_features_, 1) def test_staged_predict(): # Test whether staged decision function eventually gives # the same prediction. 
X, y = datasets.make_friedman1(n_samples=1200, random_state=1, noise=1.0) X_train, y_train = X[:200], y[:200] X_test = X[200:] clf = GradientBoostingRegressor() # test raise ValueError if not fitted assert_raises(ValueError, lambda X: np.fromiter( clf.staged_predict(X), dtype=np.float64), X_test) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # test if prediction for last stage equals ``predict`` for y in clf.staged_predict(X_test): assert_equal(y.shape, y_pred.shape) assert_array_equal(y_pred, y) def test_staged_predict_proba(): # Test whether staged predict proba eventually gives # the same prediction. X, y = datasets.make_hastie_10_2(n_samples=1200, random_state=1) X_train, y_train = X[:200], y[:200] X_test, y_test = X[200:], y[200:] clf = GradientBoostingClassifier(n_estimators=20) # test raise NotFittedError if not fitted assert_raises(NotFittedError, lambda X: np.fromiter( clf.staged_predict_proba(X), dtype=np.float64), X_test) clf.fit(X_train, y_train) # test if prediction for last stage equals ``predict`` for y_pred in clf.staged_predict(X_test): assert_equal(y_test.shape, y_pred.shape) assert_array_equal(clf.predict(X_test), y_pred) # test if prediction for last stage equals ``predict_proba`` for staged_proba in clf.staged_predict_proba(X_test): assert_equal(y_test.shape[0], staged_proba.shape[0]) assert_equal(2, staged_proba.shape[1]) assert_array_equal(clf.predict_proba(X_test), staged_proba) def test_staged_functions_defensive(): # test that staged_functions make defensive copies rng = np.random.RandomState(0) X = rng.uniform(size=(10, 3)) y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros for estimator in [GradientBoostingRegressor(), GradientBoostingClassifier()]: estimator.fit(X, y) for func in ['predict', 'decision_function', 'predict_proba']: staged_func = getattr(estimator, "staged_" + func, None) if staged_func is None: # regressor has no staged_predict_proba continue with warnings.catch_warnings(record=True): staged_result = list(staged_func(X)) staged_result[1][:] = 0 assert_true(np.all(staged_result[0] != 0)) def test_serialization(): # Check model serialization. clf = GradientBoostingClassifier(n_estimators=100, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(100, len(clf.estimators_)) try: import cPickle as pickle except ImportError: import pickle serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL) clf = None clf = pickle.loads(serialized_clf) assert_array_equal(clf.predict(T), true_result) assert_equal(100, len(clf.estimators_)) def test_degenerate_targets(): # Check if we can fit even though all targets are equal. clf = GradientBoostingClassifier(n_estimators=100, random_state=1) # classifier should raise exception assert_raises(ValueError, clf.fit, X, np.ones(len(X))) clf = GradientBoostingRegressor(n_estimators=100, random_state=1) clf.fit(X, np.ones(len(X))) clf.predict([rng.rand(2)]) assert_array_equal(np.ones((1,), dtype=np.float64), clf.predict([rng.rand(2)])) def test_quantile_loss(): # Check if quantile loss with alpha=0.5 equals lad. 
    clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
                                             max_depth=4, alpha=0.5,
                                             random_state=7)

    clf_quantile.fit(boston.data, boston.target)
    y_quantile = clf_quantile.predict(boston.data)

    clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
                                        max_depth=4, random_state=7)

    clf_lad.fit(boston.data, boston.target)
    y_lad = clf_lad.predict(boston.data)
    assert_array_almost_equal(y_quantile, y_lad, decimal=4)


def test_symbol_labels():
    # Test with non-integer class labels.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    symbol_y = tosequence(map(str, y))

    clf.fit(X, symbol_y)
    assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
    assert_equal(100, len(clf.estimators_))


def test_float_class_labels():
    # Test with float class labels.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    float_y = np.asarray(y, dtype=np.float32)

    clf.fit(X, float_y)
    assert_array_equal(clf.predict(T),
                       np.asarray(true_result, dtype=np.float32))
    assert_equal(100, len(clf.estimators_))


def test_shape_y():
    # Test with a column-vector (2d) y, which must be converted.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    y_ = np.asarray(y, dtype=np.int32)
    y_ = y_[:, np.newaxis]

    # This will raise a DataConversionWarning that we want to "always"
    # raise; otherwise the warning gets ignored in the later tests and
    # the tests that check for it fail
    assert_warns(DataConversionWarning, clf.fit, X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))


def test_mem_layout():
    # Test with different memory layouts of X and y
    X_ = np.asfortranarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    X_ = np.ascontiguousarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.ascontiguousarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.asfortranarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))


def test_oob_improvement():
    # Test if oob improvement has correct shape and regression test.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    assert_equal(clf.oob_improvement_.shape[0], 100)
    # hard-coded regression test - change if modification in OOB computation
    assert_array_almost_equal(clf.oob_improvement_[:5],
                              np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
                              decimal=2)


def test_oob_improvement_raise():
    # Test that accessing oob_improvement_ raises without subsampling.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=1.0)
    clf.fit(X, y)
    assert_raises(AttributeError, lambda: clf.oob_improvement_)


def test_oob_multiclass_iris():
    # Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance', random_state=1, subsample=0.5) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.9) assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators) # hard-coded regression test - change if modification in OOB computation # FIXME: the following snippet does not yield the same results on 32 bits # assert_array_almost_equal(clf.oob_improvement_[:5], # np.array([12.68, 10.45, 8.18, 6.43, 5.13]), # decimal=2) def test_verbose_output(): # Check verbose=1 does not cause error. from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout sys.stdout = StringIO() clf = GradientBoostingClassifier(n_estimators=100, random_state=1, verbose=1, subsample=0.8) clf.fit(X, y) verbose_output = sys.stdout sys.stdout = old_stdout # check output verbose_output.seek(0) header = verbose_output.readline().rstrip() # with OOB true_header = ' '.join(['%10s'] + ['%16s'] * 3) % ( 'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time') assert_equal(true_header, header) n_lines = sum(1 for l in verbose_output.readlines()) # one for 1-10 and then 9 for 20-100 assert_equal(10 + 9, n_lines) def test_more_verbose_output(): # Check verbose=2 does not cause error. from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout sys.stdout = StringIO() clf = GradientBoostingClassifier(n_estimators=100, random_state=1, verbose=2) clf.fit(X, y) verbose_output = sys.stdout sys.stdout = old_stdout # check output verbose_output.seek(0) header = verbose_output.readline().rstrip() # no OOB true_header = ' '.join(['%10s'] + ['%16s'] * 2) % ( 'Iter', 'Train Loss', 'Remaining Time') assert_equal(true_header, header) n_lines = sum(1 for l in verbose_output.readlines()) # 100 lines for n_estimators==100 assert_equal(100, n_lines) def test_warm_start(): # Test if warm start equals fit. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=200, max_depth=1) est.fit(X, y) est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True) est_ws.fit(X, y) est_ws.set_params(n_estimators=200) est_ws.fit(X, y) assert_array_almost_equal(est_ws.predict(X), est.predict(X)) def test_warm_start_n_estimators(): # Test if warm start equals fit - set n_estimators. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=300, max_depth=1) est.fit(X, y) est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True) est_ws.fit(X, y) est_ws.set_params(n_estimators=300) est_ws.fit(X, y) assert_array_almost_equal(est_ws.predict(X), est.predict(X)) def test_warm_start_max_depth(): # Test if possible to fit trees of different depth in ensemble. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=100, max_depth=1, warm_start=True) est.fit(X, y) est.set_params(n_estimators=110, max_depth=2) est.fit(X, y) # last 10 trees have different depth assert_equal(est.estimators_[0, 0].max_depth, 1) for i in range(1, 11): assert_equal(est.estimators_[-i, 0].max_depth, 2) def test_warm_start_clear(): # Test if fit clears state. 
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=100, max_depth=1) est.fit(X, y) est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True) est_2.fit(X, y) # inits state est_2.set_params(warm_start=False) est_2.fit(X, y) # clears old state and equals est assert_array_almost_equal(est_2.predict(X), est.predict(X)) def test_warm_start_zero_n_estimators(): # Test if warm start with zero n_estimators raises error X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=100, max_depth=1, warm_start=True) est.fit(X, y) est.set_params(n_estimators=0) assert_raises(ValueError, est.fit, X, y) def test_warm_start_smaller_n_estimators(): # Test if warm start with smaller n_estimators raises error X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=100, max_depth=1, warm_start=True) est.fit(X, y) est.set_params(n_estimators=99) assert_raises(ValueError, est.fit, X, y) def test_warm_start_equal_n_estimators(): # Test if warm start with equal n_estimators does nothing X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=100, max_depth=1) est.fit(X, y) est2 = clone(est) est2.set_params(n_estimators=est.n_estimators, warm_start=True) est2.fit(X, y) assert_array_almost_equal(est2.predict(X), est.predict(X)) def test_warm_start_oob_switch(): # Test if oob can be turned on during warm start. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=100, max_depth=1, warm_start=True) est.fit(X, y) est.set_params(n_estimators=110, subsample=0.5) est.fit(X, y) assert_array_equal(est.oob_improvement_[:100], np.zeros(100)) # the last 10 are not zeros assert_array_equal(est.oob_improvement_[-10:] == 0.0, np.zeros(10, dtype=np.bool)) def test_warm_start_oob(): # Test if warm start OOB equals fit. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]: est = Cls(n_estimators=200, max_depth=1, subsample=0.5, random_state=1) est.fit(X, y) est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True) est_ws.fit(X, y) est_ws.set_params(n_estimators=200) est_ws.fit(X, y) assert_array_almost_equal(est_ws.oob_improvement_[:100], est.oob_improvement_[:100]) def early_stopping_monitor(i, est, locals): """Returns True on the 10th iteration. """ if i == 9: return True else: return False def test_monitor_early_stopping(): # Test if monitor return value works. 
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)

    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=20, max_depth=1, random_state=1,
                  subsample=0.5)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)  # this is not altered
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)

        # try refit
        est.set_params(n_estimators=30)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.train_score_.shape[0], 30)

        est = Cls(n_estimators=20, max_depth=1, random_state=1,
                  subsample=0.5, warm_start=True)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)

        # try refit
        est.set_params(n_estimators=30, warm_start=False)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.train_score_.shape[0], 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.oob_improvement_.shape[0], 30)


def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4

    est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
                                     random_state=1, max_leaf_nodes=k + 1)
    est.fit(X, y)

    tree = est.estimators_[0, 0].tree_
    assert_equal(tree.max_depth, k)
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)


def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    k = 4

    est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
                                    random_state=1, max_leaf_nodes=k + 1)
    est.fit(boston.data, boston.target)

    tree = est.estimators_[-1, 0].tree_
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)


def test_zero_estimator_reg():
    # Test if ZeroEstimator works for regression.
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init=ZeroEstimator())
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)

    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='zero')
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)

    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, boston.data, boston.target)


def test_zero_estimator_clf():
    # Test if ZeroEstimator works for classification.
X = iris.data y = np.array(iris.target) est = GradientBoostingClassifier(n_estimators=20, max_depth=1, random_state=1, init=ZeroEstimator()) est.fit(X, y) assert_greater(est.score(X, y), 0.96) est = GradientBoostingClassifier(n_estimators=20, max_depth=1, random_state=1, init='zero') est.fit(X, y) assert_greater(est.score(X, y), 0.96) # binary clf mask = y != 0 y[mask] = 1 y[~mask] = 0 est = GradientBoostingClassifier(n_estimators=20, max_depth=1, random_state=1, init='zero') est.fit(X, y) assert_greater(est.score(X, y), 0.96) est = GradientBoostingClassifier(n_estimators=20, max_depth=1, random_state=1, init='foobar') assert_raises(ValueError, est.fit, X, y) def test_max_leaf_nodes_max_depth(): # Test precedence of max_leaf_nodes over max_depth. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) all_estimators = [GradientBoostingRegressor, GradientBoostingClassifier] k = 4 for GBEstimator in all_estimators: est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) tree = est.estimators_[0, 0].tree_ assert_greater(tree.max_depth, 1) est = GBEstimator(max_depth=1).fit(X, y) tree = est.estimators_[0, 0].tree_ assert_equal(tree.max_depth, 1) def test_min_impurity_split(): # Test if min_impurity_split of base estimators is set # Regression test for #8006 X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) all_estimators = [GradientBoostingRegressor, GradientBoostingClassifier] for GBEstimator in all_estimators: est = GBEstimator(min_impurity_split=0.1) est = assert_warns_message(DeprecationWarning, "min_impurity_decrease", est.fit, X, y) for tree in est.estimators_.flat: assert_equal(tree.min_impurity_split, 0.1) def test_min_impurity_decrease(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) all_estimators = [GradientBoostingRegressor, GradientBoostingClassifier] for GBEstimator in all_estimators: est = GBEstimator(min_impurity_decrease=0.1) est.fit(X, y) for tree in est.estimators_.flat: # Simply check if the parameter is passed on correctly. Tree tests # will suffice for the actual working of this param assert_equal(tree.min_impurity_decrease, 0.1) def test_warm_start_wo_nestimators_change(): # Test if warm_start does nothing if n_estimators is not changed. # Regression test for #3513. clf = GradientBoostingClassifier(n_estimators=10, warm_start=True) clf.fit([[0, 1], [2, 3]], [0, 1]) assert_equal(clf.estimators_.shape[0], 10) clf.fit([[0, 1], [2, 3]], [0, 1]) assert_equal(clf.estimators_.shape[0], 10) def test_probability_exponential(): # Predict probabilities. clf = GradientBoostingClassifier(loss='exponential', n_estimators=100, random_state=1) assert_raises(ValueError, clf.predict_proba, T) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) # check if probabilities are in [0, 1]. 
    y_proba = clf.predict_proba(T)
    assert_true(np.all(y_proba >= 0.0))
    assert_true(np.all(y_proba <= 1.0))
    score = clf.decision_function(T).ravel()
    assert_array_almost_equal(y_proba[:, 1],
                              1.0 / (1.0 + np.exp(-2 * score)))

    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)


def test_non_uniform_weights_toy_edge_case_reg():
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('huber', 'ls', 'lad', 'quantile'):
        gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
                                       loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_greater(gb.predict([[1, 0]])[0], 0.5)


def test_non_uniform_weights_toy_edge_case_clf():
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(gb.predict([[1, 0]]), [1])


def check_sparse_input(EstimatorClass, X, X_sparse, y):
    dense = EstimatorClass(n_estimators=10, random_state=0,
                           max_depth=2).fit(X, y)
    sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                            presort=False).fit(X_sparse, y)
    auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                          presort='auto').fit(X_sparse, y)

    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    assert_array_almost_equal(sparse.predict(X), dense.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              dense.feature_importances_)

    assert_array_almost_equal(sparse.apply(X), auto.apply(X))
    assert_array_almost_equal(sparse.predict(X), auto.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              auto.feature_importances_)

    assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
    assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))

    # EstimatorClass is the class itself, not an instance, so compare the
    # classes directly; isinstance() would always be False here.
    if issubclass(EstimatorClass, GradientBoostingClassifier):
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))

        assert_array_almost_equal(sparse.predict_proba(X),
                                  auto.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  auto.predict_log_proba(X))

        assert_array_almost_equal(sparse.decision_function(X_sparse),
                                  sparse.decision_function(X))
        assert_array_almost_equal(dense.decision_function(X_sparse),
                                  sparse.decision_function(X))
        # materialise the staged generators before stacking them into arrays
        assert_array_almost_equal(
            np.array(list(sparse.staged_decision_function(X_sparse))),
            np.array(list(sparse.staged_decision_function(X))))


@skip_if_32bit
def test_sparse_input():
    ests = (GradientBoostingClassifier, GradientBoostingRegressor)
    sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)

    y, X = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50,
                                                   n_features=1,
                                                   n_classes=20)
    y = y[:, 0]

    for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
        yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
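
# Illustrative sketch (not part of the upstream suite) of the staged
# prediction API covered above: ``staged_predict`` yields one prediction per
# boosting stage, so a held-out error curve costs a single fit. The ``demo_*``
# name and the data are assumptions made for the example.
def demo_staged_predict_error_curve():
    X_demo, y_demo = datasets.make_hastie_10_2(n_samples=400, random_state=0)
    X_tr, X_te = X_demo[:200], X_demo[200:]
    y_tr, y_te = y_demo[:200], y_demo[200:]

    gbrt = GradientBoostingClassifier(n_estimators=50, random_state=0)
    gbrt.fit(X_tr, y_tr)
    # one error value per stage, without refitting 50 separate models
    errors = [np.mean(y_stage != y_te)
              for y_stage in gbrt.staged_predict(X_te)]
    return int(np.argmin(errors)) + 1   # stage with the lowest held-out error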
41,305
36.246168
85
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_partial_dependence.py
""" Testing for the partial dependence module. """ import numpy as np from numpy.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import if_matplotlib from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import GradientBoostingRegressor from sklearn import datasets # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the boston dataset boston = datasets.load_boston() # also load the iris dataset iris = datasets.load_iris() def test_partial_dependence_classifier(): # Test partial dependence for classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5) # only 4 grid points instead of 5 because only 4 unique X[:,0] vals assert pdp.shape == (1, 4) assert axes[0].shape[0] == 4 # now with our own grid X_ = np.asarray(X) grid = np.unique(X_[:, 0]) pdp_2, axes = partial_dependence(clf, [0], grid=grid) assert axes is None assert_array_equal(pdp, pdp_2) def test_partial_dependence_multiclass(): # Test partial dependence for multi-class classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 n_classes = clf.n_classes_ pdp, axes = partial_dependence( clf, [0], X=iris.data, grid_resolution=grid_resolution) assert pdp.shape == (n_classes, grid_resolution) assert len(axes) == 1 assert axes[0].shape[0] == grid_resolution def test_partial_dependence_regressor(): # Test partial dependence for regressor clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 pdp, axes = partial_dependence( clf, [0], X=boston.data, grid_resolution=grid_resolution) assert pdp.shape == (1, grid_resolution) assert axes[0].shape[0] == grid_resolution def test_partial_dependecy_input(): # Test input validation of partial dependence. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_raises(ValueError, partial_dependence, clf, [0], grid=None, X=None) assert_raises(ValueError, partial_dependence, clf, [0], grid=[0, 1], X=X) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, partial_dependence, {}, [0], X=X) # Gradient boosting estimator must be fit assert_raises(ValueError, partial_dependence, GradientBoostingClassifier(), [0], X=X) assert_raises(ValueError, partial_dependence, clf, [-1], X=X) assert_raises(ValueError, partial_dependence, clf, [100], X=X) # wrong ndim for grid grid = np.random.rand(10, 2, 1) assert_raises(ValueError, partial_dependence, clf, [0], grid=grid) @if_matplotlib def test_plot_partial_dependence(): # Test partial dependence plot function. 
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)
    assert len(axs) == 3
    # has_data() reports whether anything was actually drawn on the axes
    assert all(ax.has_data() for ax in axs)

    # check with str features and array feature names
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)

    assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)

    # check with list feature_names
    feature_names = boston.feature_names.tolist()
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=feature_names)
    assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)


@if_matplotlib
def test_plot_partial_dependence_input():
    # Test partial dependence plot function input checks.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)

    # not fitted yet
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [0])

    clf.fit(X, y)

    assert_raises(ValueError, plot_partial_dependence,
                  clf, np.array(X)[:, :0], [0])

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, plot_partial_dependence,
                  {}, X, [0])

    # must be larger than -1
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [-1])

    # too large feature value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [100])

    # str feature but no feature_names
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, ['foobar'])

    # not valid features value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [{'foo': 'bar'}])


@if_matplotlib
def test_plot_partial_dependence_multiclass():
    # Test partial dependence plot function on multi-class input.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label=0,
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)

    # now with symbol labels
    target = iris.target_names[iris.target]
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label='setosa',
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)

    # label not in gbrt.classes_
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1], label='foobar',
                  grid_resolution=grid_resolution)

    # label not provided
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1],
                  grid_resolution=grid_resolution)
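
# Illustrative sketch (not part of the upstream suite) of the call signature
# exercised above: ``partial_dependence`` returns one curve per target class
# (a single row for regressors) plus the grid axes. Feature index 5 (RM) and
# the ``demo_*`` name are assumptions made for the example.
def demo_partial_dependence_curve():
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)
    pdp, axes = partial_dependence(clf, [5], X=boston.data,
                                   grid_resolution=20)
    # pdp has shape (1, n_grid_points); axes[0] holds the grid values
    assert pdp.shape[0] == 1
    assert pdp.shape[1] == axes[0].shape[0]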
6,996
32.801932
74
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_iforest.py
""" Testing for Isolation Forest algorithm (sklearn.ensemble.iforest). """ # Authors: Nicolas Goix <[email protected]> # Alexandre Gramfort <[email protected]> # License: BSD 3 clause import numpy as np from sklearn.utils.fixes import euler_gamma from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import assert_greater from sklearn.utils.testing import ignore_warnings from sklearn.model_selection import ParameterGrid from sklearn.ensemble import IsolationForest from sklearn.ensemble.iforest import _average_path_length from sklearn.model_selection import train_test_split from sklearn.datasets import load_boston, load_iris from sklearn.utils import check_random_state from sklearn.metrics import roc_auc_score from scipy.sparse import csc_matrix, csr_matrix rng = check_random_state(0) # load the iris dataset # and randomly permute it iris = load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] def test_iforest(): """Check Isolation Forest for various parameter settings.""" X_train = np.array([[0, 1], [1, 2]]) X_test = np.array([[2, 1], [1, 1]]) grid = ParameterGrid({"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]}) with ignore_warnings(): for params in grid: IsolationForest(random_state=rng, **params).fit(X_train).predict(X_test) def test_iforest_sparse(): """Check IForest for various parameter settings on sparse input.""" rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]}) for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in grid: # Trained on sparse format sparse_classifier = IsolationForest( n_estimators=10, random_state=1, **params).fit(X_train_sparse) sparse_results = sparse_classifier.predict(X_test_sparse) # Trained on dense format dense_classifier = IsolationForest( n_estimators=10, random_state=1, **params).fit(X_train) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results) def test_iforest_error(): """Test that it gives proper exception on deficient input.""" X = iris.data # Test max_samples assert_raises(ValueError, IsolationForest(max_samples=-1).fit, X) assert_raises(ValueError, IsolationForest(max_samples=0.0).fit, X) assert_raises(ValueError, IsolationForest(max_samples=2.0).fit, X) # The dataset has less than 256 samples, explicitly setting # max_samples > n_samples should result in a warning. 
If not set
    # explicitly there should be no warning
    assert_warns_message(UserWarning,
                         "max_samples will be set to n_samples for estimation",
                         IsolationForest(max_samples=1000).fit, X)
    assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
    assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)
    assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)
    assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)


def test_recalculate_max_depth():
    """Check max_depth recalculation when max_samples is reset to n_samples"""
    X = iris.data
    clf = IsolationForest().fit(X)
    for est in clf.estimators_:
        assert_equal(est.max_depth, int(np.ceil(np.log2(X.shape[0]))))


def test_max_samples_attribute():
    X = iris.data
    clf = IsolationForest().fit(X)
    assert_equal(clf.max_samples_, X.shape[0])

    clf = IsolationForest(max_samples=500)
    assert_warns_message(UserWarning,
                         "max_samples will be set to n_samples for estimation",
                         clf.fit, X)
    assert_equal(clf.max_samples_, X.shape[0])

    clf = IsolationForest(max_samples=0.4).fit(X)
    assert_equal(clf.max_samples_, 0.4 * X.shape[0])


def test_iforest_parallel_regression():
    """Check parallel regression."""
    rng = check_random_state(0)

    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    ensemble = IsolationForest(n_jobs=3,
                               random_state=0).fit(X_train)

    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y2)

    ensemble = IsolationForest(n_jobs=1,
                               random_state=0).fit(X_train)

    y3 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y3)


def test_iforest_performance():
    """Test Isolation Forest performs well"""

    # Generate train/test data
    rng = check_random_state(2)
    X = 0.3 * rng.randn(120, 2)
    X_train = X[:100]

    # Generate some abnormal novel observations
    X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
    X_test = np.r_[X[100:], X_outliers]
    y_test = np.array([0] * 20 + [1] * 20)

    # fit the model
    clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)

    # predict scores (the lower, the more normal)
    y_pred = - clf.decision_function(X_test)

    # check that the model ranks outliers above inliers (high ROC AUC)
    assert_greater(roc_auc_score(y_test, y_pred), 0.98)


def test_iforest_works():
    # toy sample (the last two samples are outliers)
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]

    # Test IsolationForest
    clf = IsolationForest(random_state=rng, contamination=0.25)
    clf.fit(X)
    decision_func = - clf.decision_function(X)
    pred = clf.predict(X)

    # assert that the outliers are detected:
    assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
    assert_array_equal(pred, 6 * [1] + 2 * [-1])


def test_max_samples_consistency():
    # Make sure validated max_samples in iforest and BaseBagging are identical
    X = iris.data
    clf = IsolationForest().fit(X)
    assert_equal(clf.max_samples_, clf._max_samples)


def test_iforest_subsampled_features():
    # It tests non-regression for #5732 which failed at predict.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    clf = IsolationForest(max_features=0.8)
    clf.fit(X_train, y_train)
    clf.predict(X_test)


def test_iforest_average_path_length():
    # It tests non-regression for #8549 which used the wrong formula
    # for average path length, strictly for the integer case

    result_one = 2. * (np.log(4.) + euler_gamma) - 2. * 4. / 5.
    result_two = 2. * (np.log(998.)
+ euler_gamma) - 2. * 998. / 999. assert_almost_equal(_average_path_length(1), 1., decimal=10) assert_almost_equal(_average_path_length(5), result_one, decimal=10) assert_almost_equal(_average_path_length(999), result_two, decimal=10) assert_array_almost_equal(_average_path_length(np.array([1, 5, 999])), [1., result_one, result_two], decimal=10)
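
# Illustrative numeric check (not part of the upstream suite) of the corrected
# formula the test above encodes: for n > 1 samples, the expected depth of an
# unsuccessful binary-search-tree lookup, used to normalise anomaly scores, is
# c(n) = 2 * (ln(n - 1) + euler_gamma) - 2 * (n - 1) / n.
def demo_average_path_length_formula(n=5):
    expected = 2. * (np.log(n - 1.) + euler_gamma) - 2. * (n - 1.) / n
    assert abs(_average_path_length(n) - expected) < 1e-10
    return expected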
8,377
35.585153
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_base.py
""" Testing for the base module (sklearn.ensemble.base). """ # Authors: Gilles Louppe # License: BSD 3 clause import numpy as np from numpy.testing import assert_equal from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_true from sklearn.datasets import load_iris from sklearn.ensemble import BaggingClassifier from sklearn.ensemble.base import _set_random_states from sklearn.linear_model import Perceptron from collections import OrderedDict from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.pipeline import Pipeline from sklearn.feature_selection import SelectFromModel def test_base(): # Check BaseEnsemble methods. ensemble = BaggingClassifier( base_estimator=Perceptron(tol=1e-3, random_state=None), n_estimators=3) iris = load_iris() ensemble.fit(iris.data, iris.target) ensemble.estimators_ = [] # empty the list and create estimators manually ensemble._make_estimator() random_state = np.random.RandomState(3) ensemble._make_estimator(random_state=random_state) ensemble._make_estimator(random_state=random_state) ensemble._make_estimator(append=False) assert_equal(3, len(ensemble)) assert_equal(3, len(ensemble.estimators_)) assert_true(isinstance(ensemble[0], Perceptron)) assert_equal(ensemble[0].random_state, None) assert_true(isinstance(ensemble[1].random_state, int)) assert_true(isinstance(ensemble[2].random_state, int)) assert_not_equal(ensemble[1].random_state, ensemble[2].random_state) np_int_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators=np.int32(3)) np_int_ensemble.fit(iris.data, iris.target) def test_base_zero_n_estimators(): # Check that instantiating a BaseEnsemble with n_estimators<=0 raises # a ValueError. ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators=0) iris = load_iris() assert_raise_message(ValueError, "n_estimators must be greater than zero, got 0.", ensemble.fit, iris.data, iris.target) def test_base_not_int_n_estimators(): # Check that instantiating a BaseEnsemble with a string as n_estimators # raises a ValueError demanding n_estimators to be supplied as an integer. 
string_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators='3') iris = load_iris() assert_raise_message(ValueError, "n_estimators must be an integer", string_ensemble.fit, iris.data, iris.target) float_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3), n_estimators=3.0) assert_raise_message(ValueError, "n_estimators must be an integer", float_ensemble.fit, iris.data, iris.target) def test_set_random_states(): # Linear Discriminant Analysis doesn't have random state: smoke test _set_random_states(LinearDiscriminantAnalysis(), random_state=17) clf1 = Perceptron(tol=1e-3, random_state=None) assert_equal(clf1.random_state, None) # check random_state is None still sets _set_random_states(clf1, None) assert_true(isinstance(clf1.random_state, int)) # check random_state fixes results in consistent initialisation _set_random_states(clf1, 3) assert_true(isinstance(clf1.random_state, int)) clf2 = Perceptron(tol=1e-3, random_state=None) _set_random_states(clf2, 3) assert_equal(clf1.random_state, clf2.random_state) # nested random_state def make_steps(): return [('sel', SelectFromModel(Perceptron(tol=1e-3, random_state=None))), ('clf', Perceptron(tol=1e-3, random_state=None))] est1 = Pipeline(make_steps()) _set_random_states(est1, 3) assert_true(isinstance(est1.steps[0][1].estimator.random_state, int)) assert_true(isinstance(est1.steps[1][1].random_state, int)) assert_not_equal(est1.get_params()['sel__estimator__random_state'], est1.get_params()['clf__random_state']) # ensure multiple random_state parameters are invariant to get_params() # iteration order class AlphaParamPipeline(Pipeline): def get_params(self, *args, **kwargs): params = Pipeline.get_params(self, *args, **kwargs).items() return OrderedDict(sorted(params)) class RevParamPipeline(Pipeline): def get_params(self, *args, **kwargs): params = Pipeline.get_params(self, *args, **kwargs).items() return OrderedDict(sorted(params, reverse=True)) for cls in [AlphaParamPipeline, RevParamPipeline]: est2 = cls(make_steps()) _set_random_states(est2, 3) assert_equal(est1.get_params()['sel__estimator__random_state'], est2.get_params()['sel__estimator__random_state']) assert_equal(est1.get_params()['clf__random_state'], est2.get_params()['clf__random_state'])
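
# Illustrative sketch (not part of the upstream suite) of the behaviour the
# tests above verify: ``_set_random_states`` walks ``get_params(deep=True)``
# and replaces every parameter named ``random_state`` (including nested
# ``<step>__random_state`` keys) with an integer drawn from a single seed.
def demo_set_random_states_nested():
    pipe = Pipeline([('clf', Perceptron(tol=1e-3, random_state=None))])
    _set_random_states(pipe, random_state=42)
    # the nested parameter is now a fixed integer, so refits are reproducible
    assert isinstance(pipe.get_params()['clf__random_state'], int)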
5,277
38.984848
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_weight_boosting.py
"""Testing for the boost module (sklearn.ensemble.boost).""" import numpy as np from sklearn.utils.testing import assert_array_equal, assert_array_less from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal, assert_true, assert_greater from sklearn.utils.testing import assert_raises, assert_raises_regexp from sklearn.base import BaseEstimator from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import AdaBoostRegressor from sklearn.ensemble import weight_boosting from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix from sklearn.svm import SVC, SVR from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.utils import shuffle from sklearn import datasets # Common random state rng = np.random.RandomState(0) # Toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels y_regr = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] y_t_class = ["foo", 1, 1] y_t_regr = [-1, 1, 1] # Load the iris dataset and randomly permute it iris = datasets.load_iris() perm = rng.permutation(iris.target.size) iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng) # Load the boston dataset and randomly permute it boston = datasets.load_boston() boston.data, boston.target = shuffle(boston.data, boston.target, random_state=rng) def test_samme_proba(): # Test the `_samme_proba` helper function. # Define some example (bad) `predict_proba` output. probs = np.array([[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]]) probs /= np.abs(probs.sum(axis=1))[:, np.newaxis] # _samme_proba calls estimator.predict_proba. # Make a mock object so I can control what gets returned. class MockEstimator(object): def predict_proba(self, X): assert_array_equal(X.shape, probs.shape) return probs mock = MockEstimator() samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs)) assert_array_equal(samme_proba.shape, probs.shape) assert_true(np.isfinite(samme_proba).all()) # Make sure that the correct elements come out as smallest -- # `_samme_proba` should preserve the ordering in each example. assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2]) assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1]) def test_oneclass_adaboost_proba(): # Test predict_proba robustness for one class label input. # In response to issue #7501 # https://github.com/scikit-learn/scikit-learn/issues/7501 y_t = np.ones(len(X)) clf = AdaBoostClassifier().fit(X, y_t) assert_array_equal(clf.predict_proba(X), np.ones((len(X), 1))) def test_classification_toy(): # Check classification on a toy dataset. for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg, random_state=0) clf.fit(X, y_class) assert_array_equal(clf.predict(T), y_t_class) assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_) assert_equal(clf.predict_proba(T).shape, (len(T), 2)) assert_equal(clf.decision_function(T).shape, (len(T),)) def test_regression_toy(): # Check classification on a toy dataset. clf = AdaBoostRegressor(random_state=0) clf.fit(X, y_regr) assert_array_equal(clf.predict(T), y_t_regr) def test_iris(): # Check consistency on dataset iris. 
classes = np.unique(iris.target) clf_samme = prob_samme = None for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg) clf.fit(iris.data, iris.target) assert_array_equal(classes, clf.classes_) proba = clf.predict_proba(iris.data) if alg == "SAMME": clf_samme = clf prob_samme = proba assert_equal(proba.shape[1], len(classes)) assert_equal(clf.decision_function(iris.data).shape[1], len(classes)) score = clf.score(iris.data, iris.target) assert score > 0.9, "Failed with algorithm %s and score = %f" % \ (alg, score) # Check we used multiple estimators assert_greater(len(clf.estimators_), 1) # Check for distinct random states (see issue #7408) assert_equal(len(set(est.random_state for est in clf.estimators_)), len(clf.estimators_)) # Somewhat hacky regression test: prior to # ae7adc880d624615a34bafdb1d75ef67051b8200, # predict_proba returned SAMME.R values for SAMME. clf_samme.algorithm = "SAMME.R" assert_array_less(0, np.abs(clf_samme.predict_proba(iris.data) - prob_samme)) def test_boston(): # Check consistency on dataset boston house prices. reg = AdaBoostRegressor(random_state=0) reg.fit(boston.data, boston.target) score = reg.score(boston.data, boston.target) assert score > 0.85 # Check we used multiple estimators assert_true(len(reg.estimators_) > 1) # Check for distinct random states (see issue #7408) assert_equal(len(set(est.random_state for est in reg.estimators_)), len(reg.estimators_)) def test_staged_predict(): # Check staged predictions. rng = np.random.RandomState(0) iris_weights = rng.randint(10, size=iris.target.shape) boston_weights = rng.randint(10, size=boston.target.shape) # AdaBoost classification for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg, n_estimators=10) clf.fit(iris.data, iris.target, sample_weight=iris_weights) predictions = clf.predict(iris.data) staged_predictions = [p for p in clf.staged_predict(iris.data)] proba = clf.predict_proba(iris.data) staged_probas = [p for p in clf.staged_predict_proba(iris.data)] score = clf.score(iris.data, iris.target, sample_weight=iris_weights) staged_scores = [ s for s in clf.staged_score( iris.data, iris.target, sample_weight=iris_weights)] assert_equal(len(staged_predictions), 10) assert_array_almost_equal(predictions, staged_predictions[-1]) assert_equal(len(staged_probas), 10) assert_array_almost_equal(proba, staged_probas[-1]) assert_equal(len(staged_scores), 10) assert_array_almost_equal(score, staged_scores[-1]) # AdaBoost regression clf = AdaBoostRegressor(n_estimators=10, random_state=0) clf.fit(boston.data, boston.target, sample_weight=boston_weights) predictions = clf.predict(boston.data) staged_predictions = [p for p in clf.staged_predict(boston.data)] score = clf.score(boston.data, boston.target, sample_weight=boston_weights) staged_scores = [ s for s in clf.staged_score( boston.data, boston.target, sample_weight=boston_weights)] assert_equal(len(staged_predictions), 10) assert_array_almost_equal(predictions, staged_predictions[-1]) assert_equal(len(staged_scores), 10) assert_array_almost_equal(score, staged_scores[-1]) def test_gridsearch(): # Check that base trees can be grid-searched. 
# AdaBoost classification boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier()) parameters = {'n_estimators': (1, 2), 'base_estimator__max_depth': (1, 2), 'algorithm': ('SAMME', 'SAMME.R')} clf = GridSearchCV(boost, parameters) clf.fit(iris.data, iris.target) # AdaBoost regression boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(), random_state=0) parameters = {'n_estimators': (1, 2), 'base_estimator__max_depth': (1, 2)} clf = GridSearchCV(boost, parameters) clf.fit(boston.data, boston.target) def test_pickle(): # Check pickability. import pickle # Adaboost classifier for alg in ['SAMME', 'SAMME.R']: obj = AdaBoostClassifier(algorithm=alg) obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_equal(score, score2) # Adaboost regressor obj = AdaBoostRegressor(random_state=0) obj.fit(boston.data, boston.target) score = obj.score(boston.data, boston.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(boston.data, boston.target) assert_equal(score, score2) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=1) for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg) clf.fit(X, y) importances = clf.feature_importances_ assert_equal(importances.shape[0], 10) assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(), True) def test_error(): # Test that it gives proper exception on deficient input. assert_raises(ValueError, AdaBoostClassifier(learning_rate=-1).fit, X, y_class) assert_raises(ValueError, AdaBoostClassifier(algorithm="foo").fit, X, y_class) assert_raises(ValueError, AdaBoostClassifier().fit, X, y_class, sample_weight=np.asarray([-1])) def test_base_estimator(): # Test different base estimators. from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC # XXX doesn't work with y_class because RF doesn't support classes_ # Shouldn't AdaBoost run a LabelBinarizer? clf = AdaBoostClassifier(RandomForestClassifier()) clf.fit(X, y_regr) clf = AdaBoostClassifier(SVC(), algorithm="SAMME") clf.fit(X, y_class) from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0) clf.fit(X, y_regr) clf = AdaBoostRegressor(SVR(), random_state=0) clf.fit(X, y_regr) # Check that an empty discrete ensemble fails in fit, not predict. X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]] y_fail = ["foo", "bar", 1, 2] clf = AdaBoostClassifier(SVC(), algorithm="SAMME") assert_raises_regexp(ValueError, "worse than random", clf.fit, X_fail, y_fail) def test_sample_weight_missing(): from sklearn.cluster import KMeans clf = AdaBoostClassifier(KMeans(), algorithm="SAMME") assert_raises(ValueError, clf.fit, X, y_regr) clf = AdaBoostRegressor(KMeans()) assert_raises(ValueError, clf.fit, X, y_regr) def test_sparse_classification(): # Check classification with sparse input. 
    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit that carries the data type of X for
            later verification."""
            super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
                                                   n_features=5,
                                                   random_state=42)
    # Flatten y to a 1d array
    y = np.ravel(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        dense_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # decision_function
        sparse_results = sparse_classifier.decision_function(X_test_sparse)
        dense_results = dense_classifier.decision_function(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_log_proba
        sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
        dense_results = dense_classifier.predict_log_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_proba
        sparse_results = sparse_classifier.predict_proba(X_test_sparse)
        dense_results = dense_classifier.predict_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # score
        sparse_results = sparse_classifier.score(X_test_sparse, y_test)
        dense_results = dense_classifier.score(X_test, y_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_decision_function
        sparse_results = sparse_classifier.staged_decision_function(
            X_test_sparse)
        dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_predict_proba
        sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
        dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_score
        sparse_results = sparse_classifier.staged_score(X_test_sparse, y_test)
        dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # Verify sparsity of data is maintained during training
        types = [i.data_type_ for i in sparse_classifier.estimators_]

        assert all([(t == csc_matrix or t == csr_matrix) for t in types])


def test_sparse_regression():
    # Check regression with sparse input.
    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit that carries the data type of X for
            later verification."""
            super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
                                    random_state=42)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        dense_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # Verify sparsity of data is maintained during training
        types = [i.data_type_ for i in sparse_classifier.estimators_]

        assert all([(t == csc_matrix or t == csr_matrix) for t in types])


def test_sample_weight_adaboost_regressor():
    """
    AdaBoostRegressor should work without sample_weights in the base estimator
    The random weighted sampling is done internally in the _boost method in
    AdaBoostRegressor.
    """
    class DummyEstimator(BaseEstimator):

        def fit(self, X, y):
            pass

        def predict(self, X):
            return np.zeros(X.shape[0])

    boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
    boost.fit(X, y_regr)
    assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
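
# ---------------------------------------------------------------------------
# Editor's note -- a minimal, self-contained usage sketch of the AdaBoost API
# exercised by the tests above. The synthetic dataset and the parameter
# values here are illustrative assumptions, not taken from the test suite.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier

X_demo, y_demo = make_classification(n_samples=200, random_state=0)
clf_demo = AdaBoostClassifier(
    base_estimator=DecisionTreeClassifier(max_depth=1),  # decision stumps
    n_estimators=50,
    algorithm="SAMME.R",   # real-valued boosting; "SAMME" votes discretely
    random_state=0,
).fit(X_demo, y_demo)
# staged_predict yields the ensemble prediction after each boosting round,
# mirroring what test_staged_predict checks above.
staged_acc = [np.mean(p == y_demo) for p in clf_demo.staged_predict(X_demo)]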
17,975
35.987654
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_voting_classifier.py
"""Testing for the VotingClassifier""" import numpy as np from sklearn.utils.testing import assert_almost_equal, assert_array_equal from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_warns_message from sklearn.exceptions import NotFittedError from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier from sklearn.model_selection import GridSearchCV from sklearn import datasets from sklearn.model_selection import cross_val_score from sklearn.datasets import make_multilabel_classification from sklearn.svm import SVC from sklearn.multiclass import OneVsRestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.base import BaseEstimator, ClassifierMixin # Load the iris dataset and randomly permute it iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target def test_estimator_init(): eclf = VotingClassifier(estimators=[]) msg = ('Invalid `estimators` attribute, `estimators` should be' ' a list of (string, estimator) tuples') assert_raise_message(AttributeError, msg, eclf.fit, X, y) clf = LogisticRegression(random_state=1) eclf = VotingClassifier(estimators=[('lr', clf)], voting='error') msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')') assert_raise_message(ValueError, msg, eclf.fit, X, y) eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2]) msg = ('Number of classifiers and weights must be equal' '; got 2 weights, 1 estimators') assert_raise_message(ValueError, msg, eclf.fit, X, y) eclf = VotingClassifier(estimators=[('lr', clf), ('lr', clf)], weights=[1, 2]) msg = "Names provided are not unique: ['lr', 'lr']" assert_raise_message(ValueError, msg, eclf.fit, X, y) eclf = VotingClassifier(estimators=[('lr__', clf)]) msg = "Estimator names must not contain __: got ['lr__']" assert_raise_message(ValueError, msg, eclf.fit, X, y) eclf = VotingClassifier(estimators=[('estimators', clf)]) msg = "Estimator names conflict with constructor arguments: ['estimators']" assert_raise_message(ValueError, msg, eclf.fit, X, y) def test_predictproba_hardvoting(): eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()), ('lr2', LogisticRegression())], voting='hard') msg = "predict_proba is not available when voting='hard'" assert_raise_message(AttributeError, msg, eclf.predict_proba, X) def test_notfitted(): eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()), ('lr2', LogisticRegression())], voting='soft') msg = ("This VotingClassifier instance is not fitted yet. 
Call \'fit\'" " with appropriate arguments before using this method.") assert_raise_message(NotFittedError, msg, eclf.predict_proba, X) def test_majority_label_iris(): """Check classification by majority label on dataset iris.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy') assert_almost_equal(scores.mean(), 0.95, decimal=2) def test_tie_situation(): """Check voting classifier selects smaller class label in tie situation.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)], voting='hard') assert_equal(clf1.fit(X, y).predict(X)[73], 2) assert_equal(clf2.fit(X, y).predict(X)[73], 1) assert_equal(eclf.fit(X, y).predict(X)[73], 1) def test_weights_iris(): """Check classification by average probabilities on dataset iris.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[1, 2, 10]) scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy') assert_almost_equal(scores.mean(), 0.93, decimal=2) def test_predict_on_toy_problem(): """Manually check predicted class labels for toy dataset.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]]) y = np.array([1, 1, 1, 2, 2, 2]) assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard', weights=[1, 1, 1]) assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[1, 1, 1]) assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) def test_predict_proba_on_toy_problem(): """Calculate predicted probabilities on toy dataset.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) y = np.array([1, 1, 2, 2]) clf1_res = np.array([[0.59790391, 0.40209609], [0.57622162, 0.42377838], [0.50728456, 0.49271544], [0.40241774, 0.59758226]]) clf2_res = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.3, 0.7]]) clf3_res = np.array([[0.9985082, 0.0014918], [0.99845843, 0.00154157], [0., 1.], [0., 1.]]) t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4 t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4 t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4 t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4 eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[2, 1, 1]) eclf_res = eclf.fit(X, y).predict_proba(X) assert_almost_equal(t00, eclf_res[0][0], decimal=1) assert_almost_equal(t11, eclf_res[1][1], decimal=1) assert_almost_equal(t21, eclf_res[2][1], decimal=1) assert_almost_equal(t31, eclf_res[3][1], decimal=1) try: eclf = 
VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') eclf.fit(X, y).predict_proba(X) except AttributeError: pass else: raise AssertionError('AttributeError for voting == "hard"' ' and with predict_proba not raised') def test_multilabel(): """Check if error is raised for multilabel classification.""" X, y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=False, random_state=123) clf = OneVsRestClassifier(SVC(kernel='linear')) eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard') try: eclf.fit(X, y) except NotImplementedError: return def test_gridsearch(): """Check GridSearch support.""" clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft') params = {'lr__C': [1.0, 100.0], 'voting': ['soft', 'hard'], 'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) def test_parallel_fit(): """Check parallel backend of VotingClassifier on toy dataset.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) y = np.array([1, 1, 2, 2]) eclf1 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', n_jobs=1).fit(X, y) eclf2 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', n_jobs=2).fit(X, y) assert_array_equal(eclf1.predict(X), eclf2.predict(X)) assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) def test_sample_weight(): """Tests sample_weight parameter of VotingClassifier""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = SVC(probability=True, random_state=123) eclf1 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('svc', clf3)], voting='soft').fit(X, y, sample_weight=np.ones((len(y),))) eclf2 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('svc', clf3)], voting='soft').fit(X, y) assert_array_equal(eclf1.predict(X), eclf2.predict(X)) assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) sample_weight = np.random.RandomState(123).uniform(size=(len(y),)) eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft') eclf3.fit(X, y, sample_weight) clf1.fit(X, y, sample_weight) assert_array_equal(eclf3.predict(X), clf1.predict(X)) assert_array_equal(eclf3.predict_proba(X), clf1.predict_proba(X)) clf4 = KNeighborsClassifier() eclf3 = VotingClassifier(estimators=[ ('lr', clf1), ('svc', clf3), ('knn', clf4)], voting='soft') msg = ('Underlying estimator \'knn\' does not support sample weights.') assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight) def test_sample_weight_kwargs(): """Check that VotingClassifier passes sample_weight as kwargs""" class MockClassifier(BaseEstimator, ClassifierMixin): """Mock Classifier to check that sample_weight is received as kwargs""" def fit(self, X, y, *args, **sample_weight): assert_true('sample_weight' in sample_weight) clf = MockClassifier() eclf = VotingClassifier(estimators=[('mock', clf)], voting='soft') # Should not raise an error. 
eclf.fit(X, y, sample_weight=np.ones((len(y),))) def test_set_params(): """set_params should be able to set estimators""" clf1 = LogisticRegression(random_state=123, C=1.0) clf2 = RandomForestClassifier(random_state=123, max_depth=None) clf3 = GaussianNB() eclf1 = VotingClassifier([('lr', clf1), ('rf', clf2)], voting='soft', weights=[1, 2]) eclf1.fit(X, y) eclf2 = VotingClassifier([('lr', clf1), ('nb', clf3)], voting='soft', weights=[1, 2]) eclf2.set_params(nb=clf2).fit(X, y) assert_false(hasattr(eclf2, 'nb')) assert_array_equal(eclf1.predict(X), eclf2.predict(X)) assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) assert_equal(eclf2.estimators[0][1].get_params(), clf1.get_params()) assert_equal(eclf2.estimators[1][1].get_params(), clf2.get_params()) eclf1.set_params(lr__C=10.0) eclf2.set_params(nb__max_depth=5) assert_true(eclf1.estimators[0][1].get_params()['C'] == 10.0) assert_true(eclf2.estimators[1][1].get_params()['max_depth'] == 5) assert_equal(eclf1.get_params()["lr__C"], eclf1.get_params()["lr"].get_params()['C']) def test_set_estimator_none(): """VotingClassifier set_params should be able to set estimators as None""" # Test predict clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() eclf1 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('nb', clf3)], voting='hard', weights=[1, 0, 0.5]).fit(X, y) eclf2 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('nb', clf3)], voting='hard', weights=[1, 1, 0.5]) eclf2.set_params(rf=None).fit(X, y) assert_array_equal(eclf1.predict(X), eclf2.predict(X)) assert_true(dict(eclf2.estimators)["rf"] is None) assert_true(len(eclf2.estimators_) == 2) assert_true(all([not isinstance(est, RandomForestClassifier) for est in eclf2.estimators_])) assert_true(eclf2.get_params()["rf"] is None) eclf1.set_params(voting='soft').fit(X, y) eclf2.set_params(voting='soft').fit(X, y) assert_array_equal(eclf1.predict(X), eclf2.predict(X)) assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) msg = ('All estimators are None. 
At least one is required' ' to be a classifier!') assert_raise_message( ValueError, msg, eclf2.set_params(lr=None, rf=None, nb=None).fit, X, y) # Test soft voting transform X1 = np.array([[1], [2]]) y1 = np.array([1, 2]) eclf1 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)], voting='soft', weights=[0, 0.5]).fit(X1, y1) eclf2 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)], voting='soft', weights=[1, 0.5]) eclf2.set_params(rf=None).fit(X1, y1) assert_array_equal(eclf1.transform(X1), np.array([[[0.7, 0.3], [0.3, 0.7]], [[1., 0.], [0., 1.]]])) assert_array_equal(eclf2.transform(X1), np.array([[[1., 0.], [0., 1.]]])) eclf1.set_params(voting='hard') eclf2.set_params(voting='hard') assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]])) assert_array_equal(eclf2.transform(X1), np.array([[0], [1]])) def test_estimator_weights_format(): # Test estimator weights inputs as list and array clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) eclf1 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2)], weights=[1, 2], voting='soft') eclf2 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2)], weights=np.array((1, 2)), voting='soft') eclf1.fit(X, y) eclf2.fit(X, y) assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) def test_transform(): """Check transform method of VotingClassifier on toy dataset.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) y = np.array([1, 1, 2, 2]) eclf1 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft').fit(X, y) eclf2 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', flatten_transform=True).fit(X, y) eclf3 = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', flatten_transform=False).fit(X, y) warn_msg = ("'flatten_transform' default value will be " "changed to True in 0.21." "To silence this warning you may" " explicitly set flatten_transform=False.") res = assert_warns_message(DeprecationWarning, warn_msg, eclf1.transform, X) assert_array_equal(res.shape, (3, 4, 2)) assert_array_equal(eclf2.transform(X).shape, (4, 6)) assert_array_equal(eclf3.transform(X).shape, (3, 4, 2)) assert_array_equal(res.swapaxes(0, 1).reshape((4, 6)), eclf2.transform(X)) assert_array_equal(eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)), eclf2.transform(X))
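
# ---------------------------------------------------------------------------
# Editor's note -- a hedged sketch of the soft-voting behaviour tested above:
# predict_proba of a soft VotingClassifier is the weighted average of the
# per-estimator probabilities. The toy data is an assumption for illustration.

import numpy as np
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB

X_demo = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y_demo = np.array([1, 1, 2, 2])
eclf_demo = VotingClassifier(
    estimators=[('lr', LogisticRegression(random_state=123)),
                ('rf', RandomForestClassifier(random_state=123)),
                ('gnb', GaussianNB())],
    voting='soft',       # average predict_proba across the estimators
    weights=[2, 1, 1],   # per-estimator weights, as in the proba test above
).fit(X_demo, y_demo)
# Each row of predict_proba is the weighted mean of the three estimators'
# probability rows, normalised by the weight sum (2 + 1 + 1).
proba_demo = eclf_demo.predict_proba(X_demo)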
17,078
39.858852
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/base.py
"""Principal Component Analysis Base Classes""" # Author: Alexandre Gramfort <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Denis A. Engemann <[email protected]> # Kyle Kastner <[email protected]> # # License: BSD 3 clause import numpy as np from scipy import linalg from ..base import BaseEstimator, TransformerMixin from ..utils import check_array from ..utils.validation import check_is_fitted from ..externals import six from abc import ABCMeta, abstractmethod class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)): """Base class for PCA methods. Warning: This class should not be used directly. Use derived classes instead. """ def get_covariance(self): """Compute data covariance with the generative model. ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` where S**2 contains the explained variances, and sigma2 contains the noise variances. Returns ------- cov : array, shape=(n_features, n_features) Estimated covariance of data. """ components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) cov = np.dot(components_.T * exp_var_diff, components_) cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace return cov def get_precision(self): """Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data. """ n_features = self.components_.shape[1] # handle corner cases first if self.n_components_ == 0: return np.eye(n_features) / self.noise_variance_ if self.n_components_ == n_features: return linalg.inv(self.get_covariance()) # Get precision using matrix inversion lemma components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) precision = np.dot(components_, components_.T) / self.noise_variance_ precision.flat[::len(precision) + 1] += 1. / exp_var_diff precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) precision /= -(self.noise_variance_ ** 2) precision.flat[::len(precision) + 1] += 1. / self.noise_variance_ return precision @abstractmethod def fit(X, y=None): """Placeholder for fit. Subclasses should implement this method! Fit the model with X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ def transform(self, X): """Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. 
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------

        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = np.dot(X, self.components_.T)
        if self.whiten:
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            return np.dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                          self.components_) + self.mean_
        else:
            return np.dot(X, self.components_) + self.mean_
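
# ---------------------------------------------------------------------------
# Editor's note -- a short sketch of the generative covariance/precision model
# implemented by _BasePCA, shown through PCA (a concrete subclass). The random
# data is an assumption; the identity check follows from the docstrings above.

import numpy as np
from sklearn.decomposition import PCA

rng_demo = np.random.RandomState(0)
X_demo = rng_demo.randn(100, 5)
pca_demo = PCA(n_components=3).fit(X_demo)
cov = pca_demo.get_covariance()   # components_.T * S**2 * components_ + noise
prec = pca_demo.get_precision()   # inverse via the matrix inversion lemma
# Precision should invert the model covariance up to floating-point error.
assert np.allclose(cov.dot(prec), np.eye(X_demo.shape[1]))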
5,596
33.763975
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/truncated_svd.py
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). """ # Author: Lars Buitinck # Olivier Grisel <[email protected]> # Michael Becker <[email protected]> # License: 3-clause BSD. import numpy as np import scipy.sparse as sp from scipy.sparse.linalg import svds from ..base import BaseEstimator, TransformerMixin from ..utils import check_array, check_random_state from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip from ..utils.sparsefuncs import mean_variance_axis __all__ = ["TruncatedSVD"] class TruncatedSVD(BaseEstimator, TransformerMixin): """Dimensionality reduction using truncated SVD (aka LSA). This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). Contrary to PCA, this estimator does not center the data before computing the singular value decomposition. This means it can work with scipy.sparse matrices efficiently. In particular, truncated SVD works on term count/tf-idf matrices as returned by the vectorizers in sklearn.feature_extraction.text. In that context, it is known as latent semantic analysis (LSA). This estimator supports two algorithms: a fast randomized SVD solver, and a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or (X.T * X), whichever is more efficient. Read more in the :ref:`User Guide <LSA>`. Parameters ---------- n_components : int, default = 2 Desired dimensionality of output data. Must be strictly less than the number of features. The default value is useful for visualisation. For LSA, a value of 100 is recommended. algorithm : string, default = "randomized" SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds), or "randomized" for the randomized algorithm due to Halko (2009). n_iter : int, optional (default 5) Number of iterations for randomized SVD solver. Not used by ARPACK. The default is larger than the default in `randomized_svd` to handle sparse matrices that may have large slowly decaying spectrum. random_state : int, RandomState instance or None, optional, default = None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. tol : float, optional Tolerance for ARPACK. 0 means machine precision. Ignored by randomized SVD solver. Attributes ---------- components_ : array, shape (n_components, n_features) explained_variance_ : array, shape (n_components,) The variance of the training samples transformed by a projection to each component. explained_variance_ratio_ : array, shape (n_components,) Percentage of variance explained by each of the selected components. singular_values_ : array, shape (n_components,) The singular values corresponding to each of the selected components. The singular values are equal to the 2-norms of the ``n_components`` variables in the lower-dimensional space. Examples -------- >>> from sklearn.decomposition import TruncatedSVD >>> from sklearn.random_projection import sparse_random_matrix >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42) >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42) >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7, random_state=42, tol=0.0) >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.0606... 0.0584... 0.0497... 0.0434... 0.0372...] 
    >>> print(svd.explained_variance_ratio_.sum())  # doctest: +ELLIPSIS
    0.249...
    >>> print(svd.singular_values_)  # doctest: +ELLIPSIS
    [ 2.5841...  2.5245...  2.3201...  2.1753...  2.0443...]

    See also
    --------
    PCA
    RandomizedPCA

    References
    ----------
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061)
    http://arxiv.org/pdf/0909.4061

    Notes
    -----
    SVD suffers from a problem called "sign indeterminacy", which means the
    sign of the ``components_`` and the output from transform depend on the
    algorithm and random state. To work around this, fit instances of this
    class to data once, then keep the instance around to do transformations.

    """
    def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
                 random_state=None, tol=0.):
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.random_state = random_state
        self.tol = tol

    def fit(self, X, y=None):
        """Fit LSI model on training data X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        y : Ignored.

        Returns
        -------
        self : object
            Returns the transformer object.
        """
        self.fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit LSI model to X and perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        y : Ignored.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = check_array(X, accept_sparse=['csr', 'csc'])
        random_state = check_random_state(self.random_state)

        if self.algorithm == "arpack":
            U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            Sigma = Sigma[::-1]
            U, VT = svd_flip(U[:, ::-1], VT[::-1])

        elif self.algorithm == "randomized":
            k = self.n_components
            n_features = X.shape[1]
            if k >= n_features:
                raise ValueError("n_components must be < n_features;"
                                 " got %d >= %d" % (k, n_features))
            U, Sigma, VT = randomized_svd(X, self.n_components,
                                          n_iter=self.n_iter,
                                          random_state=random_state)
        else:
            raise ValueError("unknown algorithm %r" % self.algorithm)

        self.components_ = VT

        # Calculate explained variance & explained variance ratio
        X_transformed = U * Sigma
        self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
        if sp.issparse(X):
            _, full_var = mean_variance_axis(X, axis=0)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        self.singular_values_ = Sigma  # Store the singular values.

        return X_transformed

    def transform(self, X):
        """Perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = check_array(X, accept_sparse='csr')
        return safe_sparse_dot(X, self.components_.T)

    def inverse_transform(self, X):
        """Transform X back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data.

        Returns
        -------
        X_original : array, shape (n_samples, n_features)
            Note that this is always a dense array.
        """
        X = check_array(X)
        return np.dot(X, self.components_)
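
# ---------------------------------------------------------------------------
# Editor's note -- a hedged LSA sketch: TruncatedSVD applied to a sparse
# tf-idf matrix, the use case named in the docstring above. The toy corpus is
# an assumption for illustration.

from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

docs_demo = ["sparse matrices work well here",
             "truncated svd handles sparse tf-idf input",
             "no centering is applied to the data"]
X_tfidf = TfidfVectorizer().fit_transform(docs_demo)  # sparse CSR matrix
svd_demo = TruncatedSVD(n_components=2, n_iter=7, random_state=42)
X_lsa = svd_demo.fit_transform(X_tfidf)  # dense (n_docs, 2) array
# Unlike PCA, no centering happened, so sparsity was preserved end to end.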
8,345
35.286957
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/setup.py
import os import numpy from numpy.distutils.misc_util import Configuration def configuration(parent_package="", top_path=None): config = Configuration("decomposition", parent_package, top_path) libraries = [] if os.name == 'posix': libraries.append('m') config.add_extension("_online_lda", sources=["_online_lda.pyx"], include_dirs=[numpy.get_include()], libraries=libraries) config.add_extension('cdnmf_fast', sources=['cdnmf_fast.pyx'], include_dirs=[numpy.get_include()], libraries=libraries) config.add_subpackage("tests") return config if __name__ == "__main__": from numpy.distutils.core import setup setup(**configuration().todict())
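
# ---------------------------------------------------------------------------
# Editor's note -- an illustrative sketch only: a numpy.distutils
# Configuration like the one above is usually consumed by running
# "python setup.py build_ext --inplace" to compile the .pyx extensions.
# Programmatically, the same object can be inspected as below (the
# parent_package value here is an assumption).

from numpy.distutils.misc_util import Configuration

config_demo = Configuration("decomposition", parent_package="sklearn",
                            top_path=None)
print(config_demo.name)            # 'sklearn.decomposition'
print(sorted(config_demo.todict()))  # arguments ultimately passed to setup()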
843
27.133333
69
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/kernel_pca.py
"""Kernel Principal Components Analysis""" # Author: Mathieu Blondel <[email protected]> # License: BSD 3 clause import numpy as np from scipy import linalg from scipy.sparse.linalg import eigsh from ..utils import check_random_state from ..utils.validation import check_is_fitted, check_array from ..exceptions import NotFittedError from ..base import BaseEstimator, TransformerMixin from ..preprocessing import KernelCenterer from ..metrics.pairwise import pairwise_kernels class KernelPCA(BaseEstimator, TransformerMixin): """Kernel Principal component analysis (KPCA) Non-linear dimensionality reduction through the use of kernels (see :ref:`metrics`). Read more in the :ref:`User Guide <kernel_PCA>`. Parameters ---------- n_components : int, default=None Number of components. If None, all non-zero components are kept. kernel : "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed" Kernel. Default="linear". gamma : float, default=1/n_features Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other kernels. degree : int, default=3 Degree for poly kernels. Ignored by other kernels. coef0 : float, default=1 Independent term in poly and sigmoid kernels. Ignored by other kernels. kernel_params : mapping of string to any, default=None Parameters (keyword arguments) and values for kernel passed as callable object. Ignored by other kernels. alpha : int, default=1.0 Hyperparameter of the ridge regression that learns the inverse transform (when fit_inverse_transform=True). fit_inverse_transform : bool, default=False Learn the inverse transform for non-precomputed kernels. (i.e. learn to find the pre-image of a point) eigen_solver : string ['auto'|'dense'|'arpack'], default='auto' Select eigensolver to use. If n_components is much less than the number of training samples, arpack may be more efficient than the dense eigensolver. tol : float, default=0 Convergence tolerance for arpack. If 0, optimal value will be chosen by arpack. max_iter : int, default=None Maximum number of iterations for arpack. If None, optimal value will be chosen by arpack. remove_zero_eig : boolean, default=False If True, then all components with zero eigenvalues are removed, so that the number of components in the output may be < n_components (and sometimes even zero due to numerical instability). When n_components is None, this parameter is ignored and components with zero eigenvalues are removed regardless. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``eigen_solver`` == 'arpack'. .. versionadded:: 0.18 copy_X : boolean, default=True If True, input X is copied and stored by the model in the `X_fit_` attribute. If no further changes will be done to X, setting `copy_X=False` saves memory by storing a reference. .. versionadded:: 0.18 n_jobs : int, default=1 The number of parallel jobs to run. If `-1`, then the number of jobs is set to the number of CPU cores. .. versionadded:: 0.18 Attributes ---------- lambdas_ : array, (n_components,) Eigenvalues of the centered kernel matrix in decreasing order. If `n_components` and `remove_zero_eig` are not set, then all values are stored. alphas_ : array, (n_samples, n_components) Eigenvectors of the centered kernel matrix. If `n_components` and `remove_zero_eig` are not set, then all components are stored. 
    dual_coef_ : array, (n_samples, n_features)
        Inverse transform matrix. Set if `fit_inverse_transform` is True.

    X_transformed_fit_ : array, (n_samples, n_components)
        Projection of the fitted data on the kernel principal components.

    X_fit_ : (n_samples, n_features)
        The data used to fit the model. If `copy_X=False`, then `X_fit_` is
        a reference. This attribute is used for the calls to transform.

    References
    ----------
    Kernel PCA was introduced in:
        Bernhard Schoelkopf, Alexander J. Smola,
        and Klaus-Robert Mueller. 1999. Kernel principal
        component analysis. In Advances in kernel methods,
        MIT Press, Cambridge, MA, USA 327-352.
    """

    def __init__(self, n_components=None, kernel="linear",
                 gamma=None, degree=3, coef0=1, kernel_params=None,
                 alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
                 tol=0, max_iter=None, remove_zero_eig=False,
                 random_state=None, copy_X=True, n_jobs=1):
        if fit_inverse_transform and kernel == 'precomputed':
            raise ValueError(
                "Cannot fit_inverse_transform with a precomputed kernel.")
        self.n_components = n_components
        self.kernel = kernel
        self.kernel_params = kernel_params
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.alpha = alpha
        self.fit_inverse_transform = fit_inverse_transform
        self.eigen_solver = eigen_solver
        self.remove_zero_eig = remove_zero_eig
        self.tol = tol
        self.max_iter = max_iter
        self._centerer = KernelCenterer()
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.copy_X = copy_X

    @property
    def _pairwise(self):
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, n_jobs=self.n_jobs,
                                **params)

    def _fit_transform(self, K):
        """Fit using kernel K"""
        # center kernel
        K = self._centerer.fit_transform(K)

        if self.n_components is None:
            n_components = K.shape[0]
        else:
            n_components = min(K.shape[0], self.n_components)

        # compute eigenvectors
        if self.eigen_solver == 'auto':
            if K.shape[0] > 200 and n_components < 10:
                eigen_solver = 'arpack'
            else:
                eigen_solver = 'dense'
        else:
            eigen_solver = self.eigen_solver

        if eigen_solver == 'dense':
            self.lambdas_, self.alphas_ = linalg.eigh(
                K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
        elif eigen_solver == 'arpack':
            random_state = check_random_state(self.random_state)
            # initialize with [-1,1] as in ARPACK
            v0 = random_state.uniform(-1, 1, K.shape[0])
            self.lambdas_, self.alphas_ = eigsh(K, n_components,
                                                which="LA",
                                                tol=self.tol,
                                                maxiter=self.max_iter,
                                                v0=v0)

        # sort eigenvectors in descending order
        indices = self.lambdas_.argsort()[::-1]
        self.lambdas_ = self.lambdas_[indices]
        self.alphas_ = self.alphas_[:, indices]

        # remove eigenvectors with a zero eigenvalue
        if self.remove_zero_eig or self.n_components is None:
            self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
            self.lambdas_ = self.lambdas_[self.lambdas_ > 0]

        return K

    def _fit_inverse_transform(self, X_transformed, X):
        if hasattr(X, "tocsr"):
            raise NotImplementedError("Inverse transform not implemented for "
                                      "sparse matrices!")

        n_samples = X_transformed.shape[0]
        K = self._get_kernel(X_transformed)
        K.flat[::n_samples + 1] += self.alpha
        self.dual_coef_ = linalg.solve(K, X, sym_pos=True, overwrite_a=True)
        self.X_transformed_fit_ = X_transformed

    def fit(self, X, y=None):
        """Fit the model from data in X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = check_array(X, accept_sparse='csr', copy=self.copy_X)
        K = self._get_kernel(X)
        self._fit_transform(K)

        if self.fit_inverse_transform:
            sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
            X_transformed = np.dot(self.alphas_, sqrt_lambdas)
            self._fit_inverse_transform(X_transformed, X)

        self.X_fit_ = X
        return self

    def fit_transform(self, X, y=None, **params):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self.fit(X, **params)

        X_transformed = self.alphas_ * np.sqrt(self.lambdas_)

        if self.fit_inverse_transform:
            self._fit_inverse_transform(X_transformed, X)

        return X_transformed

    def transform(self, X):
        """Transform X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'X_fit_')

        K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
        return np.dot(K, self.alphas_ / np.sqrt(self.lambdas_))

    def inverse_transform(self, X):
        """Transform X back to original space.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)

        Returns
        -------
        X_new : array-like, shape (n_samples, n_features)

        References
        ----------
        "Learning to Find Pre-Images", G BakIr et al, 2004.
        """
        if not self.fit_inverse_transform:
            raise NotFittedError("The fit_inverse_transform parameter was not"
                                 " set to True when instantiating and hence "
                                 "the inverse transform is not available.")

        K = self._get_kernel(X, self.X_transformed_fit_)

        return np.dot(K, self.dual_coef_)
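
# ---------------------------------------------------------------------------
# Editor's note -- a minimal KernelPCA sketch with an RBF kernel and the
# learned inverse transform described above. The circles dataset and the
# gamma value are assumptions for illustration.

from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA

X_demo, y_demo = make_circles(n_samples=100, factor=0.3, noise=0.05,
                              random_state=0)
kpca_demo = KernelPCA(kernel="rbf", gamma=10, n_components=2,
                      fit_inverse_transform=True)  # enables inverse_transform
X_kpca = kpca_demo.fit_transform(X_demo)
X_back = kpca_demo.inverse_transform(X_kpca)  # approximate pre-images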
11,152
34.977419
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/dict_learning.py
""" Dictionary learning """ from __future__ import print_function # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort # License: BSD 3 clause import time import sys import itertools from math import sqrt, ceil import numpy as np from scipy import linalg from numpy.lib.stride_tricks import as_strided from ..base import BaseEstimator, TransformerMixin from ..externals.joblib import Parallel, delayed, cpu_count from ..externals.six.moves import zip from ..utils import (check_array, check_random_state, gen_even_slices, gen_batches, _get_n_jobs) from ..utils.extmath import randomized_svd, row_norms from ..utils.validation import check_is_fitted from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars', regularization=None, copy_cov=True, init=None, max_iter=1000, check_input=True, verbose=0): """Generic sparse coding Each column of the result is the solution to a Lasso problem. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix. dictionary : array of shape (n_components, n_features) The dictionary matrix against which to solve the sparse coding of the data. Some of the algorithms assume normalized rows. gram : None | array, shape=(n_components, n_components) Precomputed Gram matrix, dictionary * dictionary' gram can be None if method is 'threshold'. cov : array, shape=(n_components, n_samples) Precomputed covariance, dictionary * X' algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'} lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). lasso_lars will be faster if the estimated components are sparse. omp: uses orthogonal matching pursuit to estimate the sparse solution threshold: squashes to zero all coefficients less than regularization from the projection dictionary * data' regularization : int | float The regularization parameter. It corresponds to alpha when algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'. Otherwise it corresponds to n_nonzero_coefs. init : array of shape (n_samples, n_components) Initialization value of the sparse code. Only used if `algorithm='lasso_cd'`. max_iter : int, 1000 by default Maximum number of iterations to perform if `algorithm='lasso_cd'`. copy_cov : boolean, optional Whether to copy the precomputed covariance matrix; if False, it may be overwritten. check_input : boolean, optional If False, the input arrays X and dictionary will not be checked. verbose : int Controls the verbosity; the higher, the more messages. Defaults to 0. Returns ------- code : array of shape (n_components, n_features) The sparse codes See also -------- sklearn.linear_model.lars_path sklearn.linear_model.orthogonal_mp sklearn.linear_model.Lasso SparseCoder """ if X.ndim == 1: X = X[:, np.newaxis] n_samples, n_features = X.shape n_components = dictionary.shape[0] if dictionary.shape[1] != X.shape[1]: raise ValueError("Dictionary and X have different numbers of features:" "dictionary.shape: {} X.shape{}".format( dictionary.shape, X.shape)) if cov is None and algorithm != 'lasso_cd': # overwriting cov is safe copy_cov = False cov = np.dot(dictionary, X.T) if algorithm == 'lasso_lars': alpha = float(regularization) / n_features # account for scaling try: err_mgt = np.seterr(all='ignore') # Not passing in verbose=max(0, verbose-1) because Lars.fit already # corrects the verbosity level. 
            lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
                                   verbose=verbose, normalize=False,
                                   precompute=gram, fit_path=False)
            lasso_lars.fit(dictionary.T, X.T, Xy=cov)
            new_code = lasso_lars.coef_
        finally:
            np.seterr(**err_mgt)

    elif algorithm == 'lasso_cd':
        alpha = float(regularization) / n_features  # account for scaling

        # TODO: Make verbosity argument for Lasso?
        # sklearn.linear_model.coordinate_descent.enet_path has a verbosity
        # argument that we could pass in from Lasso.
        clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
                    precompute=gram, max_iter=max_iter, warm_start=True)

        if init is not None:
            clf.coef_ = init

        clf.fit(dictionary.T, X.T, check_input=check_input)
        new_code = clf.coef_

    elif algorithm == 'lars':
        try:
            err_mgt = np.seterr(all='ignore')

            # Not passing in verbose=max(0, verbose-1) because Lars.fit already
            # corrects the verbosity level.
            lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
                        precompute=gram, n_nonzero_coefs=int(regularization),
                        fit_path=False)
            lars.fit(dictionary.T, X.T, Xy=cov)
            new_code = lars.coef_
        finally:
            np.seterr(**err_mgt)

    elif algorithm == 'threshold':
        new_code = ((np.sign(cov) *
                    np.maximum(np.abs(cov) - regularization, 0)).T)

    elif algorithm == 'omp':
        # TODO: Should verbose argument be passed to this?
        new_code = orthogonal_mp_gram(
            Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
            tol=None, norms_squared=row_norms(X, squared=True),
            copy_Xy=copy_cov).T
    else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", '
                         'got %s.' % algorithm)
    if new_code.ndim != 2:
        return new_code.reshape(n_samples, n_components)
    return new_code


# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
                  n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
                  max_iter=1000, n_jobs=1, check_input=True, verbose=0):
    """Sparse coding

    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::

        X ~= code * dictionary

    Read more in the :ref:`User Guide <SparseCoder>`.

    Parameters
    ----------
    X : array of shape (n_samples, n_features)
        Data matrix

    dictionary : array of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows for meaningful
        output.

    gram : array, shape=(n_components, n_components)
        Precomputed Gram matrix, dictionary * dictionary'

    cov : array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'

    algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection dictionary * X'

    n_nonzero_coefs : int, 0.1 * n_features by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and
        `algorithm='omp'` and is overridden by `alpha` in the `omp` case.

    alpha : float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the threshold below which coefficients will be squashed to zero. If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of the reconstruction error targeted. In this case, it overrides `n_nonzero_coefs`. copy_cov : boolean, optional Whether to copy the precomputed covariance matrix; if False, it may be overwritten. init : array of shape (n_samples, n_components) Initialization value of the sparse codes. Only used if `algorithm='lasso_cd'`. max_iter : int, 1000 by default Maximum number of iterations to perform if `algorithm='lasso_cd'`. n_jobs : int, optional Number of parallel jobs to run. check_input : boolean, optional If False, the input arrays X and dictionary will not be checked. verbose : int, optional Controls the verbosity; the higher, the more messages. Defaults to 0. Returns ------- code : array of shape (n_samples, n_components) The sparse codes See also -------- sklearn.linear_model.lars_path sklearn.linear_model.orthogonal_mp sklearn.linear_model.Lasso SparseCoder """ if check_input: if algorithm == 'lasso_cd': dictionary = check_array(dictionary, order='C', dtype='float64') X = check_array(X, order='C', dtype='float64') else: dictionary = check_array(dictionary) X = check_array(X) n_samples, n_features = X.shape n_components = dictionary.shape[0] if gram is None and algorithm != 'threshold': gram = np.dot(dictionary, dictionary.T) if cov is None and algorithm != 'lasso_cd': copy_cov = False cov = np.dot(dictionary, X.T) if algorithm in ('lars', 'omp'): regularization = n_nonzero_coefs if regularization is None: regularization = min(max(n_features / 10, 1), n_components) else: regularization = alpha if regularization is None: regularization = 1. if n_jobs == 1 or algorithm == 'threshold': code = _sparse_encode(X, dictionary, gram, cov=cov, algorithm=algorithm, regularization=regularization, copy_cov=copy_cov, init=init, max_iter=max_iter, check_input=False, verbose=verbose) return code # Enter parallel code block code = np.empty((n_samples, n_components)) slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs))) code_views = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_sparse_encode)( X[this_slice], dictionary, gram, cov[:, this_slice] if cov is not None else None, algorithm, regularization=regularization, copy_cov=copy_cov, init=init[this_slice] if init is not None else None, max_iter=max_iter, check_input=False) for this_slice in slices) for this_slice, this_view in zip(slices, code_views): code[this_slice] = this_view return code def _update_dict(dictionary, Y, code, verbose=False, return_r2=False, random_state=None): """Update the dense dictionary factor in place. Parameters ---------- dictionary : array of shape (n_features, n_components) Value of the dictionary at the previous iteration. Y : array of shape (n_features, n_samples) Data matrix. code : array of shape (n_components, n_samples) Sparse coding of the data against which to optimize the dictionary. verbose: Degree of output the procedure will print. return_r2 : bool Whether to compute and return the residual sum of squares corresponding to the computed solution. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
Returns ------- dictionary : array of shape (n_features, n_components) Updated dictionary. """ n_components = len(code) n_samples = Y.shape[0] random_state = check_random_state(random_state) # Residuals, computed 'in-place' for efficiency R = -np.dot(dictionary, code) R += Y R = np.asfortranarray(R) ger, = linalg.get_blas_funcs(('ger',), (dictionary, code)) for k in range(n_components): # R <- 1.0 * U_k * V_k^T + R R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True) dictionary[:, k] = np.dot(R, code[k, :].T) # Scale k'th atom atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k]) if atom_norm_square < 1e-20: if verbose == 1: sys.stdout.write("+") sys.stdout.flush() elif verbose: print("Adding new random atom") dictionary[:, k] = random_state.randn(n_samples) # Setting corresponding coefs to 0 code[k, :] = 0.0 dictionary[:, k] /= sqrt(np.dot(dictionary[:, k], dictionary[:, k])) else: dictionary[:, k] /= sqrt(atom_norm_square) # R <- -1.0 * U_k * V_k^T + R R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True) if return_r2: R **= 2 # R is fortran-ordered. For numpy version < 1.6, sum does not # follow the quick striding first, and is thus inefficient on # fortran ordered data. We take a flat view of the data with no # striding R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,)) R = np.sum(R) return dictionary, R return dictionary def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8, method='lars', n_jobs=1, dict_init=None, code_init=None, callback=None, verbose=False, random_state=None, return_n_iter=False): """Solves a dictionary learning matrix factorization problem. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components where V is the dictionary and U is the sparse code. Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix. n_components : int, Number of dictionary atoms to extract. alpha : int, Sparsity controlling parameter. max_iter : int, Maximum number of iterations to perform. tol : float, Tolerance for the stopping condition. method : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. n_jobs : int, Number of parallel jobs to run, or -1 to autodetect. dict_init : array of shape (n_components, n_features), Initial value for the dictionary for warm restart scenarios. code_init : array of shape (n_samples, n_components), Initial value for the sparse code for warm restart scenarios. callback : callable or None, optional (default: None) Callable that gets invoked every five iterations verbose : bool, optional (default: False) To control the verbosity of the procedure. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_n_iter : bool Whether or not to return the number of iterations. Returns ------- code : array of shape (n_samples, n_components) The sparse code factor in the matrix factorization. 
    dictionary : array of shape (n_components, n_features),
        The dictionary factor in the matrix factorization.

    errors : array
        Vector of errors at each iteration.

    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is
        set to True.

    See also
    --------
    dict_learning_online
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
    method = 'lasso_' + method

    t0 = time.time()
    # Avoid integer division problems
    alpha = float(alpha)
    random_state = check_random_state(random_state)

    if n_jobs == -1:
        n_jobs = cpu_count()

    # Init the code and the dictionary with SVD of Y
    if code_init is not None and dict_init is not None:
        code = np.array(code_init, order='F')
        # Don't copy V, it will happen below
        dictionary = dict_init
    else:
        code, S, dictionary = linalg.svd(X, full_matrices=False)
        dictionary = S[:, np.newaxis] * dictionary
    r = len(dictionary)
    if n_components is None or n_components <= r:
        # The None case is handled explicitly: under Python 3, a plain
        # `n_components <= r` would raise a TypeError for n_components=None.
        code = code[:, :n_components]
        dictionary = dictionary[:n_components, :]
    else:
        code = np.c_[code, np.zeros((len(code), n_components - r))]
        dictionary = np.r_[dictionary,
                           np.zeros((n_components - r, dictionary.shape[1]))]

    # Fortran-order dict, as we are going to access its row vectors
    dictionary = np.array(dictionary, order='F')

    residuals = 0

    errors = []
    current_cost = np.nan

    if verbose == 1:
        print('[dict_learning]', end=' ')

    # If max_iter is 0, number of iterations returned should be zero
    ii = -1

    for ii in range(max_iter):
        dt = (time.time() - t0)
        if verbose == 1:
            sys.stdout.write(".")
            sys.stdout.flush()
        elif verbose:
            print("Iteration % 3i "
                  "(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
                  % (ii, dt, dt / 60, current_cost))

        # Update code
        code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
                             init=code, n_jobs=n_jobs)
        # Update dictionary
        dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
                                             verbose=verbose, return_r2=True,
                                             random_state=random_state)
        dictionary = dictionary.T

        # Cost function
        current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
        errors.append(current_cost)

        if ii > 0:
            dE = errors[-2] - errors[-1]
            # assert(dE >= -tol * errors[-1])
            if dE < tol * errors[-1]:
                if verbose == 1:
                    # A line return
                    print("")
                elif verbose:
                    print("--- Convergence reached after %d iterations" % ii)
                break
        if ii % 5 == 0 and callback is not None:
            callback(locals())

    if return_n_iter:
        return code, dictionary, errors, ii + 1
    else:
        return code, dictionary, errors


def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
                         return_code=True, dict_init=None, callback=None,
                         batch_size=3, verbose=False, shuffle=True, n_jobs=1,
                         method='lars', iter_offset=0, random_state=None,
                         return_inner_stats=False, inner_stats=None,
                         return_n_iter=False):
    """Solves a dictionary learning matrix factorization problem online.

    Finds the best dictionary and the corresponding sparse code for
    approximating the data matrix X by solving::

        (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
                     (U,V)
                     with || V_k ||_2 = 1 for all  0 <= k < n_components

    where V is the dictionary and U is the sparse code. This is
    accomplished by repeatedly iterating over mini-batches by slicing
    the input data.

    Read more in the :ref:`User Guide <DictionaryLearning>`.

    Parameters
    ----------
    X : array of shape (n_samples, n_features)
        Data matrix.

    n_components : int,
        Number of dictionary atoms to extract.

    alpha : float,
        Sparsity controlling parameter.

    n_iter : int,
        Number of iterations to perform.
    return_code : boolean,
        Whether to also return the code U or just the dictionary V.

    dict_init : array of shape (n_components, n_features),
        Initial value for the dictionary for warm restart scenarios.

    callback : callable or None, optional (default: None)
        Callable that gets invoked every five iterations.

    batch_size : int,
        The number of samples to take in each batch.

    verbose : bool, optional (default: False)
        To control the verbosity of the procedure.

    shuffle : boolean,
        Whether to shuffle the data before splitting it in batches.

    n_jobs : int,
        Number of parallel jobs to run, or -1 to autodetect.

    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.

    iter_offset : int, default 0
        Number of previous iterations completed on the dictionary used for
        initialization.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    return_inner_stats : boolean, optional
        Return the inner statistics A (dictionary covariance) and B
        (data approximation). Useful to restart the algorithm in an
        online setting. If return_inner_stats is True, return_code is
        ignored.

    inner_stats : tuple of (A, B) ndarrays
        Inner sufficient statistics that are kept by the algorithm.
        Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
        A (n_components, n_components) is the dictionary covariance matrix.
        B (n_features, n_components) is the data approximation matrix.

    return_n_iter : bool
        Whether or not to return the number of iterations.

    Returns
    -------
    code : array of shape (n_samples, n_components),
        The sparse code (only returned if `return_code=True`).

    dictionary : array of shape (n_components, n_features),
        The solutions to the dictionary learning problem.

    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is
        set to `True`.
See also -------- dict_learning DictionaryLearning MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA """ if n_components is None: n_components = X.shape[1] if method not in ('lars', 'cd'): raise ValueError('Coding method not supported as a fit algorithm.') method = 'lasso_' + method t0 = time.time() n_samples, n_features = X.shape # Avoid integer division problems alpha = float(alpha) random_state = check_random_state(random_state) if n_jobs == -1: n_jobs = cpu_count() # Init V with SVD of X if dict_init is not None: dictionary = dict_init else: _, S, dictionary = randomized_svd(X, n_components, random_state=random_state) dictionary = S[:, np.newaxis] * dictionary r = len(dictionary) if n_components <= r: dictionary = dictionary[:n_components, :] else: dictionary = np.r_[dictionary, np.zeros((n_components - r, dictionary.shape[1]))] if verbose == 1: print('[dict_learning]', end=' ') if shuffle: X_train = X.copy() random_state.shuffle(X_train) else: X_train = X dictionary = check_array(dictionary.T, order='F', dtype=np.float64, copy=False) X_train = check_array(X_train, order='C', dtype=np.float64, copy=False) batches = gen_batches(n_samples, batch_size) batches = itertools.cycle(batches) # The covariance of the dictionary if inner_stats is None: A = np.zeros((n_components, n_components)) # The data approximation B = np.zeros((n_features, n_components)) else: A = inner_stats[0].copy() B = inner_stats[1].copy() # If n_iter is zero, we need to return zero. ii = iter_offset - 1 for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches): this_X = X_train[batch] dt = (time.time() - t0) if verbose == 1: sys.stdout.write(".") sys.stdout.flush() elif verbose: if verbose > 10 or ii % ceil(100. / verbose) == 0: print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)" % (ii, dt, dt / 60)) this_code = sparse_encode(this_X, dictionary.T, algorithm=method, alpha=alpha, n_jobs=n_jobs).T # Update the auxiliary variables if ii < batch_size - 1: theta = float((ii + 1) * batch_size) else: theta = float(batch_size ** 2 + ii + 1 - batch_size) beta = (theta + 1 - batch_size) / (theta + 1) A *= beta A += np.dot(this_code, this_code.T) B *= beta B += np.dot(this_X.T, this_code.T) # Update dictionary dictionary = _update_dict(dictionary, B, A, verbose=verbose, random_state=random_state) # XXX: Can the residuals be of any use? 
        # Maybe we need a stopping criterion based on the amount of
        # modification in the dictionary
        if callback is not None:
            callback(locals())

    if return_inner_stats:
        if return_n_iter:
            return dictionary.T, (A, B), ii - iter_offset + 1
        else:
            return dictionary.T, (A, B)
    if return_code:
        if verbose > 1:
            print('Learning code...', end=' ')
        elif verbose == 1:
            print('|', end=' ')
        code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
                             n_jobs=n_jobs, check_input=False)
        if verbose > 1:
            dt = (time.time() - t0)
            print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
        if return_n_iter:
            return code, dictionary.T, ii - iter_offset + 1
        else:
            return code, dictionary.T

    if return_n_iter:
        return dictionary.T, ii - iter_offset + 1
    else:
        return dictionary.T


class SparseCodingMixin(TransformerMixin):
    """Sparse coding mixin"""

    def _set_sparse_coding_params(self, n_components,
                                  transform_algorithm='omp',
                                  transform_n_nonzero_coefs=None,
                                  transform_alpha=None, split_sign=False,
                                  n_jobs=1):
        self.n_components = n_components
        self.transform_algorithm = transform_algorithm
        self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
        self.transform_alpha = transform_alpha
        self.split_sign = split_sign
        self.n_jobs = n_jobs

    def transform(self, X):
        """Encode the data as a sparse combination of the dictionary atoms.

        Coding method is determined by the object parameter
        `transform_algorithm`.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data
        """
        check_is_fitted(self, 'components_')

        X = check_array(X)
        n_samples, n_features = X.shape

        code = sparse_encode(
            X, self.components_, algorithm=self.transform_algorithm,
            n_nonzero_coefs=self.transform_n_nonzero_coefs,
            alpha=self.transform_alpha, n_jobs=self.n_jobs)

        if self.split_sign:
            # feature vector is split into a positive and negative side
            n_samples, n_features = code.shape
            split_code = np.empty((n_samples, 2 * n_features))
            split_code[:, :n_features] = np.maximum(code, 0)
            split_code[:, n_features:] = -np.minimum(code, 0)
            code = split_code

        return code


class SparseCoder(BaseEstimator, SparseCodingMixin):
    """Sparse coding

    Finds a sparse representation of data against a fixed, precomputed
    dictionary.

    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::

        X ~= code * dictionary

    Read more in the :ref:`User Guide <SparseCoder>`.

    Parameters
    ----------
    dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
        normalized to unit norm.

    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
    'threshold'}
        Algorithm used to transform the data:
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection ``dictionary * X'``

    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and
        `algorithm='omp'` and is overridden by `alpha` in the `omp` case.

    transform_alpha : float, 1.
by default If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm. If `algorithm='threshold'`, `alpha` is the absolute value of the threshold below which coefficients will be squashed to zero. If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of the reconstruction error targeted. In this case, it overrides `n_nonzero_coefs`. split_sign : bool, False by default Whether to split the sparse feature vector into the concatenation of its negative part and its positive part. This can improve the performance of downstream classifiers. n_jobs : int, number of parallel jobs to run Attributes ---------- components_ : array, [n_components, n_features] The unchanged dictionary atoms See also -------- DictionaryLearning MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA sparse_encode """ _required_parameters = ["dictionary"] def __init__(self, dictionary, transform_algorithm='omp', transform_n_nonzero_coefs=None, transform_alpha=None, split_sign=False, n_jobs=1): self._set_sparse_coding_params(dictionary.shape[0], transform_algorithm, transform_n_nonzero_coefs, transform_alpha, split_sign, n_jobs) self.components_ = dictionary def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. Parameters ---------- X : Ignored. y : Ignored. Returns ------- self : object Returns the object itself """ return self class DictionaryLearning(BaseEstimator, SparseCodingMixin): """Dictionary learning Finds a dictionary (a set of atoms) that can best be used to represent data using a sparse code. Solves the optimization problem:: (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- n_components : int, number of dictionary elements to extract alpha : float, sparsity controlling parameter max_iter : int, maximum number of iterations to perform tol : float, tolerance for numerical error fit_algorithm : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. .. versionadded:: 0.17 *cd* coordinate descent method to improve speed. transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ 'threshold'} Algorithm used to transform the data lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). lasso_lars will be faster if the estimated components are sparse. omp: uses orthogonal matching pursuit to estimate the sparse solution threshold: squashes to zero all coefficients less than alpha from the projection ``dictionary * X'`` .. versionadded:: 0.17 *lasso_cd* coordinate descent method to improve speed. transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default Number of nonzero coefficients to target in each column of the solution. This is only used by `algorithm='lars'` and `algorithm='omp'` and is overridden by `alpha` in the `omp` case. transform_alpha : float, 1. by default If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm. 
    If `algorithm='threshold'`, `alpha` is the absolute value of the
    threshold below which coefficients will be squashed to zero.
    If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
    the reconstruction error targeted. In this case, it overrides
    `n_nonzero_coefs`.

    n_jobs : int,
        number of parallel jobs to run

    code_init : array of shape (n_samples, n_components),
        initial value for the code, for warm restart

    dict_init : array of shape (n_components, n_features),
        initial values for the dictionary, for warm restart

    verbose : bool, optional (default: False)
        To control the verbosity of the procedure.

    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part. This can improve the
        performance of downstream classifiers.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        dictionary atoms extracted from the data

    error_ : array
        vector of errors at each iteration

    n_iter_ : int
        Number of iterations run.

    Notes
    -----
    **References:**

    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)

    See also
    --------
    SparseCoder
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """

    def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
                 fit_algorithm='lars', transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 n_jobs=1, code_init=None, dict_init=None, verbose=False,
                 split_sign=False, random_state=None):

        self._set_sparse_coding_params(n_components, transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)
        self.alpha = alpha
        self.max_iter = max_iter
        self.tol = tol
        self.fit_algorithm = fit_algorithm
        self.code_init = code_init
        self.dict_init = dict_init
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        Returns
        -------
        self : object
            Returns the object itself
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components

        # dict_learning returns (code, dictionary, errors, n_iter); the
        # dictionary atoms become the fitted components.
        code, dictionary, E, self.n_iter_ = dict_learning(
            X, n_components, self.alpha,
            tol=self.tol, max_iter=self.max_iter,
            method=self.fit_algorithm,
            n_jobs=self.n_jobs,
            code_init=self.code_init,
            dict_init=self.dict_init,
            verbose=self.verbose,
            random_state=random_state,
            return_n_iter=True)
        self.components_ = dictionary
        self.error_ = E
        return self


class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
    """Mini-batch dictionary learning

    Finds a dictionary (a set of atoms) that can best be used to represent
    data using a sparse code.

    Solves the optimization problem::

       (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
                    (U,V)
                    with || V_k ||_2 = 1 for all  0 <= k < n_components

    Read more in the :ref:`User Guide <DictionaryLearning>`.
    Parameters
    ----------
    n_components : int,
        number of dictionary elements to extract

    alpha : float,
        sparsity controlling parameter

    n_iter : int,
        total number of iterations to perform

    fit_algorithm : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.

    n_jobs : int,
        number of parallel jobs to run

    batch_size : int,
        number of samples in each mini-batch

    shuffle : bool,
        whether to shuffle the samples before forming batches

    dict_init : array of shape (n_components, n_features),
        initial value of the dictionary for warm restart scenarios

    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
    'threshold'}
        Algorithm used to transform the data.
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection dictionary * X'

    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and
        `algorithm='omp'` and is overridden by `alpha` in the `omp` case.

    transform_alpha : float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value
        of the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.

    verbose : bool, optional (default: False)
        To control the verbosity of the procedure.

    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part. This can improve the
        performance of downstream classifiers.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        components extracted from the data

    inner_stats_ : tuple of (A, B) ndarrays
        Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
        history of the evolution, but they shouldn't have any use for the
        end user.
        A (n_components, n_components) is the dictionary covariance matrix.
        B (n_features, n_components) is the data approximation matrix.

    n_iter_ : int
        Number of iterations run.

    Notes
    -----
    **References:**

    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
    See also
    --------
    SparseCoder
    DictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """

    def __init__(self, n_components=None, alpha=1, n_iter=1000,
                 fit_algorithm='lars', n_jobs=1, batch_size=3, shuffle=True,
                 dict_init=None, transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 verbose=False, split_sign=False, random_state=None):

        self._set_sparse_coding_params(n_components, transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)
        self.alpha = alpha
        self.n_iter = n_iter
        self.fit_algorithm = fit_algorithm
        self.dict_init = dict_init
        self.verbose = verbose
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.split_sign = split_sign
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)

        U, (A, B), self.n_iter_ = dict_learning_online(
            X, self.n_components, self.alpha,
            n_iter=self.n_iter, return_code=False,
            method=self.fit_algorithm,
            n_jobs=self.n_jobs, dict_init=self.dict_init,
            batch_size=self.batch_size, shuffle=self.shuffle,
            verbose=self.verbose, random_state=random_state,
            return_inner_stats=True,
            return_n_iter=True)
        self.components_ = U
        # Keep track of the state of the algorithm to be able to do
        # some online fitting (partial_fit)
        self.inner_stats_ = (A, B)
        self.iter_offset_ = self.n_iter
        return self

    def partial_fit(self, X, y=None, iter_offset=None):
        """Updates the model using the data in X as a mini-batch.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        iter_offset : integer, optional
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
            if no number is passed, the memory of the object is used.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if not hasattr(self, 'random_state_'):
            self.random_state_ = check_random_state(self.random_state)
        X = check_array(X)
        if hasattr(self, 'components_'):
            dict_init = self.components_
        else:
            dict_init = self.dict_init
        inner_stats = getattr(self, 'inner_stats_', None)
        if iter_offset is None:
            iter_offset = getattr(self, 'iter_offset_', 0)
        U, (A, B) = dict_learning_online(
            X, self.n_components, self.alpha,
            n_iter=self.n_iter, method=self.fit_algorithm,
            n_jobs=self.n_jobs, dict_init=dict_init,
            batch_size=len(X), shuffle=False,
            verbose=self.verbose, return_code=False,
            iter_offset=iter_offset, random_state=self.random_state_,
            return_inner_stats=True, inner_stats=inner_stats)
        self.components_ = U

        # Keep track of the state of the algorithm to be able to do
        # some online fitting (partial_fit)
        self.inner_stats_ = (A, B)
        self.iter_offset_ = iter_offset + self.n_iter
        return self
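
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream
# module). It exercises the estimators defined above on synthetic data; the
# names `X_demo` and `atoms` are illustrative assumptions only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(40, 8)

    # Learn a 5-atom dictionary and the matching sparse code for X_demo.
    dl = DictionaryLearning(n_components=5, alpha=1., max_iter=20,
                            random_state=0)
    code = dl.fit(X_demo).transform(X_demo)
    print("code shape:", code.shape)                   # (40, 5)
    print("dictionary shape:", dl.components_.shape)   # (5, 8)

    # Re-encode the data against the learned atoms with a hard sparsity cap.
    atoms = dl.components_
    sparse = sparse_encode(X_demo, atoms, algorithm='omp', n_nonzero_coefs=2)
    print("max nonzeros per sample:", int((sparse != 0).sum(axis=1).max()))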
48,029
35.139955
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/online_lda.py
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================

This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://matthewdhoffman.com/code/onlineldavb.tar
"""

# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)

import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
import warnings

from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
                     gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.fixes import logsumexp
from ..utils.validation import check_non_negative
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError

from ._online_lda import (mean_change, _dirichlet_expectation_1d,
                          _dirichlet_expectation_2d)

EPS = np.finfo(np.float).eps


def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
                             max_iters,
                             mean_change_tol, cal_sstats, random_state):
    """E-step: update document-topic distribution.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.

    exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of the expectation of the log topic word
        distribution. In the literature, this is `exp(E[log(beta)])`.

    doc_topic_prior : float
        Prior of document topic distribution `theta`.

    max_iters : int
        Max number of iterations for updating document topic distribution in
        the E-step.

    mean_change_tol : float
        Stopping tolerance for updating document topic distribution in the
        E-step.

    cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics
        or not. Set `cal_sstats` to `True` when we need to run the M-step.

    random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic
        distribution. Setting `random_state` to None initializes the document
        topic distribution to a constant number.

    Returns
    -------
    (doc_topic_distr, suff_stats) :
        `doc_topic_distr` is the unnormalized topic distribution for each
        document. In the literature, this is `gamma`. We can calculate
        `E[log(theta)]` from it.
        `suff_stats` is the expected sufficient statistics for the M-step.
        When `cal_sstats == False`, this will be None.

    """
    is_sparse_x = sp.issparse(X)
    n_samples, n_features = X.shape
    n_topics = exp_topic_word_distr.shape[0]

    if random_state:
        doc_topic_distr = random_state.gamma(100., 0.01,
                                             (n_samples, n_topics))
    else:
        doc_topic_distr = np.ones((n_samples, n_topics))

    # In the literature, this is `exp(E[log(theta)])`
    exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))

    # Sufficient statistics for `components_` (only calculated when
    # `cal_sstats` is True)
    suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None

    if is_sparse_x:
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr

    for idx_d in xrange(n_samples):
        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]

        doc_topic_d = doc_topic_distr[idx_d, :]
        # The next one is a copy, since the inner loop overwrites it.
        exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
        exp_topic_word_d = exp_topic_word_distr[:, ids]

        # Iterate between `doc_topic_d` and `norm_phi` until convergence
        for _ in xrange(0, max_iters):
            last_d = doc_topic_d

            # The optimal phi_{dwk} is proportional to
            # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS

            doc_topic_d = (exp_doc_topic_d *
                           np.dot(cnts / norm_phi, exp_topic_word_d.T))
            # Note: adds doc_topic_prior to doc_topic_d, in-place.
            _dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
                                      exp_doc_topic_d)

            if mean_change(last_d, doc_topic_d) < mean_change_tol:
                break
        doc_topic_distr[idx_d, :] = doc_topic_d

        # Contribution of document d to the expected sufficient
        # statistics for the M step.
        if cal_sstats:
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)

    return (doc_topic_distr, suff_stats)


class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
    """Latent Dirichlet Allocation with online variational Bayes algorithm

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <LatentDirichletAllocation>`.

    Parameters
    ----------
    n_components : int, optional (default=10)
        Number of topics.

    doc_topic_prior : float, optional (default=None)
        Prior of document topic distribution `theta`. If the value is None,
        defaults to `1 / n_components`.
        In the literature, this is called `alpha`.

    topic_word_prior : float, optional (default=None)
        Prior of topic word distribution `beta`. If the value is None,
        defaults to `1 / n_components`.
        In the literature, this is called `eta`.

    learning_method : 'batch' | 'online', default='online'
        Method used to update ``components_``. Only used in `fit` method.
        In general, if the data size is large, the online update will be much
        faster than the batch update.
        The default learning method is going to be changed to 'batch' in the
        0.20 release.
        Valid options::

            'batch': Batch variational Bayes method. Use all training data in
                each EM update.
                Old `components_` will be overwritten in each iteration.
            'online': Online variational Bayes method. In each EM update, use
                mini-batch of training data to update the ``components_``
                variable incrementally. The learning rate is controlled by
                the ``learning_decay`` and the ``learning_offset`` parameters.

    learning_decay : float, optional (default=0.7)
        It is a parameter that controls the learning rate in the online
        learning method. The value should be set between (0.5, 1.0] to
        guarantee asymptotic convergence. When the value is 0.0 and
        batch_size is ``n_samples``, the update method is the same as batch
        learning. In the literature, this is called kappa.

    learning_offset : float, optional (default=10.)
        A (positive) parameter that downweights early iterations in online
        learning.  It should be greater than 1.0. In the literature, this is
        called tau_0.

    max_iter : integer, optional (default=10)
        The maximum number of iterations.

    batch_size : int, optional (default=128)
        Number of documents to use in each EM iteration. Only used in online
        learning.

    evaluate_every : int, optional (default=0)
        How often to evaluate perplexity. Only used in `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
        training at all. Evaluating perplexity can help you check convergence
        in the training process, but it will also increase total training
        time. Evaluating perplexity in every iteration might increase
        training time up to two-fold.

    total_samples : int, optional (default=1e6)
        Total number of documents.
Only used in the `partial_fit` method. perp_tol : float, optional (default=1e-1) Perplexity tolerance in batch learning. Only used when ``evaluate_every`` is greater than 0. mean_change_tol : float, optional (default=1e-3) Stopping tolerance for updating document topic distribution in E-step. max_doc_update_iter : int (default=100) Max number of iterations for updating document topic distribution in the E-step. n_jobs : int, optional (default=1) The number of jobs to use in the E-step. If -1, all CPUs are used. For ``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used. verbose : int, optional (default=0) Verbosity level. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. n_topics : int, optional (default=None) This parameter has been renamed to n_components and will be removed in version 0.21. .. deprecated:: 0.19 Attributes ---------- components_ : array, [n_components, n_features] Variational parameters for topic word distribution. Since the complete conditional for topic word distribution is a Dirichlet, ``components_[i, j]`` can be viewed as pseudocount that represents the number of times word `j` was assigned to topic `i`. It can also be viewed as distribution over the words for each topic after normalization: ``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``. n_batch_iter_ : int Number of iterations of the EM step. n_iter_ : int Number of passes over the dataset. References ---------- [1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman, David M. Blei, Francis Bach, 2010 [2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei, Chong Wang, John Paisley, 2013 [3] Matthew D. Hoffman's onlineldavb code. 
        Link: http://matthewdhoffman.com//code/onlineldavb.tar

    """

    def __init__(self, n_components=10, doc_topic_prior=None,
                 topic_word_prior=None, learning_method=None,
                 learning_decay=.7, learning_offset=10., max_iter=10,
                 batch_size=128, evaluate_every=-1, total_samples=1e6,
                 perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
                 n_jobs=1, verbose=0, random_state=None, n_topics=None):
        self.n_components = n_components
        self.doc_topic_prior = doc_topic_prior
        self.topic_word_prior = topic_word_prior
        self.learning_method = learning_method
        self.learning_decay = learning_decay
        self.learning_offset = learning_offset
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.evaluate_every = evaluate_every
        self.total_samples = total_samples
        self.perp_tol = perp_tol
        self.mean_change_tol = mean_change_tol
        self.max_doc_update_iter = max_doc_update_iter
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.random_state = random_state
        self.n_topics = n_topics

    def _check_params(self):
        """Check model parameters."""
        if self.n_topics is not None:
            self._n_components = self.n_topics
            warnings.warn("n_topics has been renamed to n_components in "
                          "version 0.19 and will be removed in 0.21",
                          DeprecationWarning)
        else:
            self._n_components = self.n_components

        if self._n_components <= 0:
            raise ValueError("Invalid 'n_components' parameter: %r"
                             % self._n_components)

        if self.total_samples <= 0:
            raise ValueError("Invalid 'total_samples' parameter: %r"
                             % self.total_samples)

        if self.learning_offset < 0:
            raise ValueError("Invalid 'learning_offset' parameter: %r"
                             % self.learning_offset)

        if self.learning_method not in ("batch", "online", None):
            raise ValueError("Invalid 'learning_method' parameter: %r"
                             % self.learning_method)

    def _init_latent_vars(self, n_features):
        """Initialize latent variables."""

        self.random_state_ = check_random_state(self.random_state)
        self.n_batch_iter_ = 1
        self.n_iter_ = 0

        if self.doc_topic_prior is None:
            self.doc_topic_prior_ = 1. / self._n_components
        else:
            self.doc_topic_prior_ = self.doc_topic_prior

        if self.topic_word_prior is None:
            self.topic_word_prior_ = 1. / self._n_components
        else:
            self.topic_word_prior_ = self.topic_word_prior

        init_gamma = 100.
        init_var = 1. / init_gamma
        # In the literature, this is called `lambda`
        self.components_ = self.random_state_.gamma(
            init_gamma, init_var, (self._n_components, n_features))

        # In the literature, this is `exp(E[log(beta)])`
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))

    def _e_step(self, X, cal_sstats, random_init, parallel=None):
        """E-step in EM update.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        cal_sstats : boolean
            Parameter that indicates whether to calculate sufficient
            statistics or not. Set ``cal_sstats`` to True when we need to
            run the M-step.

        random_init : boolean
            Parameter that indicates whether to initialize the document topic
            distribution randomly in the E-step. Set it to True in training
            steps.

        parallel : joblib.Parallel (optional)
            Pre-initialized instance of joblib.Parallel.

        Returns
        -------
        (doc_topic_distr, suff_stats) :
            `doc_topic_distr` is the unnormalized topic distribution for each
            document. In the literature, this is called `gamma`.
            `suff_stats` is the expected sufficient statistics for the
            M-step. When `cal_sstats == False`, it will be None.

        """

        # Run e-step in parallel
        random_state = self.random_state_ if random_init else None

        # TODO: make Parallel._effective_n_jobs public instead?
        n_jobs = _get_n_jobs(self.n_jobs)
        if parallel is None:
            parallel = Parallel(n_jobs=n_jobs, verbose=max(0,
                                self.verbose - 1))
        results = parallel(
            delayed(_update_doc_distribution)(X[idx_slice, :],
                                              self.exp_dirichlet_component_,
                                              self.doc_topic_prior_,
                                              self.max_doc_update_iter,
                                              self.mean_change_tol,
                                              cal_sstats,
                                              random_state)
            for idx_slice in gen_even_slices(X.shape[0], n_jobs))

        # merge result
        doc_topics, sstats_list = zip(*results)
        doc_topic_distr = np.vstack(doc_topics)

        if cal_sstats:
            # This step finishes computing the sufficient statistics for the
            # M-step.
            suff_stats = np.zeros(self.components_.shape)
            for sstats in sstats_list:
                suff_stats += sstats
            suff_stats *= self.exp_dirichlet_component_
        else:
            suff_stats = None

        return (doc_topic_distr, suff_stats)

    def _em_step(self, X, total_samples, batch_update, parallel=None):
        """EM update for 1 iteration.

        Update ``components_`` by batch VB or online VB.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        total_samples : integer
            Total number of documents. It is only used when
            batch_update is `False`.

        batch_update : boolean
            Parameter that controls updating method.
            `True` for batch learning, `False` for online learning.

        parallel : joblib.Parallel
            Pre-initialized instance of joblib.Parallel

        Returns
        -------
        doc_topic_distr : array, shape=(n_samples, n_components)
            Unnormalized document topic distribution.
        """

        # E-step
        _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
                                     parallel=parallel)

        # M-step
        if batch_update:
            self.components_ = self.topic_word_prior_ + suff_stats
        else:
            # online update
            # In the literature, the weight is `rho`
            weight = np.power(self.learning_offset + self.n_batch_iter_,
                              -self.learning_decay)
            doc_ratio = float(total_samples) / X.shape[0]
            self.components_ *= (1 - weight)
            self.components_ += (weight * (self.topic_word_prior_
                                           + doc_ratio * suff_stats))

        # update `components_`-related variables
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))
        self.n_batch_iter_ += 1
        return

    def _check_non_neg_array(self, X, whom):
        """Check X format.

        Check X format and make sure there is no negative value in X.

        Parameters
        ----------
        X :  array-like or sparse matrix

        """
        X = check_array(X, accept_sparse='csr')
        check_non_negative(X, whom)
        return X

    def partial_fit(self, X, y=None):
        """Online VB with Mini-Batch update.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        y : Ignored.

        Returns
        -------
        self
        """
        self._check_params()
        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.partial_fit")
        n_samples, n_features = X.shape
        batch_size = self.batch_size

        # initialize parameters or check
        if not hasattr(self, 'components_'):
            self._init_latent_vars(n_features)

        if n_features != self.components_.shape[1]:
            raise ValueError(
                "The provided data has %d dimensions while "
                "the model was trained with feature size %d." %
                (n_features, self.components_.shape[1]))

        n_jobs = _get_n_jobs(self.n_jobs)
        with Parallel(n_jobs=n_jobs, verbose=max(0,
                      self.verbose - 1)) as parallel:
            for idx_slice in gen_batches(n_samples, batch_size):
                self._em_step(X[idx_slice, :],
                              total_samples=self.total_samples,
                              batch_update=False,
                              parallel=parallel)

        return self

    def fit(self, X, y=None):
        """Learn model for the data X with variational Bayes method.

        When `learning_method` is 'online', use mini-batch update.
        Otherwise, use batch update.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        y : Ignored.
Returns ------- self """ self._check_params() X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit") n_samples, n_features = X.shape max_iter = self.max_iter evaluate_every = self.evaluate_every learning_method = self.learning_method if learning_method is None: warnings.warn("The default value for 'learning_method' will be " "changed from 'online' to 'batch' in the release " "0.20. This warning was introduced in 0.18.", DeprecationWarning) learning_method = 'online' batch_size = self.batch_size # initialize parameters self._init_latent_vars(n_features) # change to perplexity later last_bound = None n_jobs = _get_n_jobs(self.n_jobs) with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: for i in xrange(max_iter): if learning_method == 'online': for idx_slice in gen_batches(n_samples, batch_size): self._em_step(X[idx_slice, :], total_samples=n_samples, batch_update=False, parallel=parallel) else: # batch update self._em_step(X, total_samples=n_samples, batch_update=True, parallel=parallel) # check perplexity if evaluate_every > 0 and (i + 1) % evaluate_every == 0: doc_topics_distr, _ = self._e_step(X, cal_sstats=False, random_init=False, parallel=parallel) bound = self._perplexity_precomp_distr(X, doc_topics_distr, sub_sampling=False) if self.verbose: print('iteration: %d of max_iter: %d, perplexity: %.4f' % (i + 1, max_iter, bound)) if last_bound and abs(last_bound - bound) < self.perp_tol: break last_bound = bound elif self.verbose: print('iteration: %d of max_iter: %d' % (i + 1, max_iter)) self.n_iter_ += 1 # calculate final perplexity value on train set doc_topics_distr, _ = self._e_step(X, cal_sstats=False, random_init=False, parallel=parallel) self.bound_ = self._perplexity_precomp_distr(X, doc_topics_distr, sub_sampling=False) return self def _unnormalized_transform(self, X): """Transform data X according to fitted model. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. Returns ------- doc_topic_distr : shape=(n_samples, n_components) Document topic distribution for X. """ if not hasattr(self, 'components_'): raise NotFittedError("no 'components_' attribute in model." " Please fit model first.") # make sure feature size is the same in fitted model and in X X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform") n_samples, n_features = X.shape if n_features != self.components_.shape[1]: raise ValueError( "The provided data has %d dimensions while " "the model was trained with feature size %d." % (n_features, self.components_.shape[1])) doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False) return doc_topic_distr def transform(self, X): """Transform data X according to the fitted model. .. versionchanged:: 0.18 *doc_topic_distr* is now normalized Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. Returns ------- doc_topic_distr : shape=(n_samples, n_components) Document topic distribution for X. """ doc_topic_distr = self._unnormalized_transform(X) doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis] return doc_topic_distr def _approx_bound(self, X, doc_topic_distr, sub_sampling): """Estimate the variational bound. Estimate the variational bound over "all documents" using only the documents passed in as X. Since log-likelihood of each word cannot be computed directly, we use this bound to estimate it. 
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        doc_topic_distr : array, shape=(n_samples, n_components)
            Document topic distribution. In the literature, this is called
            gamma.

        sub_sampling : boolean, optional (default=False)
            Compensate for subsampling of documents. It is used when
            calculating the bound in online learning.

        Returns
        -------
        score : float

        """

        def _loglikelihood(prior, distr, dirichlet_distr, size):
            # calculate log-likelihood
            score = np.sum((prior - distr) * dirichlet_distr)
            score += np.sum(gammaln(distr) - gammaln(prior))
            score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
            return score

        is_sparse_x = sp.issparse(X)
        n_samples, n_components = doc_topic_distr.shape
        n_features = self.components_.shape[1]
        score = 0

        dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
        dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
        doc_topic_prior = self.doc_topic_prior_
        topic_word_prior = self.topic_word_prior_

        if is_sparse_x:
            X_data = X.data
            X_indices = X.indices
            X_indptr = X.indptr

        # E[log p(docs | theta, beta)]
        for idx_d in xrange(0, n_samples):
            if is_sparse_x:
                ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
                cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            else:
                ids = np.nonzero(X[idx_d, :])[0]
                cnts = X[idx_d, ids]
            temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
                    + dirichlet_component_[:, ids])
            norm_phi = logsumexp(temp, axis=0)
            score += np.dot(cnts, norm_phi)

        # compute E[log p(theta | alpha) - log q(theta | gamma)]
        score += _loglikelihood(doc_topic_prior, doc_topic_distr,
                                dirichlet_doc_topic, self._n_components)

        # Compensate for the subsampling of the population of documents
        if sub_sampling:
            doc_ratio = float(self.total_samples) / n_samples
            score *= doc_ratio

        # E[log p(beta | eta) - log q (beta | lambda)]
        score += _loglikelihood(topic_word_prior, self.components_,
                                dirichlet_component_, n_features)

        return score

    def score(self, X, y=None):
        """Calculate approximate log-likelihood as score.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        y : Ignored.

        Returns
        -------
        score : float
            Use approximate bound as score.
        """
        X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")

        doc_topic_distr = self._unnormalized_transform(X)
        score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
        return score

    def _perplexity_precomp_distr(self, X, doc_topic_distr=None,
                                  sub_sampling=False):
        """Calculate approximate perplexity for data X with ability to accept
        precomputed doc_topic_distr

        Perplexity is defined as exp(-1. * log-likelihood per word)

        Parameters
        ----------
        X : array-like or sparse matrix, [n_samples, n_features]
            Document word matrix.

        doc_topic_distr : None or array, shape=(n_samples, n_components)
            Document topic distribution.
            If it is None, it will be generated by applying transform on X.

        Returns
        -------
        score : float
            Perplexity score.
        """
        if not hasattr(self, 'components_'):
            raise NotFittedError("no 'components_' attribute in model."
                                 " Please fit model first.")

        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.perplexity")

        if doc_topic_distr is None:
            doc_topic_distr = self._unnormalized_transform(X)
        else:
            n_samples, n_components = doc_topic_distr.shape
            if n_samples != X.shape[0]:
                raise ValueError("Number of samples in X and doc_topic_distr"
                                 " do not match.")

            if n_components != self._n_components:
                raise ValueError("Number of topics does not match.")

        current_samples = X.shape[0]
        bound = self._approx_bound(X, doc_topic_distr, sub_sampling)

        if sub_sampling:
            word_cnt = X.sum() * (float(self.total_samples) / current_samples)
        else:
            word_cnt = X.sum()
        perword_bound = bound / word_cnt

        return np.exp(-1.0 * perword_bound)

    def perplexity(self, X, doc_topic_distr='deprecated', sub_sampling=False):
        """Calculate approximate perplexity for data X.

        Perplexity is defined as exp(-1. * log-likelihood per word)

        .. versionchanged:: 0.19
           *doc_topic_distr* argument has been deprecated and is ignored
           because the user no longer has access to the unnormalized
           distribution

        Parameters
        ----------
        X : array-like or sparse matrix, [n_samples, n_features]
            Document word matrix.

        doc_topic_distr : None or array, shape=(n_samples, n_components)
            Document topic distribution.
            This argument is deprecated and is currently being ignored.

            .. deprecated:: 0.19

        sub_sampling : bool
            Do sub-sampling or not.

        Returns
        -------
        score : float
            Perplexity score.
        """
        if doc_topic_distr != 'deprecated':
            warnings.warn("Argument 'doc_topic_distr' is deprecated and is "
                          "being ignored as of 0.19. Support for this "
                          "argument will be removed in 0.21.",
                          DeprecationWarning)

        return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
31,056
36.920635
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/sparse_pca.py
"""Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause

import warnings

import numpy as np

from ..utils import check_random_state, check_array
from ..utils.validation import check_is_fitted
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online


class SparsePCA(BaseEstimator, TransformerMixin):
    """Sparse Principal Components Analysis (SparsePCA)

    Finds the set of sparse components that can optimally reconstruct
    the data.  The amount of sparseness is controllable by the coefficient
    of the L1 penalty, given by the parameter alpha.

    Read more in the :ref:`User Guide <SparsePCA>`.

    Parameters
    ----------
    n_components : int,
        Number of sparse atoms to extract.

    alpha : float,
        Sparsity controlling parameter. Higher values lead to sparser
        components.

    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.

    max_iter : int,
        Maximum number of iterations to perform.

    tol : float,
        Tolerance for the stopping condition.

    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.

    n_jobs : int,
        Number of parallel jobs to run.

    U_init : array of shape (n_samples, n_components),
        Initial values for the loadings for warm restart scenarios.

    V_init : array of shape (n_components, n_features),
        Initial values for the components for warm restart scenarios.

    verbose : int
        Controls the verbosity; the higher, the more messages. Defaults to 0.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Sparse components extracted from the data.

    error_ : array
        Vector of errors at each iteration.

    n_iter_ : int
        Number of iterations run.

    See also
    --------
    PCA
    MiniBatchSparsePCA
    DictionaryLearning
    """

    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 max_iter=1000, tol=1e-8, method='lars', n_jobs=1,
                 U_init=None, V_init=None, verbose=False, random_state=None):
        self.n_components = n_components
        self.alpha = alpha
        self.ridge_alpha = ridge_alpha
        self.max_iter = max_iter
        self.tol = tol
        self.method = method
        self.n_jobs = n_jobs
        self.U_init = U_init
        self.V_init = V_init
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        code_init = self.V_init.T if self.V_init is not None else None
        dict_init = self.U_init.T if self.U_init is not None else None
        Vt, _, E, self.n_iter_ = dict_learning(X.T, n_components, self.alpha,
                                               tol=self.tol,
                                               max_iter=self.max_iter,
                                               method=self.method,
                                               n_jobs=self.n_jobs,
                                               verbose=self.verbose,
                                               random_state=random_state,
                                               code_init=code_init,
                                               dict_init=dict_init,
                                               return_n_iter=True
                                               )
        self.components_ = Vt.T
        self.error_ = E
        return self

    def transform(self, X, ridge_alpha='deprecated'):
        """Least Squares projection of the data onto the sparse components.

        To avoid instability issues in case the system is under-determined,
        regularization can be applied (Ridge regression) via the
        `ridge_alpha` parameter.

        Note that Sparse PCA components orthogonality is not enforced as in
        PCA hence one cannot use a simple linear projection.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.

        ridge_alpha : float, default: 0.01
            Amount of ridge shrinkage to apply in order to improve
            conditioning.

            .. deprecated:: 0.19
               This parameter will be removed in 0.21.
               Specify ``ridge_alpha`` in the ``SparsePCA`` constructor.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, 'components_')

        X = check_array(X)
        if ridge_alpha != 'deprecated':
            warnings.warn("The ridge_alpha parameter on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21."
                          " Specify ridge_alpha in the SparsePCA constructor.",
                          DeprecationWarning)
            if ridge_alpha is None:
                ridge_alpha = self.ridge_alpha
        else:
            ridge_alpha = self.ridge_alpha

        U = ridge_regression(self.components_.T, X.T, ridge_alpha,
                             solver='cholesky')
        s = np.sqrt((U ** 2).sum(axis=0))
        s[s == 0] = 1
        U /= s
        return U


class MiniBatchSparsePCA(SparsePCA):
    """Mini-batch Sparse Principal Components Analysis

    Finds the set of sparse components that can optimally reconstruct
    the data.  The amount of sparseness is controllable by the coefficient
    of the L1 penalty, given by the parameter alpha.

    Read more in the :ref:`User Guide <SparsePCA>`.

    Parameters
    ----------
    n_components : int,
        number of sparse atoms to extract

    alpha : float,
        Sparsity controlling parameter. Higher values lead to sparser
        components.

    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.

    n_iter : int,
        total number of mini-batch iterations to perform

    callback : callable or None, optional (default: None)
        callable that gets invoked every five iterations

    batch_size : int,
        the number of features to take in each mini batch

    verbose : int
        Controls the verbosity; the higher, the more messages. Defaults to 0.

    shuffle : boolean,
        whether to shuffle the data before splitting it in batches

    n_jobs : int,
        number of parallel jobs to run, or -1 to autodetect.

    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Sparse components extracted from the data.

    error_ : array
        Vector of errors at each iteration.

    n_iter_ : int
        Number of iterations run.

    See also
    --------
    PCA
    SparsePCA
    DictionaryLearning
    """

    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 n_iter=100, callback=None, batch_size=3, verbose=False,
                 shuffle=True, n_jobs=1, method='lars', random_state=None):
        super(MiniBatchSparsePCA, self).__init__(
            n_components=n_components, alpha=alpha, verbose=verbose,
            ridge_alpha=ridge_alpha, n_jobs=n_jobs, method=method,
            random_state=random_state)
        self.n_iter = n_iter
        self.callback = callback
        self.batch_size = batch_size
        self.shuffle = shuffle

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)

        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        Vt, _, self.n_iter_ = dict_learning_online(
            X.T, n_components, alpha=self.alpha,
            n_iter=self.n_iter, return_code=True,
            dict_init=None, verbose=self.verbose,
            callback=self.callback,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            n_jobs=self.n_jobs, method=self.method,
            random_state=random_state,
            return_n_iter=True)
        self.components_ = Vt.T
        return self
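
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream
# module). It recovers sparse components from synthetic data with the two
# estimators above; shapes and hyper-parameters are illustrative assumptions.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(30, 10)

    spca = SparsePCA(n_components=4, alpha=1., random_state=0)
    X_low = spca.fit_transform(X_demo)
    print("projection shape:", X_low.shape)                  # (30, 4)
    print("component sparsity:", np.mean(spca.components_ == 0))

    # The mini-batch variant trades some accuracy for speed on larger data.
    mbspca = MiniBatchSparsePCA(n_components=4, alpha=1., n_iter=20,
                                batch_size=5, random_state=0)
    mbspca.fit(X_demo)
    print("mini-batch components shape:", mbspca.components_.shape)  # (4, 10)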
10,589
33.721311
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/nmf.py
""" Non-negative matrix factorization """ # Author: Vlad Niculae # Lars Buitinck # Mathieu Blondel <[email protected]> # Tom Dupre la Tour # License: BSD 3 clause from __future__ import division, print_function from math import sqrt import warnings import numbers import time import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..utils import check_random_state, check_array from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm from ..utils.extmath import safe_min from ..utils.validation import check_is_fitted, check_non_negative from ..exceptions import ConvergenceWarning from .cdnmf_fast import _update_cdnmf_fast EPSILON = np.finfo(np.float32).eps INTEGER_TYPES = (numbers.Integral, np.integer) def norm(x): """Dot product-based Euclidean norm implementation See: http://fseoane.net/blog/2011/computing-the-vector-norm/ """ return sqrt(squared_norm(x)) def trace_dot(X, Y): """Trace of np.dot(X, Y.T).""" return np.dot(X.ravel(), Y.ravel()) def _check_init(A, shape, whom): A = check_array(A) if np.shape(A) != shape: raise ValueError('Array with wrong shape passed to %s. Expected %s, ' 'but got %s ' % (whom, shape, np.shape(A))) check_non_negative(A, whom) if np.max(A) == 0: raise ValueError('Array passed to %s is full of zeros.' % whom) def _beta_divergence(X, W, H, beta, square_root=False): """Compute the beta-divergence of X and dot(W, H). Parameters ---------- X : float or array-like, shape (n_samples, n_features) W : float or dense array-like, shape (n_samples, n_components) H : float or dense array-like, shape (n_components, n_features) beta : float, string in {'frobenius', 'kullback-leibler', 'itakura-saito'} Parameter of the beta-divergence. If beta == 2, this is half the Frobenius *squared* norm. If beta == 1, this is the generalized Kullback-Leibler divergence. If beta == 0, this is the Itakura-Saito divergence. Else, this is the general beta-divergence. square_root : boolean, default False If True, return np.sqrt(2 * res) For beta == 2, it corresponds to the Frobenius norm. Returns ------- res : float Beta divergence of X and np.dot(X, H) """ beta = _beta_loss_to_float(beta) # The method can be called with scalars if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) # Frobenius norm if beta == 2: # Avoid the creation of the dense np.dot(W, H) if X is sparse. if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H) cross_prod = trace_dot((X * H.T), W) res = (norm_X + norm_WH - 2. * cross_prod) / 2. else: res = squared_norm(X - np.dot(W, H)) / 2. 
if square_root: return np.sqrt(res * 2) else: return res if sp.issparse(X): # compute np.dot(W, H) only where X is nonzero WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() # do not affect the zeros: here 0 ** (-1) = 0 and not infinity WH_data = WH_data[X_data != 0] X_data = X_data[X_data != 0] # used to avoid division by zero WH_data[WH_data == 0] = EPSILON # generalized Kullback-Leibler divergence if beta == 1: # fast and memory efficient computation of np.sum(np.dot(W, H)) sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) # computes np.sum(X * log(X / WH)) only where X is nonzero div = X_data / WH_data res = np.dot(X_data, np.log(div)) # add full np.sum(np.dot(W, H)) - np.sum(X) res += sum_WH - X_data.sum() # Itakura-Saito divergence elif beta == 0: div = X_data / WH_data res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div)) # beta-divergence, beta not in (0, 1, 2) else: if sp.issparse(X): # slow loop, but memory efficient computation of : # np.sum(np.dot(W, H) ** beta) sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) else: sum_WH_beta = np.sum(WH ** beta) sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) res = (X_data ** beta).sum() - beta * sum_X_WH res += sum_WH_beta * (beta - 1) res /= beta * (beta - 1) if square_root: return np.sqrt(2 * res) else: return res def _special_sparse_dot(W, H, X): """Computes np.dot(W, H), only where X is non zero.""" if sp.issparse(X): ii, jj = X.nonzero() dot_vals = np.multiply(W[ii, :], H.T[jj, :]).sum(axis=1) WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape) return WH.tocsr() else: return np.dot(W, H) def _compute_regularization(alpha, l1_ratio, regularization): """Compute L1 and L2 regularization coefficients for W and H""" alpha_H = 0. alpha_W = 0. if regularization in ('both', 'components'): alpha_H = float(alpha) if regularization in ('both', 'transformation'): alpha_W = float(alpha) l1_reg_W = alpha_W * l1_ratio l1_reg_H = alpha_H * l1_ratio l2_reg_W = alpha_W * (1. - l1_ratio) l2_reg_H = alpha_H * (1. - l1_ratio) return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H def _check_string_param(solver, regularization, beta_loss, init): allowed_solver = ('cd', 'mu') if solver not in allowed_solver: raise ValueError( 'Invalid solver parameter: got %r instead of one of %r' % (solver, allowed_solver)) allowed_regularization = ('both', 'components', 'transformation', None) if regularization not in allowed_regularization: raise ValueError( 'Invalid regularization parameter: got %r instead of one of %r' % (regularization, allowed_regularization)) # 'mu' is the only solver that handles other beta losses than 'frobenius' if solver != 'mu' and beta_loss not in (2, 'frobenius'): raise ValueError( 'Invalid beta_loss parameter: solver %r does not handle beta_loss' ' = %r' % (solver, beta_loss)) if solver == 'mu' and init == 'nndsvd': warnings.warn("The multiplicative update ('mu') solver cannot update " "zeros present in the initialization, and so leads to " "poorer results when used jointly with init='nndsvd'. 
" "You may try init='nndsvda' or init='nndsvdar' instead.", UserWarning) beta_loss = _beta_loss_to_float(beta_loss) return beta_loss def _beta_loss_to_float(beta_loss): """Convert string beta_loss to float""" allowed_beta_loss = {'frobenius': 2, 'kullback-leibler': 1, 'itakura-saito': 0} if isinstance(beta_loss, str) and beta_loss in allowed_beta_loss: beta_loss = allowed_beta_loss[beta_loss] if not isinstance(beta_loss, numbers.Number): raise ValueError('Invalid beta_loss parameter: got %r instead ' 'of one of %r, or a float.' % (beta_loss, allowed_beta_loss.keys())) return beta_loss def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None): """Algorithms for NMF initialization. Computes an initial guess for the non-negative rank k matrix approximation for X: X = WH Parameters ---------- X : array-like, shape (n_samples, n_features) The data matrix to be decomposed. n_components : integer The number of components desired in the approximation. init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' Method used to initialize the procedure. Default: 'nndsvd' if n_components < n_features, otherwise 'random'. Valid options: - 'random': non-negative random matrices, scaled with: sqrt(X.mean() / n_components) - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) initialization (better for sparseness) - 'nndsvda': NNDSVD with zeros filled with the average of X (better when sparsity is not desired) - 'nndsvdar': NNDSVD with zeros filled with small random values (generally faster, less accurate alternative to NNDSVDa for when sparsity is not desired) - 'custom': use custom matrices W and H eps : float Truncate all values less then this in output to zero. random_state : int, RandomState instance or None, optional, default: None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``random`` == 'nndsvdar' or 'random'. Returns ------- W : array-like, shape (n_samples, n_components) Initial guesses for solving X ~= WH H : array-like, shape (n_components, n_features) Initial guesses for solving X ~= WH References ---------- C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for nonnegative matrix factorization - Pattern Recognition, 2008 http://tinyurl.com/nndsvd """ check_non_negative(X, "NMF initialization") n_samples, n_features = X.shape if init is None: if n_components < n_features: init = 'nndsvd' else: init = 'random' # Random initialization if init == 'random': avg = np.sqrt(X.mean() / n_components) rng = check_random_state(random_state) H = avg * rng.randn(n_components, n_features) W = avg * rng.randn(n_samples, n_components) # we do not write np.abs(H, out=H) to stay compatible with # numpy 1.5 and earlier where the 'out' keyword is not # supported as a kwarg on ufuncs np.abs(H, H) np.abs(W, W) return W, H # NNDSVD initialization U, S, V = randomized_svd(X, n_components, random_state=random_state) W, H = np.zeros(U.shape), np.zeros(V.shape) # The leading singular triplet is non-negative # so it can be used as is for initialization. 
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0]) H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :]) for j in range(1, n_components): x, y = U[:, j], V[j, :] # extract positive and negative parts of column vectors x_p, y_p = np.maximum(x, 0), np.maximum(y, 0) x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0)) # and their norms x_p_nrm, y_p_nrm = norm(x_p), norm(y_p) x_n_nrm, y_n_nrm = norm(x_n), norm(y_n) m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm # choose update if m_p > m_n: u = x_p / x_p_nrm v = y_p / y_p_nrm sigma = m_p else: u = x_n / x_n_nrm v = y_n / y_n_nrm sigma = m_n lbd = np.sqrt(S[j] * sigma) W[:, j] = lbd * u H[j, :] = lbd * v W[W < eps] = 0 H[H < eps] = 0 if init == "nndsvd": pass elif init == "nndsvda": avg = X.mean() W[W == 0] = avg H[H == 0] = avg elif init == "nndsvdar": rng = check_random_state(random_state) avg = X.mean() W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100) H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100) else: raise ValueError( 'Invalid init parameter: got %r instead of one of %r' % (init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar'))) return W, H def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state): """Helper function for _fit_coordinate_descent Update W to minimize the objective function, iterating once over all coordinates. By symmetry, to update H, one can call _update_coordinate_descent(X.T, Ht, W, ...) """ n_components = Ht.shape[1] HHt = np.dot(Ht.T, Ht) XHt = safe_sparse_dot(X, Ht) # L2 regularization corresponds to increase of the diagonal of HHt if l2_reg != 0.: # adds l2_reg only on the diagonal HHt.flat[::n_components + 1] += l2_reg # L1 regularization corresponds to decrease of each element of XHt if l1_reg != 0.: XHt -= l1_reg if shuffle: permutation = random_state.permutation(n_components) else: permutation = np.arange(n_components) # The following seems to be required on 64-bit Windows w/ Python 3.5. permutation = np.asarray(permutation, dtype=np.intp) return _update_cdnmf_fast(W, HHt, XHt, permutation) def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True, verbose=0, shuffle=False, random_state=None): """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent The objective function is minimized with an alternating minimization of W and H. Each minimization is done with a cyclic (up to a permutation of the features) Coordinate Descent. Parameters ---------- X : array-like, shape (n_samples, n_features) Constant matrix. W : array-like, shape (n_samples, n_components) Initial guess for the solution. H : array-like, shape (n_components, n_features) Initial guess for the solution. tol : float, default: 1e-4 Tolerance of the stopping condition. max_iter : integer, default: 200 Maximum number of iterations before timing out. l1_reg_W : double, default: 0. L1 regularization parameter for W. l1_reg_H : double, default: 0. L1 regularization parameter for H. l2_reg_W : double, default: 0. L2 regularization parameter for W. l2_reg_H : double, default: 0. L2 regularization parameter for H. update_H : boolean, default: True Set to True, both W and H will be estimated from initial guesses. Set to False, only W will be estimated. verbose : integer, default: 0 The verbosity level. shuffle : boolean, default: False If true, randomize the order of coordinates in the CD solver. 
random_state : int, RandomState instance or None, optional, default: None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- W : array-like, shape (n_samples, n_components) Solution to the non-negative least squares problem. H : array-like, shape (n_components, n_features) Solution to the non-negative least squares problem. n_iter : int The number of iterations done by the algorithm. References ---------- Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for large scale nonnegative matrix and tensor factorizations." IEICE transactions on fundamentals of electronics, communications and computer sciences 92.3: 708-721, 2009. """ # so W and Ht are both in C order in memory Ht = check_array(H.T, order='C') X = check_array(X, accept_sparse='csr') rng = check_random_state(random_state) for n_iter in range(max_iter): violation = 0. # Update W violation += _update_coordinate_descent(X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng) # Update H if update_H: violation += _update_coordinate_descent(X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng) if n_iter == 0: violation_init = violation if violation_init == 0: break if verbose: print("violation:", violation / violation_init) if violation / violation_init <= tol: if verbose: print("Converged at iteration", n_iter + 1) break return W, Ht.T, n_iter def _multiplicative_update_w(X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma, H_sum=None, HHt=None, XHt=None, update_H=True): """update W in Multiplicative Update NMF""" if beta_loss == 2: # Numerator if XHt is None: XHt = safe_sparse_dot(X, H.T) if update_H: # avoid a copy of XHt, which will be re-computed (update_H=True) numerator = XHt else: # preserve the XHt, which is not re-computed (update_H=False) numerator = XHt.copy() # Denominator if HHt is None: HHt = np.dot(H, H.T) denominator = np.dot(W, HHt) else: # Numerator # if X is sparse, compute WH only where X is non zero WH_safe_X = _special_sparse_dot(W, H, X) if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: WH_safe_X_data = WH_safe_X X_data = X # copy used in the Denominator WH = WH_safe_X.copy() if beta_loss - 1. < 0: WH[WH == 0] = EPSILON # to avoid taking a negative power of zero if beta_loss - 2. 
< 0: WH_safe_X_data[WH_safe_X_data == 0] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: # speeds up computation time # refer to /numpy/numpy/issues/9363 WH_safe_X_data **= -1 WH_safe_X_data **= 2 # element-wise multiplication WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 # element-wise multiplication WH_safe_X_data *= X_data # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T) numerator = safe_sparse_dot(WH_safe_X, H.T) # Denominator if beta_loss == 1: if H_sum is None: H_sum = np.sum(H, axis=1) # shape(n_components, ) denominator = H_sum[np.newaxis, :] else: # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T) if sp.issparse(X): # memory efficient computation # (compute row by row, avoiding the dense matrix WH) WHHt = np.empty(W.shape) for i in range(X.shape[0]): WHi = np.dot(W[i, :], H) if beta_loss - 1 < 0: WHi[WHi == 0] = EPSILON WHi **= beta_loss - 1 WHHt[i, :] = np.dot(WHi, H.T) else: WH **= beta_loss - 1 WHHt = np.dot(WH, H.T) denominator = WHHt # Add L1 and L2 regularization if l1_reg_W > 0: denominator += l1_reg_W if l2_reg_W > 0: denominator = denominator + l2_reg_W * W denominator[denominator == 0] = EPSILON numerator /= denominator delta_W = numerator # gamma is in ]0, 1] if gamma != 1: delta_W **= gamma return delta_W, H_sum, HHt, XHt def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma): """update H in Multiplicative Update NMF""" if beta_loss == 2: numerator = safe_sparse_dot(W.T, X) denominator = np.dot(np.dot(W.T, W), H) else: # Numerator WH_safe_X = _special_sparse_dot(W, H, X) if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: WH_safe_X_data = WH_safe_X X_data = X # copy used in the Denominator WH = WH_safe_X.copy() if beta_loss - 1. < 0: WH[WH == 0] = EPSILON # to avoid division by zero if beta_loss - 2. < 0: WH_safe_X_data[WH_safe_X_data == 0] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: # speeds up computation time # refer to /numpy/numpy/issues/9363 WH_safe_X_data **= -1 WH_safe_X_data **= 2 # element-wise multiplication WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 # element-wise multiplication WH_safe_X_data *= X_data # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X) numerator = safe_sparse_dot(W.T, WH_safe_X) # Denominator if beta_loss == 1: W_sum = np.sum(W, axis=0) # shape(n_components, ) W_sum[W_sum == 0] = 1. 
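            # With the KL divergence (beta_loss == 1) the H-update denominator
            # reduces to the column sums of W, broadcast over features; the
            # clamp to 1. above keeps all-zero components from dividing by
            # zero.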
denominator = W_sum[:, np.newaxis] # beta_loss not in (1, 2) else: # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1) if sp.issparse(X): # memory efficient computation # (compute column by column, avoiding the dense matrix WH) WtWH = np.empty(H.shape) for i in range(X.shape[1]): WHi = np.dot(W, H[:, i]) if beta_loss - 1 < 0: WHi[WHi == 0] = EPSILON WHi **= beta_loss - 1 WtWH[:, i] = np.dot(W.T, WHi) else: WH **= beta_loss - 1 WtWH = np.dot(W.T, WH) denominator = WtWH # Add L1 and L2 regularization if l1_reg_H > 0: denominator += l1_reg_H if l2_reg_H > 0: denominator = denominator + l2_reg_H * H denominator[denominator == 0] = EPSILON numerator /= denominator delta_H = numerator # gamma is in ]0, 1] if gamma != 1: delta_H **= gamma return delta_H def _fit_multiplicative_update(X, W, H, beta_loss='frobenius', max_iter=200, tol=1e-4, l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True, verbose=0): """Compute Non-negative Matrix Factorization with Multiplicative Update The objective function is _beta_divergence(X, WH) and is minimized with an alternating minimization of W and H. Each minimization is done with a Multiplicative Update. Parameters ---------- X : array-like, shape (n_samples, n_features) Constant input matrix. W : array-like, shape (n_samples, n_components) Initial guess for the solution. H : array-like, shape (n_components, n_features) Initial guess for the solution. beta_loss : float or string, default 'frobenius' String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. Beta divergence to be minimized, measuring the distance between X and the dot product WH. Note that values different from 'frobenius' (or 2) and 'kullback-leibler' (or 1) lead to significantly slower fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input matrix X cannot contain zeros. max_iter : integer, default: 200 Number of iterations. tol : float, default: 1e-4 Tolerance of the stopping condition. l1_reg_W : double, default: 0. L1 regularization parameter for W. l1_reg_H : double, default: 0. L1 regularization parameter for H. l2_reg_W : double, default: 0. L2 regularization parameter for W. l2_reg_H : double, default: 0. L2 regularization parameter for H. update_H : boolean, default: True Set to True, both W and H will be estimated from initial guesses. Set to False, only W will be estimated. verbose : integer, default: 0 The verbosity level. Returns ------- W : array, shape (n_samples, n_components) Solution to the non-negative least squares problem. H : array, shape (n_components, n_features) Solution to the non-negative least squares problem. n_iter : int The number of iterations done by the algorithm. References ---------- Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix factorization with the beta-divergence. Neural Computation, 23(9). """ start_time = time.time() beta_loss = _beta_loss_to_float(beta_loss) # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] if beta_loss < 1: gamma = 1. / (2. - beta_loss) elif beta_loss > 2: gamma = 1. / (beta_loss - 1.) else: gamma = 1. 
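    # Raising each multiplicative update to the power ``gamma`` gives the
    # Majorization-Minimization variant of [Fevotte 2011]; for beta outside
    # [1, 2] this exponent keeps every iteration monotonically decreasing
    # the beta-divergence.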
# used for the convergence criterion error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True) previous_error = error_at_init H_sum, HHt, XHt = None, None, None for n_iter in range(1, max_iter + 1): # update W # H_sum, HHt and XHt are saved and reused if not update_H delta_W, H_sum, HHt, XHt = _multiplicative_update_w( X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma, H_sum, HHt, XHt, update_H) W *= delta_W # necessary for stability with beta_loss < 1 if beta_loss < 1: W[W < np.finfo(np.float64).eps] = 0. # update H if update_H: delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma) H *= delta_H # These values will be recomputed since H changed H_sum, HHt, XHt = None, None, None # necessary for stability with beta_loss < 1 if beta_loss <= 1: H[H < np.finfo(np.float64).eps] = 0. # test convergence criterion every 10 iterations if tol > 0 and n_iter % 10 == 0: error = _beta_divergence(X, W, H, beta_loss, square_root=True) if verbose: iter_time = time.time() print("Epoch %02d reached after %.3f seconds, error: %f" % (n_iter, iter_time - start_time, error)) if (previous_error - error) / error_at_init < tol: break previous_error = error # do not print if we have already printed in the convergence test if verbose and (tol == 0 or n_iter % 10 != 0): end_time = time.time() print("Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)) return W, H, n_iter def non_negative_factorization(X, W=None, H=None, n_components=None, init='random', update_H=True, solver='cd', beta_loss='frobenius', tol=1e-4, max_iter=200, alpha=0., l1_ratio=0., regularization=None, random_state=None, verbose=0, shuffle=False): """Compute Non-negative Matrix Factorization (NMF) Find two non-negative matrices (W, H) whose product approximates the non- negative matrix X. This factorization can be used for example for dimensionality reduction, source separation or topic extraction. The objective function is:: 0.5 * ||X - WH||_Fro^2 + alpha * l1_ratio * ||vec(W)||_1 + alpha * l1_ratio * ||vec(H)||_1 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2 Where:: ||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm) ||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm) For multiplicative-update ('mu') solver, the Frobenius norm (0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss, by changing the beta_loss parameter. The objective function is minimized with an alternating minimization of W and H. If H is given and update_H=False, it solves for W only. Parameters ---------- X : array-like, shape (n_samples, n_features) Constant matrix. W : array-like, shape (n_samples, n_components) If init='custom', it is used as initial guess for the solution. H : array-like, shape (n_components, n_features) If init='custom', it is used as initial guess for the solution. If update_H=False, it is used as a constant, to solve for W only. n_components : integer Number of components, if n_components is not set all features are kept. init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom' Method used to initialize the procedure. Default: 'nndsvd' if n_components < n_features, otherwise random. 
Valid options: - 'random': non-negative random matrices, scaled with: sqrt(X.mean() / n_components) - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) initialization (better for sparseness) - 'nndsvda': NNDSVD with zeros filled with the average of X (better when sparsity is not desired) - 'nndsvdar': NNDSVD with zeros filled with small random values (generally faster, less accurate alternative to NNDSVDa for when sparsity is not desired) - 'custom': use custom matrices W and H update_H : boolean, default: True Set to True, both W and H will be estimated from initial guesses. Set to False, only W will be estimated. solver : 'cd' | 'mu' Numerical solver to use: 'cd' is a Coordinate Descent solver. 'mu' is a Multiplicative Update solver. .. versionadded:: 0.17 Coordinate Descent solver. .. versionadded:: 0.19 Multiplicative Update solver. beta_loss : float or string, default 'frobenius' String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. Beta divergence to be minimized, measuring the distance between X and the dot product WH. Note that values different from 'frobenius' (or 2) and 'kullback-leibler' (or 1) lead to significantly slower fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input matrix X cannot contain zeros. Used only in 'mu' solver. .. versionadded:: 0.19 tol : float, default: 1e-4 Tolerance of the stopping condition. max_iter : integer, default: 200 Maximum number of iterations before timing out. alpha : double, default: 0. Constant that multiplies the regularization terms. l1_ratio : double, default: 0. The regularization mixing parameter, with 0 <= l1_ratio <= 1. For l1_ratio = 0 the penalty is an elementwise L2 penalty (aka Frobenius Norm). For l1_ratio = 1 it is an elementwise L1 penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. regularization : 'both' | 'components' | 'transformation' | None Select whether the regularization affects the components (H), the transformation (W), both or none of them. random_state : int, RandomState instance or None, optional, default: None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. verbose : integer, default: 0 The verbosity level. shuffle : boolean, default: False If true, randomize the order of coordinates in the CD solver. Returns ------- W : array-like, shape (n_samples, n_components) Solution to the non-negative least squares problem. H : array-like, shape (n_components, n_features) Solution to the non-negative least squares problem. n_iter : int Actual number of iterations. Examples -------- >>> import numpy as np >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) >>> from sklearn.decomposition import non_negative_factorization >>> W, H, n_iter = non_negative_factorization(X, n_components=2, \ init='random', random_state=0) References ---------- Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for large scale nonnegative matrix and tensor factorizations." IEICE transactions on fundamentals of electronics, communications and computer sciences 92.3: 708-721, 2009. Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix factorization with the beta-divergence. Neural Computation, 23(9). 
""" X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float) check_non_negative(X, "NMF (input X)") beta_loss = _check_string_param(solver, regularization, beta_loss, init) if safe_min(X) == 0 and beta_loss <= 0: raise ValueError("When beta_loss <= 0 and X contains zeros, " "the solver may diverge. Please add small values to " "X, or use a positive beta_loss.") n_samples, n_features = X.shape if n_components is None: n_components = n_features if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0: raise ValueError("Number of components must be a positive integer;" " got (n_components=%r)" % n_components) if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0: raise ValueError("Maximum number of iterations must be a positive " "integer; got (max_iter=%r)" % max_iter) if not isinstance(tol, numbers.Number) or tol < 0: raise ValueError("Tolerance for stopping criteria must be " "positive; got (tol=%r)" % tol) # check W and H, or initialize them if init == 'custom' and update_H: _check_init(H, (n_components, n_features), "NMF (input H)") _check_init(W, (n_samples, n_components), "NMF (input W)") elif not update_H: _check_init(H, (n_components, n_features), "NMF (input H)") # 'mu' solver should not be initialized by zeros if solver == 'mu': avg = np.sqrt(X.mean() / n_components) W = avg * np.ones((n_samples, n_components)) else: W = np.zeros((n_samples, n_components)) else: W, H = _initialize_nmf(X, n_components, init=init, random_state=random_state) l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = _compute_regularization( alpha, l1_ratio, regularization) if solver == 'cd': W, H, n_iter = _fit_coordinate_descent(X, W, H, tol, max_iter, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H=update_H, verbose=verbose, shuffle=shuffle, random_state=random_state) elif solver == 'mu': W, H, n_iter = _fit_multiplicative_update(X, W, H, beta_loss, max_iter, tol, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H, verbose) else: raise ValueError("Invalid solver parameter '%s'." % solver) if n_iter == max_iter and tol > 0: warnings.warn("Maximum number of iteration %d reached. Increase it to" " improve convergence." % max_iter, ConvergenceWarning) return W, H, n_iter class NMF(BaseEstimator, TransformerMixin): """Non-Negative Matrix Factorization (NMF) Find two non-negative matrices (W, H) whose product approximates the non- negative matrix X. This factorization can be used for example for dimensionality reduction, source separation or topic extraction. The objective function is:: 0.5 * ||X - WH||_Fro^2 + alpha * l1_ratio * ||vec(W)||_1 + alpha * l1_ratio * ||vec(H)||_1 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2 Where:: ||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm) ||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm) For multiplicative-update ('mu') solver, the Frobenius norm (0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss, by changing the beta_loss parameter. The objective function is minimized with an alternating minimization of W and H. Read more in the :ref:`User Guide <NMF>`. Parameters ---------- n_components : int or None Number of components, if n_components is not set all features are kept. init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom' Method used to initialize the procedure. Default: 'nndsvd' if n_components < n_features, otherwise random. 
Valid options: - 'random': non-negative random matrices, scaled with: sqrt(X.mean() / n_components) - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD) initialization (better for sparseness) - 'nndsvda': NNDSVD with zeros filled with the average of X (better when sparsity is not desired) - 'nndsvdar': NNDSVD with zeros filled with small random values (generally faster, less accurate alternative to NNDSVDa for when sparsity is not desired) - 'custom': use custom matrices W and H solver : 'cd' | 'mu' Numerical solver to use: 'cd' is a Coordinate Descent solver. 'mu' is a Multiplicative Update solver. .. versionadded:: 0.17 Coordinate Descent solver. .. versionadded:: 0.19 Multiplicative Update solver. beta_loss : float or string, default 'frobenius' String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. Beta divergence to be minimized, measuring the distance between X and the dot product WH. Note that values different from 'frobenius' (or 2) and 'kullback-leibler' (or 1) lead to significantly slower fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input matrix X cannot contain zeros. Used only in 'mu' solver. .. versionadded:: 0.19 tol : float, default: 1e-4 Tolerance of the stopping condition. max_iter : integer, default: 200 Maximum number of iterations before timing out. random_state : int, RandomState instance or None, optional, default: None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. alpha : double, default: 0. Constant that multiplies the regularization terms. Set it to zero to have no regularization. .. versionadded:: 0.17 *alpha* used in the Coordinate Descent solver. l1_ratio : double, default: 0. The regularization mixing parameter, with 0 <= l1_ratio <= 1. For l1_ratio = 0 the penalty is an elementwise L2 penalty (aka Frobenius Norm). For l1_ratio = 1 it is an elementwise L1 penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2. .. versionadded:: 0.17 Regularization parameter *l1_ratio* used in the Coordinate Descent solver. verbose : bool, default=False Whether to be verbose. shuffle : boolean, default: False If true, randomize the order of coordinates in the CD solver. .. versionadded:: 0.17 *shuffle* parameter used in the Coordinate Descent solver. Attributes ---------- components_ : array, [n_components, n_features] Factorization matrix, sometimes called 'dictionary'. reconstruction_err_ : number Frobenius norm of the matrix difference, or beta-divergence, between the training data ``X`` and the reconstructed data ``WH`` from the fitted model. n_iter_ : int Actual number of iterations. Examples -------- >>> import numpy as np >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]]) >>> from sklearn.decomposition import NMF >>> model = NMF(n_components=2, init='random', random_state=0) >>> W = model.fit_transform(X) >>> H = model.components_ References ---------- Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for large scale nonnegative matrix and tensor factorizations." IEICE transactions on fundamentals of electronics, communications and computer sciences 92.3: 708-721, 2009. Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix factorization with the beta-divergence. Neural Computation, 23(9). 
""" def __init__(self, n_components=None, init=None, solver='cd', beta_loss='frobenius', tol=1e-4, max_iter=200, random_state=None, alpha=0., l1_ratio=0., verbose=0, shuffle=False): self.n_components = n_components self.init = init self.solver = solver self.beta_loss = beta_loss self.tol = tol self.max_iter = max_iter self.random_state = random_state self.alpha = alpha self.l1_ratio = l1_ratio self.verbose = verbose self.shuffle = shuffle def fit_transform(self, X, y=None, W=None, H=None): """Learn a NMF model for the data X and returns the transformed data. This is more efficient than calling fit followed by transform. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data matrix to be decomposed y : Ignored. W : array-like, shape (n_samples, n_components) If init='custom', it is used as initial guess for the solution. H : array-like, shape (n_components, n_features) If init='custom', it is used as initial guess for the solution. Returns ------- W : array, shape (n_samples, n_components) Transformed data. """ X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float) W, H, n_iter_ = non_negative_factorization( X=X, W=W, H=H, n_components=self.n_components, init=self.init, update_H=True, solver=self.solver, beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both', random_state=self.random_state, verbose=self.verbose, shuffle=self.shuffle) self.reconstruction_err_ = _beta_divergence(X, W, H, self.beta_loss, square_root=True) self.n_components_ = H.shape[0] self.components_ = H self.n_iter_ = n_iter_ return W def fit(self, X, y=None, **params): """Learn a NMF model for the data X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data matrix to be decomposed y : Ignored. Returns ------- self """ self.fit_transform(X, **params) return self def transform(self, X): """Transform the data X according to the fitted NMF model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data matrix to be transformed by the model Returns ------- W : array, shape (n_samples, n_components) Transformed data """ check_is_fitted(self, 'n_components_') W, _, n_iter_ = non_negative_factorization( X=X, W=None, H=self.components_, n_components=self.n_components_, init=self.init, update_H=False, solver=self.solver, beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both', random_state=self.random_state, verbose=self.verbose, shuffle=self.shuffle) return W def inverse_transform(self, W): """Transform data back to its original space. Parameters ---------- W : {array-like, sparse matrix}, shape (n_samples, n_components) Transformed data matrix Returns ------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data matrix of original shape .. versionadded:: 0.18 """ check_is_fitted(self, 'n_components_') return np.dot(W, self.components_)
45,648
33.980077
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/factor_analysis.py
"""Factor Analysis. A latent linear variable model. FactorAnalysis is similar to probabilistic PCA implemented by PCA.score While PCA assumes Gaussian noise with the same variance for each feature, the FactorAnalysis model assumes different variances for each of them. This implementation is based on David Barber's Book, Bayesian Reasoning and Machine Learning, http://www.cs.ucl.ac.uk/staff/d.barber/brml, Algorithm 21.1 """ # Author: Christian Osendorfer <[email protected]> # Alexandre Gramfort <[email protected]> # Denis A. Engemann <[email protected]> # License: BSD3 import warnings from math import sqrt, log import numpy as np from scipy import linalg from ..base import BaseEstimator, TransformerMixin from ..externals.six.moves import xrange from ..utils import check_array, check_random_state from ..utils.extmath import fast_logdet, randomized_svd, squared_norm from ..utils.validation import check_is_fitted from ..exceptions import ConvergenceWarning class FactorAnalysis(BaseEstimator, TransformerMixin): """Factor Analysis (FA) A simple linear generative model with Gaussian latent variables. The observations are assumed to be caused by a linear transformation of lower dimensional latent factors and added Gaussian noise. Without loss of generality the factors are distributed according to a Gaussian with zero mean and unit covariance. The noise is also zero mean and has an arbitrary diagonal covariance matrix. If we would restrict the model further, by assuming that the Gaussian noise is even isotropic (all diagonal entries are the same) we would obtain :class:`PPCA`. FactorAnalysis performs a maximum likelihood estimate of the so-called `loading` matrix, the transformation of the latent variables to the observed ones, using expectation-maximization (EM). Read more in the :ref:`User Guide <FA>`. Parameters ---------- n_components : int | None Dimensionality of latent space, the number of components of ``X`` that are obtained after ``transform``. If None, n_components is set to the number of features. tol : float Stopping tolerance for EM algorithm. copy : bool Whether to make a copy of X. If ``False``, the input X gets overwritten during fitting. max_iter : int Maximum number of iterations. noise_variance_init : None | array, shape=(n_features,) The initial guess of the noise variance for each feature. If None, it defaults to np.ones(n_features) svd_method : {'lapack', 'randomized'} Which SVD method to use. If 'lapack' use standard SVD from scipy.linalg, if 'randomized' use fast ``randomized_svd`` function. Defaults to 'randomized'. For most applications 'randomized' will be sufficiently precise while providing significant speed gains. Accuracy can also be improved by setting higher values for `iterated_power`. If this is not sufficient, for maximum precision you should choose 'lapack'. iterated_power : int, optional Number of iterations for the power method. 3 by default. Only used if ``svd_method`` equals 'randomized' random_state : int, RandomState instance or None, optional (default=0) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Only used when ``svd_method`` equals 'randomized'. Attributes ---------- components_ : array, [n_components, n_features] Components with maximum variance. loglike_ : list, [n_iterations] The log likelihood at each iteration. 
noise_variance_ : array, shape=(n_features,)
        The estimated noise variance for each feature.

    n_iter_ : int
        Number of iterations run.

    References
    ----------
    .. David Barber, Bayesian Reasoning and Machine Learning,
        Algorithm 21.1

    .. Christopher M. Bishop: Pattern Recognition and Machine Learning,
        Chapter 12.2.4

    See also
    --------
    PCA: Principal component analysis is also a latent linear variable model
        which however assumes equal noise variance for each feature.
        This extra assumption makes probabilistic PCA faster as it can be
        computed in closed form.
    FastICA: Independent component analysis, a latent variable model with
        non-Gaussian latent variables.
    """
    def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
                 noise_variance_init=None, svd_method='randomized',
                 iterated_power=3, random_state=0):
        self.n_components = n_components
        self.copy = copy
        self.tol = tol
        self.max_iter = max_iter
        if svd_method not in ['lapack', 'randomized']:
            raise ValueError('SVD method %s is not supported. Please consider'
                             ' the documentation' % svd_method)
        self.svd_method = svd_method

        self.noise_variance_init = noise_variance_init
        self.iterated_power = iterated_power
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the FactorAnalysis model to X using EM

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : Ignored.

        Returns
        -------
        self
        """
        X = check_array(X, copy=self.copy, dtype=np.float64)

        n_samples, n_features = X.shape
        n_components = self.n_components
        if n_components is None:
            n_components = n_features
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_

        # some constant terms
        nsqrt = sqrt(n_samples)
        llconst = n_features * log(2. * np.pi) + n_components
        var = np.var(X, axis=0)

        if self.noise_variance_init is None:
            psi = np.ones(n_features, dtype=X.dtype)
        else:
            if len(self.noise_variance_init) != n_features:
                raise ValueError("noise_variance_init dimension does not "
                                 "agree with number of features : %d != %d" %
                                 (len(self.noise_variance_init), n_features))
            psi = np.array(self.noise_variance_init)

        loglike = []
        old_ll = -np.inf
        SMALL = 1e-12

        # we'll modify svd outputs to return unexplained variance
        # to allow for unified computation of loglikelihood
        if self.svd_method == 'lapack':
            def my_svd(X):
                _, s, V = linalg.svd(X, full_matrices=False)
                return (s[:n_components], V[:n_components],
                        squared_norm(s[n_components:]))
        elif self.svd_method == 'randomized':
            random_state = check_random_state(self.random_state)

            def my_svd(X):
                _, s, V = randomized_svd(X, n_components,
                                         random_state=random_state,
                                         n_iter=self.iterated_power)
                return s, V, squared_norm(X) - squared_norm(s)
        else:
            raise ValueError('SVD method %s is not supported. Please consider'
                             ' the documentation' % self.svd_method)

        for i in xrange(self.max_iter):
            # SMALL helps numerics
            sqrt_psi = np.sqrt(psi) + SMALL
            s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
            s **= 2
            # Use 'maximum' here to avoid sqrt problems.
            W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
            del V
            W *= sqrt_psi

            # loglikelihood
            ll = llconst + np.sum(np.log(s))
            ll += unexp_var + np.sum(np.log(psi))
            ll *= -n_samples / 2.
            loglike.append(ll)
            if (ll - old_ll) < self.tol:
                break
            old_ll = ll

            psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
        else:
            warnings.warn('FactorAnalysis did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations.',
                          ConvergenceWarning)

        self.components_ = W
        self.noise_variance_ = psi
        self.loglike_ = loglike
        self.n_iter_ = i + 1
        return self

    def transform(self, X):
        """Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables. See Barber, 21.2.33 (or Bishop, 12.66). Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. Returns ------- X_new : array-like, shape (n_samples, n_components) The latent variables of X. """ check_is_fitted(self, 'components_') X = check_array(X) Ih = np.eye(len(self.components_)) X_transformed = X - self.mean_ Wpsi = self.components_ / self.noise_variance_ cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T)) tmp = np.dot(X_transformed, Wpsi.T) X_transformed = np.dot(tmp, cov_z) return X_transformed def get_covariance(self): """Compute data covariance with the FactorAnalysis model. ``cov = components_.T * components_ + diag(noise_variance)`` Returns ------- cov : array, shape (n_features, n_features) Estimated covariance of data. """ check_is_fitted(self, 'components_') cov = np.dot(self.components_.T, self.components_) cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace return cov def get_precision(self): """Compute data precision matrix with the FactorAnalysis model. Returns ------- precision : array, shape (n_features, n_features) Estimated precision of data. """ check_is_fitted(self, 'components_') n_features = self.components_.shape[1] # handle corner cases first if self.n_components == 0: return np.diag(1. / self.noise_variance_) if self.n_components == n_features: return linalg.inv(self.get_covariance()) # Get precision using matrix inversion lemma components_ = self.components_ precision = np.dot(components_ / self.noise_variance_, components_.T) precision.flat[::len(precision) + 1] += 1. precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) precision /= self.noise_variance_[:, np.newaxis] precision /= -self.noise_variance_[np.newaxis, :] precision.flat[::len(precision) + 1] += 1. / self.noise_variance_ return precision def score_samples(self, X): """Compute the log-likelihood of each sample Parameters ---------- X : array, shape (n_samples, n_features) The data Returns ------- ll : array, shape (n_samples,) Log-likelihood of each sample under the current model """ check_is_fitted(self, 'components_') Xr = X - self.mean_ precision = self.get_precision() n_features = X.shape[1] log_like = np.zeros(X.shape[0]) log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1) log_like -= .5 * (n_features * log(2. * np.pi) - fast_logdet(precision)) return log_like def score(self, X, y=None): """Compute the average log-likelihood of the samples Parameters ---------- X : array, shape (n_samples, n_features) The data y : Ignored. Returns ------- ll : float Average log-likelihood of the samples under the current model """ return np.mean(self.score_samples(X))
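# A usage sketch of the estimator above, fit on synthetic data drawn from its
# own generative model (loadings plus heteroscedastic noise). All sizes,
# seeds and noise scales are assumptions chosen purely for illustration.
import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.RandomState(0)
n_samples, n_features, n_factors = 500, 10, 3
loadings = rng.randn(n_factors, n_features)
latent = rng.randn(n_samples, n_factors)
noise = rng.randn(n_samples, n_features) * rng.uniform(0.5, 1.5, n_features)
X = latent.dot(loadings) + noise

fa = FactorAnalysis(n_components=n_factors, random_state=0).fit(X)
Z = fa.transform(X)                      # (500, 3) expected latent means
print(fa.noise_variance_.shape)          # (10,) per-feature noise variance
print(fa.score(X))                       # average log-likelihood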
12,222
33.823362
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/incremental_pca.py
"""Incremental Principal Components Analysis.""" # Author: Kyle Kastner <[email protected]> # Giorgio Patrini # License: BSD 3 clause import numpy as np from scipy import linalg from .base import _BasePCA from ..utils import check_array, gen_batches from ..utils.extmath import svd_flip, _incremental_mean_and_var class IncrementalPCA(_BasePCA): """Incremental principal components analysis (IPCA). Linear dimensionality reduction using Singular Value Decomposition of centered data, keeping only the most significant singular vectors to project the data to a lower dimensional space. Depending on the size of the input data, this algorithm can be much more memory efficient than a PCA. This algorithm has constant memory complexity, on the order of ``batch_size``, enabling use of np.memmap files without loading the entire file into memory. The computational overhead of each SVD is ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples remain in memory at a time. There will be ``n_samples / batch_size`` SVD computations to get the principal components, versus 1 large SVD of complexity ``O(n_samples * n_features ** 2)`` for PCA. Read more in the :ref:`User Guide <IncrementalPCA>`. Parameters ---------- n_components : int or None, (default=None) Number of components to keep. If ``n_components `` is ``None``, then ``n_components`` is set to ``min(n_samples, n_features)``. whiten : bool, optional When True (False by default) the ``components_`` vectors are divided by ``n_samples`` times ``components_`` to ensure uncorrelated outputs with unit component-wise variances. Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometimes improve the predictive accuracy of the downstream estimators by making data respect some hard-wired assumptions. copy : bool, (default=True) If False, X will be overwritten. ``copy=False`` can be used to save memory but is unsafe for general use. batch_size : int or None, (default=None) The number of samples to use for each batch. Only used when calling ``fit``. If ``batch_size`` is ``None``, then ``batch_size`` is inferred from the data and set to ``5 * n_features``, to provide a balance between approximation accuracy and memory consumption. Attributes ---------- components_ : array, shape (n_components, n_features) Components with maximum variance. explained_variance_ : array, shape (n_components,) Variance explained by each of the selected components. explained_variance_ratio_ : array, shape (n_components,) Percentage of variance explained by each of the selected components. If all components are stored, the sum of explained variances is equal to 1.0. singular_values_ : array, shape (n_components,) The singular values corresponding to each of the selected components. The singular values are equal to the 2-norms of the ``n_components`` variables in the lower-dimensional space. mean_ : array, shape (n_features,) Per-feature empirical mean, aggregate over calls to ``partial_fit``. var_ : array, shape (n_features,) Per-feature empirical variance, aggregate over calls to ``partial_fit``. noise_variance_ : float The estimated noise covariance following the Probabilistic PCA model from Tipping and Bishop 1999. See "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf. n_components_ : int The estimated number of components. Relevant when ``n_components=None``. n_samples_seen_ : int The number of samples processed by the estimator. 
Will be reset on new calls to fit, but increments across ``partial_fit`` calls. Notes ----- Implements the incremental PCA model from: `D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3, pp. 125-141, May 2008.` See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf This model is an extension of the Sequential Karhunen-Loeve Transform from: `A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and its Application to Images, IEEE Transactions on Image Processing, Volume 9, Number 8, pp. 1371-1374, August 2000.` See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf We have specifically abstained from an optimization used by authors of both papers, a QR decomposition used in specific situations to reduce the algorithmic complexity of the SVD. The source for this technique is `Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5, section 5.4.4, pp 252-253.`. This technique has been omitted because it is advantageous only when decomposing a matrix with ``n_samples`` (rows) >= 5/3 * ``n_features`` (columns), and hurts the readability of the implemented algorithm. This would be a good opportunity for future optimization, if it is deemed necessary. References ---------- D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3, pp. 125-141, May 2008. G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5, Section 5.4.4, pp. 252-253. See also -------- PCA RandomizedPCA KernelPCA SparsePCA TruncatedSVD """ def __init__(self, n_components=None, whiten=False, copy=True, batch_size=None): self.n_components = n_components self.whiten = whiten self.copy = copy self.batch_size = batch_size def fit(self, X, y=None): """Fit the model with X, using minibatches of size batch_size. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : Ignored. Returns ------- self : object Returns the instance itself. """ self.components_ = None self.n_samples_seen_ = 0 self.mean_ = .0 self.var_ = .0 self.singular_values_ = None self.explained_variance_ = None self.explained_variance_ratio_ = None self.singular_values_ = None self.noise_variance_ = None X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32]) n_samples, n_features = X.shape if self.batch_size is None: self.batch_size_ = 5 * n_features else: self.batch_size_ = self.batch_size for batch in gen_batches(n_samples, self.batch_size_): self.partial_fit(X[batch], check_input=False) return self def partial_fit(self, X, y=None, check_input=True): """Incremental fit with X. All of X is processed as a single batch. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. check_input : bool Run check_array on X. y : Ignored. Returns ------- self : object Returns the instance itself. 
""" if check_input: X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32]) n_samples, n_features = X.shape if not hasattr(self, 'components_'): self.components_ = None if self.n_components is None: self.n_components_ = n_features elif not 1 <= self.n_components <= n_features: raise ValueError("n_components=%r invalid for n_features=%d, need " "more rows than columns for IncrementalPCA " "processing" % (self.n_components, n_features)) else: self.n_components_ = self.n_components if (self.components_ is not None) and (self.components_.shape[0] != self.n_components_): raise ValueError("Number of input features has changed from %i " "to %i between calls to partial_fit! Try " "setting n_components to a fixed value." % (self.components_.shape[0], self.n_components_)) # This is the first partial_fit if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = 0 self.mean_ = .0 self.var_ = .0 # Update stats - they are 0 if this is the fisrt step col_mean, col_var, n_total_samples = \ _incremental_mean_and_var(X, last_mean=self.mean_, last_variance=self.var_, last_sample_count=self.n_samples_seen_) # Whitening if self.n_samples_seen_ == 0: # If it is the first step, simply whiten X X -= col_mean else: col_batch_mean = np.mean(X, axis=0) X -= col_batch_mean # Build matrix of combined previous basis and new data mean_correction = \ np.sqrt((self.n_samples_seen_ * n_samples) / n_total_samples) * (self.mean_ - col_batch_mean) X = np.vstack((self.singular_values_.reshape((-1, 1)) * self.components_, X, mean_correction)) U, S, V = linalg.svd(X, full_matrices=False) U, V = svd_flip(U, V, u_based_decision=False) explained_variance = S ** 2 / (n_total_samples - 1) explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples) self.n_samples_seen_ = n_total_samples self.components_ = V[:self.n_components_] self.singular_values_ = S[:self.n_components_] self.mean_ = col_mean self.var_ = col_var self.explained_variance_ = explained_variance[:self.n_components_] self.explained_variance_ratio_ = \ explained_variance_ratio[:self.n_components_] if self.n_components_ < n_features: self.noise_variance_ = \ explained_variance[self.n_components_:].mean() else: self.noise_variance_ = 0. return self
10,795
38.258182
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/pca.py
""" Principal Component Analysis """ # Author: Alexandre Gramfort <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Denis A. Engemann <[email protected]> # Michael Eickenberg <[email protected]> # Giorgio Patrini <[email protected]> # # License: BSD 3 clause from math import log, sqrt import numpy as np from scipy import linalg from scipy.special import gammaln from scipy.sparse import issparse from scipy.sparse.linalg import svds from ..externals import six from .base import _BasePCA from ..base import BaseEstimator, TransformerMixin from ..utils import deprecated from ..utils import check_random_state, as_float_array from ..utils import check_array from ..utils.extmath import fast_logdet, randomized_svd, svd_flip from ..utils.extmath import stable_cumsum from ..utils.validation import check_is_fitted def _assess_dimension_(spectrum, rank, n_samples, n_features): """Compute the likelihood of a rank ``rank`` dataset The dataset is assumed to be embedded in gaussian noise of shape(n, dimf) having spectrum ``spectrum``. Parameters ---------- spectrum : array of shape (n) Data spectrum. rank : int Tested rank value. n_samples : int Number of samples. n_features : int Number of features. Returns ------- ll : float, The log-likelihood Notes ----- This implements the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604` """ if rank > len(spectrum): raise ValueError("The tested rank cannot exceed the rank of the" " dataset") pu = -rank * log(2.) for i in range(rank): pu += (gammaln((n_features - i) / 2.) - log(np.pi) * (n_features - i) / 2.) pl = np.sum(np.log(spectrum[:rank])) pl = -pl * n_samples / 2. if rank == n_features: pv = 0 v = 1 else: v = np.sum(spectrum[rank:]) / (n_features - rank) pv = -np.log(v) * n_samples * (n_features - rank) / 2. m = n_features * rank - rank * (rank + 1.) / 2. pp = log(2. * np.pi) * (m + rank + 1.) / 2. pa = 0. spectrum_ = spectrum.copy() spectrum_[rank:n_features] = v for i in range(rank): for j in range(i + 1, len(spectrum)): pa += log((spectrum[i] - spectrum[j]) * (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples) ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2. return ll def _infer_dimension_(spectrum, n_samples, n_features): """Infers the dimension of a dataset of shape (n_samples, n_features) The dataset is described by its spectrum `spectrum`. """ n_spectrum = len(spectrum) ll = np.empty(n_spectrum) for rank in range(n_spectrum): ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features) return ll.argmax() class PCA(_BasePCA): """Principal component analysis (PCA) Linear dimensionality reduction using Singular Value Decomposition of the data to project it to a lower dimensional space. It uses the LAPACK implementation of the full SVD or a randomized truncated SVD by the method of Halko et al. 2009, depending on the shape of the input data and the number of components to extract. It can also use the scipy.sparse.linalg ARPACK implementation of the truncated SVD. Notice that this class does not support sparse input. See :class:`TruncatedSVD` for an alternative with sparse data. Read more in the :ref:`User Guide <PCA>`. Parameters ---------- n_components : int, float, None or string Number of components to keep. 
        if n_components is not set all components are kept::

            n_components == min(n_samples, n_features)

        if n_components == 'mle' and svd_solver == 'full', Minka's MLE is used
        to guess the dimension
        if ``0 < n_components < 1`` and svd_solver == 'full', select the number
        of components such that the amount of variance that needs to be
        explained is greater than the percentage specified by n_components
        n_components cannot be equal to n_features for svd_solver == 'arpack'.

    copy : bool (default True)
        If False, data passed to fit are overwritten and running
        fit(X).transform(X) will not yield the expected results,
        use fit_transform(X) instead.

    whiten : bool, optional (default False)
        When True (False by default) the `components_` vectors are multiplied
        by the square root of n_samples and then divided by the singular
        values to ensure uncorrelated outputs with unit component-wise
        variances.

        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.

    svd_solver : string {'auto', 'full', 'arpack', 'randomized'}
        auto :
            the solver is selected by a default policy based on `X.shape` and
            `n_components`: if the input data is larger than 500x500 and the
            number of components to extract is lower than 80% of the smallest
            dimension of the data, then the more efficient 'randomized'
            method is enabled. Otherwise the exact full SVD is computed and
            optionally truncated afterwards.
        full :
            run exact full SVD calling the standard LAPACK solver via
            `scipy.linalg.svd` and select the components by postprocessing
        arpack :
            run SVD truncated to n_components calling ARPACK solver via
            `scipy.sparse.linalg.svds`. It requires strictly
            0 < n_components < X.shape[1]
        randomized :
            run randomized SVD by the method of Halko et al.

        .. versionadded:: 0.18.0

    tol : float >= 0, optional (default .0)
        Tolerance for singular values computed by svd_solver == 'arpack'.

        .. versionadded:: 0.18.0

    iterated_power : int >= 0, or 'auto', (default 'auto')
        Number of iterations for the power method computed by
        svd_solver == 'randomized'.

        .. versionadded:: 0.18.0

    random_state : int, RandomState instance or None, optional (default None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``svd_solver`` == 'arpack' or 'randomized'.

        .. versionadded:: 0.18.0

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Principal axes in feature space, representing the directions of
        maximum variance in the data. The components are sorted by
        ``explained_variance_``.

    explained_variance_ : array, shape (n_components,)
        The amount of variance explained by each of the selected components.

        Equal to the n_components largest eigenvalues
        of the covariance matrix of X.

        .. versionadded:: 0.18

    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.

        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0.

    singular_values_ : array, shape (n_components,)
        The singular values corresponding to each of the selected components.
        The singular values are equal to the 2-norms of the ``n_components``
        variables in the lower-dimensional space.
    mean_ : array, shape (n_features,)
        Per-feature empirical mean, estimated from the training set.

        Equal to `X.mean(axis=0)`.

    n_components_ : int
        The estimated number of components. When n_components is set
        to 'mle' or a number between 0 and 1 (with svd_solver == 'full')
        this number is estimated from input data. Otherwise it equals the
        parameter n_components, or n_features if n_components is None.

    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA model
        from Tipping and Bishop 1999. See "Pattern Recognition and
        Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.

        Equal to the average of (min(n_features, n_samples) - n_components)
        smallest eigenvalues of the covariance matrix of X.

    References
    ----------
    For n_components == 'mle', this class uses the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`

    Implements the probabilistic PCA model from:
    M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
    Journal of the Royal Statistical Society, Series B, 61, Part 3,
    pp. 611-622 via the score and score_samples methods.
    See http://www.miketipping.com/papers/met-mppca.pdf

    For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.

    For svd_solver == 'randomized', see:
    `Finding structure with randomness: Stochastic algorithms
    for constructing approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909)`
    `A randomized algorithm for the decomposition of matrices
    Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.decomposition import PCA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> pca = PCA(n_components=2)
    >>> pca.fit(X)
    PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,
      svd_solver='auto', tol=0.0, whiten=False)
    >>> print(pca.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.99244...  0.00755...]
    >>> print(pca.singular_values_)  # doctest: +ELLIPSIS
    [ 6.30061...  0.54980...]

    >>> pca = PCA(n_components=2, svd_solver='full')
    >>> pca.fit(X)                 # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,
      svd_solver='full', tol=0.0, whiten=False)
    >>> print(pca.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.99244...  0.00755...]
    >>> print(pca.singular_values_)  # doctest: +ELLIPSIS
    [ 6.30061...  0.54980...]

    >>> pca = PCA(n_components=1, svd_solver='arpack')
    >>> pca.fit(X)
    PCA(copy=True, iterated_power='auto', n_components=1, random_state=None,
      svd_solver='arpack', tol=0.0, whiten=False)
    >>> print(pca.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.99244...]
    >>> print(pca.singular_values_)  # doctest: +ELLIPSIS
    [ 6.30061...]

    See also
    --------
    KernelPCA
    SparsePCA
    TruncatedSVD
    IncrementalPCA
    """

    def __init__(self, n_components=None, copy=True, whiten=False,
                 svd_solver='auto', tol=0.0, iterated_power='auto',
                 random_state=None):
        self.n_components = n_components
        self.copy = copy
        self.whiten = whiten
        self.svd_solver = svd_solver
        self.tol = tol
        self.iterated_power = iterated_power
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        Returns
        -------
        self : object
            Returns the instance itself.
""" self._fit(X) return self def fit_transform(self, X, y=None): """Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : Ignored. Returns ------- X_new : array-like, shape (n_samples, n_components) """ U, S, V = self._fit(X) U = U[:, :self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= sqrt(X.shape[0] - 1) else: # X_new = X * V = U * S * V^T * V = U * S U *= S[:self.n_components_] return U def _fit(self, X): """Dispatch to the right submethod depending on the chosen solver.""" # Raise an error for sparse input. # This is more informative than the generic one raised by check_array. if issparse(X): raise TypeError('PCA does not support sparse input. See ' 'TruncatedSVD for a possible alternative.') X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True, copy=self.copy) # Handle n_components==None if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components # Handle svd_solver svd_solver = self.svd_solver if svd_solver == 'auto': # Small problem, just call full PCA if max(X.shape) <= 500: svd_solver = 'full' elif n_components >= 1 and n_components < .8 * min(X.shape): svd_solver = 'randomized' # This is also the case of n_components in (0,1) else: svd_solver = 'full' # Call different fits for either full or truncated SVD if svd_solver == 'full': return self._fit_full(X, n_components) elif svd_solver in ['arpack', 'randomized']: return self._fit_truncated(X, n_components, svd_solver) else: raise ValueError("Unrecognized svd_solver='{0}'" "".format(svd_solver)) def _fit_full(self, X, n_components): """Fit the model by computing full SVD on X""" n_samples, n_features = X.shape if n_components == 'mle': if n_samples < n_features: raise ValueError("n_components='mle' is only supported " "if n_samples >= n_features") elif not 0 <= n_components <= n_features: raise ValueError("n_components=%r must be between 0 and " "n_features=%r with svd_solver='full'" % (n_components, n_features)) # Center data self.mean_ = np.mean(X, axis=0) X -= self.mean_ U, S, V = linalg.svd(X, full_matrices=False) # flip eigenvectors' sign to enforce deterministic output U, V = svd_flip(U, V) components_ = V # Get variance explained by singular values explained_variance_ = (S ** 2) / (n_samples - 1) total_var = explained_variance_.sum() explained_variance_ratio_ = explained_variance_ / total_var singular_values_ = S.copy() # Store the singular values. # Postprocess the number of components required if n_components == 'mle': n_components = \ _infer_dimension_(explained_variance_, n_samples, n_features) elif 0 < n_components < 1.0: # number of components for which the cumulated explained # variance percentage is superior to the desired threshold ratio_cumsum = stable_cumsum(explained_variance_ratio_) n_components = np.searchsorted(ratio_cumsum, n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < min(n_features, n_samples): self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. 
        self.n_samples_, self.n_features_ = n_samples, n_features
        self.components_ = components_[:n_components]
        self.n_components_ = n_components
        self.explained_variance_ = explained_variance_[:n_components]
        self.explained_variance_ratio_ = \
            explained_variance_ratio_[:n_components]
        self.singular_values_ = singular_values_[:n_components]

        return U, S, V

    def _fit_truncated(self, X, n_components, svd_solver):
        """Fit the model by computing truncated SVD (by ARPACK or randomized)
        on X
        """
        n_samples, n_features = X.shape

        if isinstance(n_components, six.string_types):
            raise ValueError("n_components=%r cannot be a string "
                             "with svd_solver='%s'"
                             % (n_components, svd_solver))
        elif not 1 <= n_components <= n_features:
            raise ValueError("n_components=%r must be between 1 and "
                             "n_features=%r with svd_solver='%s'"
                             % (n_components, n_features, svd_solver))
        elif svd_solver == 'arpack' and n_components == n_features:
            raise ValueError("n_components=%r must be strictly less than "
                             "n_features=%r with svd_solver='%s'"
                             % (n_components, n_features, svd_solver))

        random_state = check_random_state(self.random_state)

        # Center data
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_

        if svd_solver == 'arpack':
            # random init solution, as ARPACK does it internally
            v0 = random_state.uniform(-1, 1, size=min(X.shape))
            U, S, V = svds(X, k=n_components, tol=self.tol, v0=v0)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            S = S[::-1]
            # flip eigenvectors' sign to enforce deterministic output
            U, V = svd_flip(U[:, ::-1], V[::-1])

        elif svd_solver == 'randomized':
            # sign flipping is done inside
            U, S, V = randomized_svd(X, n_components=n_components,
                                     n_iter=self.iterated_power,
                                     flip_sign=True,
                                     random_state=random_state)

        self.n_samples_, self.n_features_ = n_samples, n_features
        self.components_ = V
        self.n_components_ = n_components

        # Get variance explained by singular values
        self.explained_variance_ = (S ** 2) / (n_samples - 1)
        total_var = np.var(X, ddof=1, axis=0)
        self.explained_variance_ratio_ = \
            self.explained_variance_ / total_var.sum()
        self.singular_values_ = S.copy()  # Store the singular values.

        if self.n_components_ < min(n_features, n_samples):
            self.noise_variance_ = (total_var.sum() -
                                    self.explained_variance_.sum())
            self.noise_variance_ /= min(n_features, n_samples) - n_components
        else:
            self.noise_variance_ = 0.

        return U, S, V

    def score_samples(self, X):
        """Return the log-likelihood of each sample.

        See "Pattern Recognition and Machine Learning"
        by C. Bishop, 12.2.1 p. 574
        or http://www.miketipping.com/papers/met-mppca.pdf

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            The data.

        Returns
        -------
        ll : array, shape (n_samples,)
            Log-likelihood of each sample under the current model
        """
        check_is_fitted(self, 'mean_')

        X = check_array(X)
        Xr = X - self.mean_
        n_features = X.shape[1]
        log_like = np.zeros(X.shape[0])
        precision = self.get_precision()
        log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
        log_like -= .5 * (n_features * log(2. * np.pi) -
                          fast_logdet(precision))
        return log_like

    def score(self, X, y=None):
        """Return the average log-likelihood of all samples.

        See "Pattern Recognition and Machine Learning"
        by C. Bishop, 12.2.1 p. 574
        or http://www.miketipping.com/papers/met-mppca.pdf

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            The data.

        y : Ignored.
        Returns
        -------
        ll : float
            Average log-likelihood of the samples under the current model
        """
        return np.mean(self.score_samples(X))


@deprecated("RandomizedPCA was deprecated in 0.18 and will be removed in "
            "0.20. "
            "Use PCA(svd_solver='randomized') instead. The new implementation "
            "DOES NOT store whitened ``components_``. Apply transform to get "
            "them.")
class RandomizedPCA(BaseEstimator, TransformerMixin):
    """Principal component analysis (PCA) using randomized SVD

    .. deprecated:: 0.18
        This class will be removed in 0.20.
        Use :class:`PCA` with parameter svd_solver 'randomized' instead.
        The new implementation DOES NOT store whitened ``components_``.
        Apply transform to get them.

    Linear dimensionality reduction using approximated Singular Value
    Decomposition of the data and keeping only the most significant
    singular vectors to project the data to a lower dimensional space.

    Read more in the :ref:`User Guide <RandomizedPCA>`.

    Parameters
    ----------
    n_components : int, optional
        Maximum number of components to keep. When not given or None, this
        is set to n_features (the second dimension of the training data).

    copy : bool
        If False, data passed to fit are overwritten and running
        fit(X).transform(X) will not yield the expected results,
        use fit_transform(X) instead.

    iterated_power : int, default=2
        Number of iterations for the power method.

        .. versionchanged:: 0.18

    whiten : bool, optional
        When True (False by default) the `components_` vectors are multiplied
        by the square root of (n_samples) and divided by the singular values
        to ensure uncorrelated outputs with unit component-wise variances.

        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.

    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Components with maximum variance.

    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the
        sum of explained variances is equal to 1.0.

    singular_values_ : array, shape (n_components,)
        The singular values corresponding to each of the selected components.
        The singular values are equal to the 2-norms of the ``n_components``
        variables in the lower-dimensional space.

    mean_ : array, shape (n_features,)
        Per-feature empirical mean, estimated from the training set.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.decomposition import RandomizedPCA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> pca = RandomizedPCA(n_components=2)
    >>> pca.fit(X)                 # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    RandomizedPCA(copy=True, iterated_power=2, n_components=2,
           random_state=None, whiten=False)
    >>> print(pca.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.99244...  0.00755...]
    >>> print(pca.singular_values_)  # doctest: +ELLIPSIS
    [ 6.30061...  0.54980...]

    See also
    --------
    PCA
    TruncatedSVD

    References
    ----------

    .. [Halko2009] `Finding structure with randomness: Stochastic algorithms
      for constructing approximate matrix decompositions Halko, et al., 2009
      (arXiv:909)`
    .. [MRT] `A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`

    """

    def __init__(self, n_components=None, copy=True, iterated_power=2,
                 whiten=False, random_state=None):
        self.n_components = n_components
        self.copy = copy
        self.iterated_power = iterated_power
        self.whiten = whiten
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X by extracting the first principal components.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._fit(check_array(X))
        return self

    def _fit(self, X):
        """Fit the model to the data X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X : ndarray, shape (n_samples, n_features)
            The input data, copied, centered and whitened when requested.
        """
        random_state = check_random_state(self.random_state)
        X = np.atleast_2d(as_float_array(X, copy=self.copy))

        n_samples = X.shape[0]

        # Center data
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components

        U, S, V = randomized_svd(X, n_components,
                                 n_iter=self.iterated_power,
                                 random_state=random_state)

        self.explained_variance_ = exp_var = (S ** 2) / (n_samples - 1)
        full_var = np.var(X, ddof=1, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        self.singular_values_ = S  # Store the singular values.

        if self.whiten:
            self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
        else:
            self.components_ = V

        return X

    def transform(self, X):
        """Apply dimensionality reduction on X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        """
        check_is_fitted(self, 'mean_')

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_

        X = np.dot(X, self.components_.T)
        return X

    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        y : Ignored.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        """
        X = check_array(X)
        X = self._fit(X)
        return np.dot(X, self.components_.T)

    def inverse_transform(self, X):
        """Transform data back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
        """
        check_is_fitted(self, 'mean_')

        X_original = np.dot(X, self.components_)
        if self.mean_ is not None:
            X_original = X_original + self.mean_
        return X_original
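# ---------------------------------------------------------------------------
# Illustrative sketch of the solver dispatch implemented in PCA._fit above
# (a hedged example, not from the original file): 'full' goes through
# scipy.linalg.svd, 'randomized' through randomized_svd, and 'arpack'
# through scipy.sparse.linalg.svds, which requires strictly
# 0 < n_components < n_features. The data shape is an arbitrary assumption.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(60, 8)

for solver in ('full', 'randomized', 'arpack'):
    pca = PCA(n_components=3, svd_solver=solver, random_state=0)
    X_t = pca.fit_transform(X)
    # the solvers should agree closely on this small, well-behaved problem
    print(solver, X_t.shape, round(pca.explained_variance_ratio_.sum(), 6))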
29,345
35.22963
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/fastica_.py
""" Python implementation of the fast ICA algorithms. Reference: Tables 8.3 and 8.4 page 196 in the book: Independent Component Analysis, by Hyvarinen et al. """ # Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux, # Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann # License: BSD 3 clause import warnings import numpy as np from scipy import linalg from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.six import moves from ..externals.six import string_types from ..utils import check_array, as_float_array, check_random_state from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES __all__ = ['fastica', 'FastICA'] def _gs_decorrelation(w, W, j): """ Orthonormalize w wrt the first j rows of W Parameters ---------- w : ndarray of shape(n) Array to be orthogonalized W : ndarray of shape(p, n) Null space definition j : int < p The no of (from the first) rows of Null space W wrt which w is orthogonalized. Notes ----- Assumes that W is orthogonal w changed in place """ w -= np.dot(np.dot(w, W[:j].T), W[:j]) return w def _sym_decorrelation(W): """ Symmetric decorrelation i.e. W <- (W * W.T) ^{-1/2} * W """ s, u = linalg.eigh(np.dot(W, W.T)) # u (resp. s) contains the eigenvectors (resp. square roots of # the eigenvalues) of W * W.T return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W) def _ica_def(X, tol, g, fun_args, max_iter, w_init): """Deflationary FastICA using fun approx to neg-entropy function Used internally by FastICA. """ n_components = w_init.shape[0] W = np.zeros((n_components, n_components), dtype=X.dtype) n_iter = [] # j is the index of the extracted component for j in range(n_components): w = w_init[j, :].copy() w /= np.sqrt((w ** 2).sum()) for i in moves.xrange(max_iter): gwtx, g_wtx = g(np.dot(w.T, X), fun_args) w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w _gs_decorrelation(w1, W, j) w1 /= np.sqrt((w1 ** 2).sum()) lim = np.abs(np.abs((w1 * w).sum()) - 1) w = w1 if lim < tol: break n_iter.append(i + 1) W[j, :] = w return W, max(n_iter) def _ica_par(X, tol, g, fun_args, max_iter, w_init): """Parallel FastICA. Used internally by FastICA --main loop """ W = _sym_decorrelation(w_init) del w_init p_ = float(X.shape[1]) for ii in moves.xrange(max_iter): gwtx, g_wtx = g(np.dot(W, X), fun_args) W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W) del gwtx, g_wtx # builtin max, abs are faster than numpy counter parts. lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1)) W = W1 if lim < tol: break else: warnings.warn('FastICA did not converge. Consider increasing ' 'tolerance or the maximum number of iterations.') return W, ii + 1 # Some standard non-linear functions. # XXX: these should be optimized, as they can be a bottleneck. def _logcosh(x, fun_args=None): alpha = fun_args.get('alpha', 1.0) # comment it out? x *= alpha gx = np.tanh(x, x) # apply the tanh inplace g_x = np.empty(x.shape[0]) # XXX compute in chunks to avoid extra allocation for i, gx_i in enumerate(gx): # please don't vectorize. 
g_x[i] = (alpha * (1 - gx_i ** 2)).mean() return gx, g_x def _exp(x, fun_args): exp = np.exp(-(x ** 2) / 2) gx = x * exp g_x = (1 - x ** 2) * exp return gx, g_x.mean(axis=-1) def _cube(x, fun_args): return x ** 3, (3 * x ** 2).mean(axis=-1) def fastica(X, n_components=None, algorithm="parallel", whiten=True, fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None, random_state=None, return_X_mean=False, compute_sources=True, return_n_iter=False): """Perform Fast Independent Component Analysis. Read more in the :ref:`User Guide <ICA>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. n_components : int, optional Number of components to extract. If None no dimension reduction is performed. algorithm : {'parallel', 'deflation'}, optional Apply a parallel or deflational FASTICA algorithm. whiten : boolean, optional If True perform an initial whitening of the data. If False, the data is assumed to have already been preprocessed: it should be centered, normed and white. Otherwise you will get incorrect results. In this case the parameter n_components will be ignored. fun : string or function, optional. Default: 'logcosh' The functional form of the G function used in the approximation to neg-entropy. Could be either 'logcosh', 'exp', or 'cube'. You can also provide your own function. It should return a tuple containing the value of the function, and of its derivative, in the point. Example: def my_g(x): return x ** 3, 3 * x ** 2 fun_args : dictionary, optional Arguments to send to the functional form. If empty or None and if fun='logcosh', fun_args will take value {'alpha' : 1.0} max_iter : int, optional Maximum number of iterations to perform. tol : float, optional A positive scalar giving the tolerance at which the un-mixing matrix is considered to have converged. w_init : (n_components, n_components) array, optional Initial un-mixing array of dimension (n.comp,n.comp). If None (default) then an array of normal r.v.'s is used. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_X_mean : bool, optional If True, X_mean is returned too. compute_sources : bool, optional If False, sources are not computed, but only the rotation matrix. This can save memory when working with big data. Defaults to True. return_n_iter : bool, optional Whether or not to return the number of iterations. Returns ------- K : array, shape (n_components, n_features) | None. If whiten is 'True', K is the pre-whitening matrix that projects data onto the first n_components principal components. If whiten is 'False', K is 'None'. W : array, shape (n_components, n_components) Estimated un-mixing matrix. The mixing matrix can be obtained by:: w = np.dot(W, K.T) A = w.T * (w * w.T).I S : array, shape (n_samples, n_components) | None Estimated source matrix X_mean : array, shape (n_features, ) The mean over features. Returned only if return_X_mean is True. n_iter : int If the algorithm is "deflation", n_iter is the maximum number of iterations run across all components. Else they are just the number of iterations taken to converge. This is returned only when return_n_iter is set to `True`. 
    Notes
    -----

    The data matrix X is considered to be a linear combination of non-Gaussian
    (independent) components i.e. X = AS where columns of S contain the
    independent components and A is a linear mixing matrix. In short ICA
    attempts to 'un-mix' the data by estimating an un-mixing matrix W where
    ``S = W K X.``

    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed
    before the algorithm is applied. This makes it slightly
    faster for Fortran-ordered input.

    Implemented using FastICA:
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`

    """
    random_state = check_random_state(random_state)
    fun_args = {} if fun_args is None else fun_args
    # make interface compatible with other decompositions
    # a copy is required only for non whitened data
    X = check_array(X, copy=whiten, dtype=FLOAT_DTYPES).T

    alpha = fun_args.get('alpha', 1.0)
    if not 1 <= alpha <= 2:
        raise ValueError('alpha must be in [1,2]')

    if fun == 'logcosh':
        g = _logcosh
    elif fun == 'exp':
        g = _exp
    elif fun == 'cube':
        g = _cube
    elif callable(fun):
        def g(x, fun_args):
            return fun(x, **fun_args)
    else:
        exc = ValueError if isinstance(fun, six.string_types) else TypeError
        raise exc("Unknown function %r;"
                  " should be one of 'logcosh', 'exp', 'cube' or callable"
                  % fun)

    n, p = X.shape

    if not whiten and n_components is not None:
        n_components = None
        warnings.warn('Ignoring n_components with whiten=False.')

    if n_components is None:
        n_components = min(n, p)
    if (n_components > min(n, p)):
        n_components = min(n, p)
        warnings.warn('n_components is too large: it will be set to %s'
                      % n_components)

    if whiten:
        # Centering the columns (i.e. the variables)
        X_mean = X.mean(axis=-1)
        X -= X_mean[:, np.newaxis]

        # Whitening and preprocessing by PCA
        u, d, _ = linalg.svd(X, full_matrices=False)

        del _
        K = (u / d).T[:n_components]  # see (6.33) p.140
        del u, d
        X1 = np.dot(K, X)  # see (13.6) p.267 Here X1 is white and data
        # in X has been projected onto a subspace by PCA
        X1 *= np.sqrt(p)
    else:
        # X must be cast to floats to avoid typing issues with numpy
        # 2.0 and the line below
        X1 = as_float_array(X, copy=False)  # copy has been taken care of

    if w_init is None:
        w_init = np.asarray(random_state.normal(size=(n_components,
                            n_components)), dtype=X1.dtype)

    else:
        w_init = np.asarray(w_init)
        if w_init.shape != (n_components, n_components):
            raise ValueError('w_init has invalid shape -- should be %(shape)s'
                             % {'shape': (n_components, n_components)})

    kwargs = {'tol': tol,
              'g': g,
              'fun_args': fun_args,
              'max_iter': max_iter,
              'w_init': w_init}

    if algorithm == 'parallel':
        W, n_iter = _ica_par(X1, **kwargs)
    elif algorithm == 'deflation':
        W, n_iter = _ica_def(X1, **kwargs)
    else:
        raise ValueError('Invalid algorithm: must be either `parallel` or'
                         ' `deflation`.')
    del X1

    if whiten:
        if compute_sources:
            S = np.dot(np.dot(W, K), X).T
        else:
            S = None
        if return_X_mean:
            if return_n_iter:
                return K, W, S, X_mean, n_iter
            else:
                return K, W, S, X_mean
        else:
            if return_n_iter:
                return K, W, S, n_iter
            else:
                return K, W, S
    else:
        if compute_sources:
            S = np.dot(W, X).T
        else:
            S = None
        if return_X_mean:
            if return_n_iter:
                return None, W, S, None, n_iter
            else:
                return None, W, S, None
        else:
            if return_n_iter:
                return None, W, S, n_iter
            else:
                return None, W, S


class FastICA(BaseEstimator, TransformerMixin):
    """FastICA: a fast algorithm for Independent Component Analysis.

    Read more in the :ref:`User Guide <ICA>`.

    Parameters
    ----------
    n_components : int, optional
        Number of components to use.
        If none is passed, all are used.

    algorithm : {'parallel', 'deflation'}
        Apply parallel or deflational algorithm for FastICA.

    whiten : boolean, optional
        If whiten is false, the data is already considered to be
        whitened, and no whitening is performed.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example:

        def my_g(x):
            return x ** 3, 3 * x ** 2

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}.

    max_iter : int, optional
        Maximum number of iterations during fit.

    tol : float, optional
        Tolerance on update at each iteration.

    w_init : None or an (n_components, n_components) ndarray
        The mixing matrix to be used to initialize the algorithm.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : 2D array, shape (n_components, n_features)
        The unmixing matrix.

    mixing_ : array, shape (n_features, n_components)
        The mixing matrix.

    n_iter_ : int
        If the algorithm is "deflation", n_iter is the maximum number of
        iterations run across all components. Else it is just the number of
        iterations taken to converge.

    Notes
    -----
    Implementation based on
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`

    """

    def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                 fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
                 w_init=None, random_state=None):
        super(FastICA, self).__init__()
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state

    def _fit(self, X, compute_sources=False):
        """Fit the model

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        compute_sources : bool
            If False, sources are not computed, but only the rotation matrix.
            This can save memory when working with big data. Defaults to
            False.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        fun_args = {} if self.fun_args is None else self.fun_args
        whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
            X=X, n_components=self.n_components, algorithm=self.algorithm,
            whiten=self.whiten, fun=self.fun, fun_args=fun_args,
            max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
            random_state=self.random_state, return_X_mean=True,
            compute_sources=compute_sources, return_n_iter=True)

        if self.whiten:
            self.components_ = np.dot(unmixing, whitening)
            self.mean_ = X_mean
            self.whitening_ = whitening
        else:
            self.components_ = unmixing

        self.mixing_ = linalg.pinv(self.components_)

        if compute_sources:
            self.__sources = sources

        return sources

    def fit_transform(self, X, y=None):
        """Fit the model and recover the sources from X.
Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : Ignored. Returns ------- X_new : array-like, shape (n_samples, n_components) """ return self._fit(X, compute_sources=True) def fit(self, X, y=None): """Fit the model to X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : Ignored. Returns ------- self """ self._fit(X, compute_sources=False) return self def transform(self, X, y='deprecated', copy=True): """Recover the sources from X (apply the unmixing matrix). Parameters ---------- X : array-like, shape (n_samples, n_features) Data to transform, where n_samples is the number of samples and n_features is the number of features. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : bool (optional) If False, data passed to fit are overwritten. Defaults to True. Returns ------- X_new : array-like, shape (n_samples, n_components) """ if not isinstance(y, string_types) or y != 'deprecated': warnings.warn("The parameter y on transform() is " "deprecated since 0.19 and will be removed in 0.21", DeprecationWarning) check_is_fitted(self, 'mixing_') X = check_array(X, copy=copy, dtype=FLOAT_DTYPES) if self.whiten: X -= self.mean_ return np.dot(X, self.components_.T) def inverse_transform(self, X, copy=True): """Transform the sources back to the mixed data (apply mixing matrix). Parameters ---------- X : array-like, shape (n_samples, n_components) Sources, where n_samples is the number of samples and n_components is the number of components. copy : bool (optional) If False, data passed to fit are overwritten. Defaults to True. Returns ------- X_new : array-like, shape (n_samples, n_features) """ check_is_fitted(self, 'mixing_') X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES) X = np.dot(X, self.mixing_.T) if self.whiten: X += self.mean_ return X
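# ---------------------------------------------------------------------------
# Illustrative sketch of the FastICA estimator defined above (a hedged
# example, not from the original file): two independent non-Gaussian
# sources are mixed by an arbitrary, assumed 2x2 matrix A; fit_transform
# recovers them up to permutation, sign and scale, and inverse_transform
# applies mixing_ to map the estimated sources back to the observations.
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(42)
S = rng.laplace(size=(2000, 2))            # non-Gaussian sources
A = np.array([[1.0, 0.5], [0.5, 2.0]])     # assumed mixing matrix
X = S.dot(A.T)                             # observed mixtures

ica = FastICA(n_components=2, random_state=0)
S_est = ica.fit_transform(X)               # estimated sources
X_back = ica.inverse_transform(S_est)      # reconstruct the mixtures
assert np.allclose(X, X_back)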
19,120
31.298986
87
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/__init__.py
""" The :mod:`sklearn.decomposition` module includes matrix decomposition algorithms, including among others PCA, NMF or ICA. Most of the algorithms of this module can be regarded as dimensionality reduction techniques. """ from .nmf import NMF, non_negative_factorization from .pca import PCA, RandomizedPCA from .incremental_pca import IncrementalPCA from .kernel_pca import KernelPCA from .sparse_pca import SparsePCA, MiniBatchSparsePCA from .truncated_svd import TruncatedSVD from .fastica_ import FastICA, fastica from .dict_learning import (dict_learning, dict_learning_online, sparse_encode, DictionaryLearning, MiniBatchDictionaryLearning, SparseCoder) from .factor_analysis import FactorAnalysis from ..utils.extmath import randomized_svd from .online_lda import LatentDirichletAllocation __all__ = ['DictionaryLearning', 'FastICA', 'IncrementalPCA', 'KernelPCA', 'MiniBatchDictionaryLearning', 'MiniBatchSparsePCA', 'NMF', 'PCA', 'RandomizedPCA', 'SparseCoder', 'SparsePCA', 'dict_learning', 'dict_learning_online', 'fastica', 'non_negative_factorization', 'randomized_svd', 'sparse_encode', 'FactorAnalysis', 'TruncatedSVD', 'LatentDirichletAllocation']
1,433
33.97561
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_truncated_svd.py
"""Test truncated SVD transformer.""" import numpy as np import scipy.sparse as sp from sklearn.decomposition import TruncatedSVD from sklearn.utils import check_random_state from sklearn.utils.testing import (assert_array_almost_equal, assert_equal, assert_raises, assert_greater, assert_array_less) # Make an X that looks somewhat like a small tf-idf matrix. # XXX newer versions of SciPy have scipy.sparse.rand for this. shape = 60, 55 n_samples, n_features = shape rng = check_random_state(42) X = rng.randint(-100, 20, np.product(shape)).reshape(shape) X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64) X.data[:] = 1 + np.log(X.data) Xdense = X.A def test_algorithms(): svd_a = TruncatedSVD(30, algorithm="arpack") svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42) Xa = svd_a.fit_transform(X)[:, :6] Xr = svd_r.fit_transform(X)[:, :6] assert_array_almost_equal(Xa, Xr, decimal=5) comp_a = np.abs(svd_a.components_) comp_r = np.abs(svd_r.components_) # All elements are equal, but some elements are more equal than others. assert_array_almost_equal(comp_a[:9], comp_r[:9]) assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2) def test_attributes(): for n_components in (10, 25, 41): tsvd = TruncatedSVD(n_components).fit(X) assert_equal(tsvd.n_components, n_components) assert_equal(tsvd.components_.shape, (n_components, n_features)) def test_too_many_components(): for algorithm in ["arpack", "randomized"]: for n_components in (n_features, n_features + 1): tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm) assert_raises(ValueError, tsvd.fit, X) def test_sparse_formats(): for fmt in ("array", "csr", "csc", "coo", "lil"): Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)() tsvd = TruncatedSVD(n_components=11) Xtrans = tsvd.fit_transform(Xfmt) assert_equal(Xtrans.shape, (n_samples, 11)) Xtrans = tsvd.transform(Xfmt) assert_equal(Xtrans.shape, (n_samples, 11)) def test_inverse_transform(): for algo in ("arpack", "randomized"): # We need a lot of components for the reconstruction to be "almost # equal" in all positions. XXX Test means or sums instead? 
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo) Xt = tsvd.fit_transform(X) Xinv = tsvd.inverse_transform(Xt) assert_array_almost_equal(Xinv, Xdense, decimal=1) def test_integers(): Xint = X.astype(np.int64) tsvd = TruncatedSVD(n_components=6) Xtrans = tsvd.fit_transform(Xint) assert_equal(Xtrans.shape, (n_samples, tsvd.n_components)) def test_explained_variance(): # Test sparse data svd_a_10_sp = TruncatedSVD(10, algorithm="arpack") svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42) svd_a_20_sp = TruncatedSVD(20, algorithm="arpack") svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42) X_trans_a_10_sp = svd_a_10_sp.fit_transform(X) X_trans_r_10_sp = svd_r_10_sp.fit_transform(X) X_trans_a_20_sp = svd_a_20_sp.fit_transform(X) X_trans_r_20_sp = svd_r_20_sp.fit_transform(X) # Test dense data svd_a_10_de = TruncatedSVD(10, algorithm="arpack") svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42) svd_a_20_de = TruncatedSVD(20, algorithm="arpack") svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42) X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray()) X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray()) X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray()) X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray()) # helper arrays for tests below svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de, svd_r_10_de, svd_a_20_de, svd_r_20_de) svds_trans = ( (svd_a_10_sp, X_trans_a_10_sp), (svd_r_10_sp, X_trans_r_10_sp), (svd_a_20_sp, X_trans_a_20_sp), (svd_r_20_sp, X_trans_r_20_sp), (svd_a_10_de, X_trans_a_10_de), (svd_r_10_de, X_trans_r_10_de), (svd_a_20_de, X_trans_a_20_de), (svd_r_20_de, X_trans_r_20_de), ) svds_10_v_20 = ( (svd_a_10_sp, svd_a_20_sp), (svd_r_10_sp, svd_r_20_sp), (svd_a_10_de, svd_a_20_de), (svd_r_10_de, svd_r_20_de), ) svds_sparse_v_dense = ( (svd_a_10_sp, svd_a_10_de), (svd_a_20_sp, svd_a_20_de), (svd_r_10_sp, svd_r_10_de), (svd_r_20_sp, svd_r_20_de), ) # Assert the 1st component is equal for svd_10, svd_20 in svds_10_v_20: assert_array_almost_equal( svd_10.explained_variance_ratio_, svd_20.explained_variance_ratio_[:10], decimal=5, ) # Assert that 20 components has higher explained variance than 10 for svd_10, svd_20 in svds_10_v_20: assert_greater( svd_20.explained_variance_ratio_.sum(), svd_10.explained_variance_ratio_.sum(), ) # Assert that all the values are greater than 0 for svd in svds: assert_array_less(0.0, svd.explained_variance_ratio_) # Assert that total explained variance is less than 1 for svd in svds: assert_array_less(svd.explained_variance_ratio_.sum(), 1.0) # Compare sparse vs. 
dense
    for svd_sparse, svd_dense in svds_sparse_v_dense:
        assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
                                  svd_dense.explained_variance_ratio_)

    # Test that explained_variance is correct
    for svd, transformed in svds_trans:
        total_variance = np.var(X.toarray(), axis=0).sum()
        variances = np.var(transformed, axis=0)
        true_explained_variance_ratio = variances / total_variance

        assert_array_almost_equal(
            svd.explained_variance_ratio_,
            true_explained_variance_ratio,
        )


def test_singular_values():
    # Check that the TruncatedSVD output has the correct singular values

    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80

    X = rng.randn(n_samples, n_features)

    apca = TruncatedSVD(n_components=2, algorithm='arpack',
                        random_state=rng).fit(X)
    rpca = TruncatedSVD(n_components=2, algorithm='randomized',
                        random_state=rng).fit(X)
    assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12)

    # Compare to the Frobenius norm
    X_apca = apca.transform(X)
    X_rpca = rpca.transform(X)
    assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
                              np.linalg.norm(X_apca, "fro")**2.0, 12)
    assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
                              np.linalg.norm(X_rpca, "fro")**2.0, 12)

    # Compare to the 2-norms of the score vectors
    assert_array_almost_equal(apca.singular_values_,
                              np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
    assert_array_almost_equal(rpca.singular_values_,
                              np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12)

    # Set the singular values and see what we get back
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 110

    X = rng.randn(n_samples, n_features)

    apca = TruncatedSVD(n_components=3, algorithm='arpack',
                        random_state=rng)
    rpca = TruncatedSVD(n_components=3, algorithm='randomized',
                        random_state=rng)
    X_apca = apca.fit_transform(X)
    X_rpca = rpca.fit_transform(X)

    X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0))
    X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0))
    X_apca[:, 0] *= 3.142
    X_apca[:, 1] *= 2.718
    X_rpca[:, 0] *= 3.142
    X_rpca[:, 1] *= 2.718

    X_hat_apca = np.dot(X_apca, apca.components_)
    X_hat_rpca = np.dot(X_rpca, rpca.components_)
    apca.fit(X_hat_apca)
    rpca.fit(X_hat_rpca)
    assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
    assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
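# ---------------------------------------------------------------------------
# Illustrative sketch of the identity exercised by test_singular_values
# above (a hedged example, not from the original test file): fit_transform
# returns U * Sigma, and since U has orthonormal columns, the column-wise
# 2-norms of the transformed data equal singular_values_. Shapes are
# arbitrary assumptions.
import numpy as np
from sklearn.decomposition import TruncatedSVD

rng = np.random.RandomState(0)
X = rng.randn(50, 30)
svd = TruncatedSVD(n_components=5, algorithm='randomized', random_state=0)
X_t = svd.fit_transform(X)
np.testing.assert_allclose(svd.singular_values_,
                           np.sqrt((X_t ** 2).sum(axis=0)), rtol=1e-7)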
8,261
36.049327
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_nmf.py
import numpy as np import scipy.sparse as sp import numbers from scipy import linalg from sklearn.decomposition import NMF, non_negative_factorization from sklearn.decomposition import nmf # For testing internals from scipy.sparse import csc_matrix from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_raise_message, assert_no_warnings from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import ignore_warnings from sklearn.utils.extmath import squared_norm from sklearn.base import clone from sklearn.exceptions import ConvergenceWarning def test_initialize_nn_output(): # Test that initialization does not return negative values rng = np.random.mtrand.RandomState(42) data = np.abs(rng.randn(10, 10)) for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'): W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0) assert_false((W < 0).any() or (H < 0).any()) def test_parameter_checking(): A = np.ones((2, 2)) name = 'spam' msg = "Invalid solver parameter: got 'spam' instead of one of" assert_raise_message(ValueError, msg, NMF(solver=name).fit, A) msg = "Invalid init parameter: got 'spam' instead of one of" assert_raise_message(ValueError, msg, NMF(init=name).fit, A) msg = "Invalid beta_loss parameter: got 'spam' instead of one" assert_raise_message(ValueError, msg, NMF(solver='mu', beta_loss=name).fit, A) msg = "Invalid beta_loss parameter: solver 'cd' does not handle " msg += "beta_loss = 1.0" assert_raise_message(ValueError, msg, NMF(solver='cd', beta_loss=1.0).fit, A) msg = "Negative values in data passed to" assert_raise_message(ValueError, msg, NMF().fit, -A) assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A, 2, 'nndsvd') clf = NMF(2, tol=0.1).fit(A) assert_raise_message(ValueError, msg, clf.transform, -A) def test_initialize_close(): # Test NNDSVD error # Test that _initialize_nmf error is less than the standard deviation of # the entries in the matrix. rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(10, 10)) W, H = nmf._initialize_nmf(A, 10, init='nndsvd') error = linalg.norm(np.dot(W, H) - A) sdev = linalg.norm(A - A.mean()) assert_true(error <= sdev) def test_initialize_variants(): # Test NNDSVD variants correctness # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic # 'nndsvd' only where the basic version has zeros. 
rng = np.random.mtrand.RandomState(42) data = np.abs(rng.randn(10, 10)) W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd') Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda') War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar', random_state=0) for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)): assert_almost_equal(evl[ref != 0], ref[ref != 0]) # ignore UserWarning raised when both solver='mu' and init='nndsvd' @ignore_warnings(category=UserWarning) def test_nmf_fit_nn_output(): # Test that the decomposition does not contain negative values A = np.c_[5 * np.ones(5) - np.arange(1, 6), 5 * np.ones(5) + np.arange(1, 6)] for solver in ('cd', 'mu'): for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'): model = NMF(n_components=2, solver=solver, init=init, random_state=0) transf = model.fit_transform(A) assert_false((model.components_ < 0).any() or (transf < 0).any()) def test_nmf_fit_close(): rng = np.random.mtrand.RandomState(42) # Test that the fit is not too far away for solver in ('cd', 'mu'): pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0, max_iter=600) X = np.abs(rng.randn(6, 5)) assert_less(pnmf.fit(X).reconstruction_err_, 0.1) def test_nmf_transform(): # Test that NMF.transform returns close values rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(6, 5)) for solver in ['cd', 'mu']: m = NMF(solver=solver, n_components=3, init='random', random_state=0, tol=1e-5) ft = m.fit_transform(A) t = m.transform(A) assert_array_almost_equal(ft, t, decimal=2) def test_nmf_transform_custom_init(): # Smoke test that checks if NMF.transform works with custom initialization random_state = np.random.RandomState(0) A = np.abs(random_state.randn(6, 5)) n_components = 4 avg = np.sqrt(A.mean() / n_components) H_init = np.abs(avg * random_state.randn(n_components, 5)) W_init = np.abs(avg * random_state.randn(6, n_components)) m = NMF(solver='cd', n_components=n_components, init='custom', random_state=0) m.fit_transform(A, W=W_init, H=H_init) m.transform(A) def test_nmf_inverse_transform(): # Test that NMF.inverse_transform returns close values random_state = np.random.RandomState(0) A = np.abs(random_state.randn(6, 4)) for solver in ('cd', 'mu'): m = NMF(solver=solver, n_components=4, init='random', random_state=0, max_iter=1000) ft = m.fit_transform(A) A_new = m.inverse_transform(ft) assert_array_almost_equal(A, A_new, decimal=2) def test_n_components_greater_n_features(): # Smoke test for the case of more components than features. rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(30, 10)) NMF(n_components=15, random_state=0, tol=1e-2).fit(A) def test_nmf_sparse_input(): # Test that sparse matrices are accepted as input from scipy.sparse import csc_matrix rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 A_sparse = csc_matrix(A) for solver in ('cd', 'mu'): est1 = NMF(solver=solver, n_components=5, init='random', random_state=0, tol=1e-2) est2 = clone(est1) W1 = est1.fit_transform(A) W2 = est2.fit_transform(A_sparse) H1 = est1.components_ H2 = est2.components_ assert_array_almost_equal(W1, W2) assert_array_almost_equal(H1, H2) def test_nmf_sparse_transform(): # Test that transform works on sparse data. 
Issue #2124 rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(3, 2)) A[1, 1] = 0 A = csc_matrix(A) for solver in ('cd', 'mu'): model = NMF(solver=solver, random_state=0, n_components=2, max_iter=400) A_fit_tr = model.fit_transform(A) A_tr = model.transform(A) assert_array_almost_equal(A_fit_tr, A_tr, decimal=1) def test_non_negative_factorization_consistency(): # Test that the function is called in the same way, either directly # or through the NMF class rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 for solver in ('cd', 'mu'): W_nmf, H, _ = non_negative_factorization( A, solver=solver, random_state=1, tol=1e-2) W_nmf_2, _, _ = non_negative_factorization( A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2) model_class = NMF(solver=solver, random_state=1, tol=1e-2) W_cls = model_class.fit_transform(A) W_cls_2 = model_class.transform(A) assert_array_almost_equal(W_nmf, W_cls, decimal=10) assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10) def test_non_negative_factorization_checking(): A = np.ones((2, 2)) # Test parameters checking is public function nnmf = non_negative_factorization assert_no_warnings(nnmf, A, A, A, np.int64(1)) msg = ("Number of components must be a positive integer; " "got (n_components=1.5)") assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5) msg = ("Number of components must be a positive integer; " "got (n_components='2')") assert_raise_message(ValueError, msg, nnmf, A, A, A, '2') msg = "Negative values in data passed to NMF (input H)" assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom') msg = "Negative values in data passed to NMF (input W)" assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom') msg = "Array passed to NMF (input H) is full of zeros" assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom') msg = "Invalid regularization parameter: got 'spam' instead of one of" assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True, 'cd', 2., 1e-4, 200, 0., 0., 'spam') def _beta_divergence_dense(X, W, H, beta): """Compute the beta-divergence of X and W.H for dense array only. Used as a reference for testing nmf._beta_divergence. """ if isinstance(X, numbers.Number): W = np.array([[W]]) H = np.array([[H]]) X = np.array([[X]]) WH = np.dot(W, H) if beta == 2: return squared_norm(X - WH) / 2 WH_Xnonzero = WH[X != 0] X_nonzero = X[X != 0] np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero) if beta == 1: res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero)) res += WH.sum() - X.sum() elif beta == 0: div = X_nonzero / WH_Xnonzero res = np.sum(div) - X.size - np.sum(np.log(div)) else: res = (X_nonzero ** beta).sum() res += (beta - 1) * (WH ** beta).sum() res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum() res /= beta * (beta - 1) return res def test_beta_divergence(): # Compare _beta_divergence with the reference _beta_divergence_dense n_samples = 20 n_features = 10 n_components = 5 beta_losses = [0., 0.5, 1., 1.5, 2.] # initialization rng = np.random.mtrand.RandomState(42) X = rng.randn(n_samples, n_features) X[X < 0] = 0. 
    X_csr = sp.csr_matrix(X)
    W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)

    for beta in beta_losses:
        ref = _beta_divergence_dense(X, W, H, beta)
        loss = nmf._beta_divergence(X, W, H, beta)
        loss_csr = nmf._beta_divergence(X_csr, W, H, beta)

        assert_almost_equal(ref, loss, decimal=7)
        assert_almost_equal(ref, loss_csr, decimal=7)


def test_special_sparse_dot():
    # Test the function that computes np.dot(W, H), only where X is non zero.
    n_samples = 10
    n_features = 5
    n_components = 3
    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    X[X < 0] = 0.
    X_csr = sp.csr_matrix(X)

    W = np.abs(rng.randn(n_samples, n_components))
    H = np.abs(rng.randn(n_components, n_features))

    WH_safe = nmf._special_sparse_dot(W, H, X_csr)
    WH = nmf._special_sparse_dot(W, H, X)

    # test that both results have same values, in X_csr nonzero elements
    ii, jj = X_csr.nonzero()
    WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
    assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)

    # test that WH_safe and X_csr have the same sparse structure
    assert_array_equal(WH_safe.indices, X_csr.indices)
    assert_array_equal(WH_safe.indptr, X_csr.indptr)
    assert_array_equal(WH_safe.shape, X_csr.shape)


@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
    # Compare sparse and dense input in multiplicative update NMF
    # Also test continuity of the results with respect to beta_loss parameter
    n_samples = 20
    n_features = 10
    n_components = 5
    alpha = 0.1
    l1_ratio = 0.5
    n_iter = 20

    # initialization
    rng = np.random.mtrand.RandomState(1337)
    X = rng.randn(n_samples, n_features)
    X = np.abs(X)
    X_csr = sp.csr_matrix(X)
    W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
                                 random_state=42)

    for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
        # Reference with dense array X
        W, H = W0.copy(), H0.copy()
        W1, H1, _ = non_negative_factorization(
            X, W, H, n_components, init='custom', update_H=True,
            solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
            l1_ratio=l1_ratio, regularization='both', random_state=42)

        # Compare with sparse X
        W, H = W0.copy(), H0.copy()
        W2, H2, _ = non_negative_factorization(
            X_csr, W, H, n_components, init='custom', update_H=True,
            solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
            l1_ratio=l1_ratio, regularization='both', random_state=42)

        assert_array_almost_equal(W1, W2, decimal=7)
        assert_array_almost_equal(H1, H2, decimal=7)

        # Compare with almost same beta_loss, since some values have a
        # specific behavior, but the results should be continuous w.r.t
        # beta_loss
        beta_loss -= 1.e-5
        W, H = W0.copy(), H0.copy()
        W3, H3, _ = non_negative_factorization(
            X_csr, W, H, n_components, init='custom', update_H=True,
            solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
            l1_ratio=l1_ratio, regularization='both', random_state=42)

        assert_array_almost_equal(W1, W3, decimal=4)
        assert_array_almost_equal(H1, H3, decimal=4)


def test_nmf_negative_beta_loss():
    # Test that an error is raised if beta_loss < 0 and X contains zeros.
    # Test that the output has no NaN values when the input contains zeros.
n_samples = 6 n_features = 5 n_components = 3 rng = np.random.mtrand.RandomState(42) X = rng.randn(n_samples, n_features) X[X < 0] = 0 X_csr = sp.csr_matrix(X) def _assert_nmf_no_nan(X, beta_loss): W, H, _ = non_negative_factorization( X, n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000) assert_false(np.any(np.isnan(W))) assert_false(np.any(np.isnan(H))) msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge." for beta_loss in (-0.6, 0.): assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss) _assert_nmf_no_nan(X + 1e-9, beta_loss) for beta_loss in (0.2, 1., 1.2, 2., 2.5): _assert_nmf_no_nan(X, beta_loss) _assert_nmf_no_nan(X_csr, beta_loss) def test_nmf_regularization(): # Test the effect of L1 and L2 regularizations n_samples = 6 n_features = 5 n_components = 3 rng = np.random.mtrand.RandomState(42) X = np.abs(rng.randn(n_samples, n_features)) # L1 regularization should increase the number of zeros l1_ratio = 1. for solver in ['cd', 'mu']: regul = nmf.NMF(n_components=n_components, solver=solver, alpha=0.5, l1_ratio=l1_ratio, random_state=42) model = nmf.NMF(n_components=n_components, solver=solver, alpha=0., l1_ratio=l1_ratio, random_state=42) W_regul = regul.fit_transform(X) W_model = model.fit_transform(X) H_regul = regul.components_ H_model = model.components_ W_regul_n_zeros = W_regul[W_regul == 0].size W_model_n_zeros = W_model[W_model == 0].size H_regul_n_zeros = H_regul[H_regul == 0].size H_model_n_zeros = H_model[H_model == 0].size assert_greater(W_regul_n_zeros, W_model_n_zeros) assert_greater(H_regul_n_zeros, H_model_n_zeros) # L2 regularization should decrease the mean of the coefficients l1_ratio = 0. for solver in ['cd', 'mu']: regul = nmf.NMF(n_components=n_components, solver=solver, alpha=0.5, l1_ratio=l1_ratio, random_state=42) model = nmf.NMF(n_components=n_components, solver=solver, alpha=0., l1_ratio=l1_ratio, random_state=42) W_regul = regul.fit_transform(X) W_model = model.fit_transform(X) H_regul = regul.components_ H_model = model.components_ assert_greater(W_model.mean(), W_regul.mean()) assert_greater(H_model.mean(), H_regul.mean()) @ignore_warnings(category=ConvergenceWarning) def test_nmf_decreasing(): # test that the objective function is decreasing at each iteration n_samples = 20 n_features = 15 n_components = 10 alpha = 0.1 l1_ratio = 0.5 tol = 0. # initialization rng = np.random.mtrand.RandomState(42) X = rng.randn(n_samples, n_features) np.abs(X, X) W0, H0 = nmf._initialize_nmf(X, n_components, init='random', random_state=42) for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5): for solver in ('cd', 'mu'): if solver != 'mu' and beta_loss != 2: # not implemented continue W, H = W0.copy(), H0.copy() previous_loss = None for _ in range(30): # one more iteration starting from the previous results W, H, _ = non_negative_factorization( X, W, H, beta_loss=beta_loss, init='custom', n_components=n_components, max_iter=1, alpha=alpha, solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0, regularization='both', random_state=0, update_H=True) loss = nmf._beta_divergence(X, W, H, beta_loss) if previous_loss is not None: assert_greater(previous_loss, loss) previous_loss = loss
17,922
36.184647
79
py
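A note on the reference loss used above: the beta-divergence specializes to half the squared Frobenius norm at beta=2 and to generalized Kullback-Leibler at beta=1. A minimal standalone numpy sketch of those two endpoints, using hypothetical toy factors rather than anything from the test file:

import numpy as np

def beta_divergence_endpoints(X, W, H, beta):
    # Minimal re-derivation of the two classic endpoints of the
    # beta-divergence; mirrors the dense reference implementation above.
    WH = np.dot(W, H)
    if beta == 2:  # half squared Frobenius norm
        return 0.5 * np.sum((X - WH) ** 2)
    if beta == 1:  # generalized Kullback-Leibler divergence
        mask = X != 0
        return (np.sum(X[mask] * np.log(X[mask] / WH[mask]))
                + WH.sum() - X.sum())
    raise ValueError("only beta in {1, 2} handled in this sketch")

rng = np.random.RandomState(0)
X = np.abs(rng.randn(6, 4))   # hypothetical nonnegative data
W = np.abs(rng.randn(6, 2))
H = np.abs(rng.randn(2, 4))
print(beta_divergence_endpoints(X, W, H, 2))
print(beta_divergence_endpoints(X, W, H, 1))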
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_incremental_pca.py
"""Tests for Incremental PCA.""" import numpy as np from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raises from sklearn import datasets from sklearn.decomposition import PCA, IncrementalPCA iris = datasets.load_iris() def test_incremental_pca(): # Incremental PCA on dense arrays. X = iris.data batch_size = X.shape[0] // 3 ipca = IncrementalPCA(n_components=2, batch_size=batch_size) pca = PCA(n_components=2) pca.fit_transform(X) X_transformed = ipca.fit_transform(X) np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2)) assert_almost_equal(ipca.explained_variance_ratio_.sum(), pca.explained_variance_ratio_.sum(), 1) for n_components in [1, 2, X.shape[1]]: ipca = IncrementalPCA(n_components, batch_size=batch_size) ipca.fit(X) cov = ipca.get_covariance() precision = ipca.get_precision() assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1])) def test_incremental_pca_check_projection(): # Test that the projection of data is correct. rng = np.random.RandomState(1999) n, p = 100, 3 X = rng.randn(n, p) * .1 X[:10] += np.array([3, 4, 5]) Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) # Get the reconstruction of the generated data X # Note that Xt has the same "components" as X, just separated # This is what we want to ensure is recreated correctly Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt) # Normalize Yt /= np.sqrt((Yt ** 2).sum()) # Make sure that the first element of Yt is ~1, this means # the reconstruction worked as expected assert_almost_equal(np.abs(Yt[0][0]), 1., 1) def test_incremental_pca_inverse(): # Test that the projection of data can be inverted. rng = np.random.RandomState(1999) n, p = 50, 3 X = rng.randn(n, p) # spherical data X[:, 1] *= .00001 # make middle component relatively small X += [5, 4, 3] # make a large mean # same check that we can find the original data from the transformed # signal (since the data is almost of rank n_components) ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X) Y = ipca.transform(X) Y_inverse = ipca.inverse_transform(Y) assert_almost_equal(X, Y_inverse, decimal=3) def test_incremental_pca_validation(): # Test that n_components is >=1 and <= n_features. X = [[0, 1], [1, 0]] for n_components in [-1, 0, .99, 3]: assert_raises(ValueError, IncrementalPCA(n_components, batch_size=10).fit, X) def test_incremental_pca_set_params(): # Test that components_ sign is stable over batch sizes. rng = np.random.RandomState(1999) n_samples = 100 n_features = 20 X = rng.randn(n_samples, n_features) X2 = rng.randn(n_samples, n_features) X3 = rng.randn(n_samples, n_features) ipca = IncrementalPCA(n_components=20) ipca.fit(X) # Decreasing number of components ipca.set_params(n_components=10) assert_raises(ValueError, ipca.partial_fit, X2) # Increasing number of components ipca.set_params(n_components=15) assert_raises(ValueError, ipca.partial_fit, X3) # Returning to original setting ipca.set_params(n_components=20) ipca.partial_fit(X) def test_incremental_pca_num_features_change(): # Test that changing n_components will raise an error. rng = np.random.RandomState(1999) n_samples = 100 X = rng.randn(n_samples, 20) X2 = rng.randn(n_samples, 50) ipca = IncrementalPCA(n_components=None) ipca.fit(X) assert_raises(ValueError, ipca.partial_fit, X2) def test_incremental_pca_batch_signs(): # Test that components_ sign is stable over batch sizes. 
rng = np.random.RandomState(1999) n_samples = 100 n_features = 3 X = rng.randn(n_samples, n_features) all_components = [] batch_sizes = np.arange(10, 20) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) all_components.append(ipca.components_) for i, j in zip(all_components[:-1], all_components[1:]): assert_almost_equal(np.sign(i), np.sign(j), decimal=6) def test_incremental_pca_batch_values(): # Test that components_ values are stable over batch sizes. rng = np.random.RandomState(1999) n_samples = 100 n_features = 3 X = rng.randn(n_samples, n_features) all_components = [] batch_sizes = np.arange(20, 40, 3) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) all_components.append(ipca.components_) for i, j in zip(all_components[:-1], all_components[1:]): assert_almost_equal(i, j, decimal=1) def test_incremental_pca_partial_fit(): # Test that fit and partial_fit get equivalent results. rng = np.random.RandomState(1999) n, p = 50, 3 X = rng.randn(n, p) # spherical data X[:, 1] *= .00001 # make middle component relatively small X += [5, 4, 3] # make a large mean # same check that we can find the original data from the transformed # signal (since the data is almost of rank n_components) batch_size = 10 ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X) pipca = IncrementalPCA(n_components=2, batch_size=batch_size) # Add one to make sure endpoint is included batch_itr = np.arange(0, n + 1, batch_size) for i, j in zip(batch_itr[:-1], batch_itr[1:]): pipca.partial_fit(X[i:j, :]) assert_almost_equal(ipca.components_, pipca.components_, decimal=3) def test_incremental_pca_against_pca_iris(): # Test that IncrementalPCA and PCA are approximate (to a sign flip). X = iris.data Y_pca = PCA(n_components=2).fit_transform(X) Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X) assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) def test_incremental_pca_against_pca_random_data(): # Test that IncrementalPCA and PCA are approximate (to a sign flip). 
rng = np.random.RandomState(1999) n_samples = 100 n_features = 3 X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features) Y_pca = PCA(n_components=3).fit_transform(X) Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X) assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) def test_explained_variances(): # Test that PCA and IncrementalPCA calculations match X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0., effective_rank=10, random_state=1999) prec = 3 n_samples, n_features = X.shape for nc in [None, 99]: pca = PCA(n_components=nc).fit(X) ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X) assert_almost_equal(pca.explained_variance_, ipca.explained_variance_, decimal=prec) assert_almost_equal(pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec) assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec) def test_singular_values(): # Check that the IncrementalPCA output has the correct singular values rng = np.random.RandomState(0) n_samples = 1000 n_features = 100 X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng) pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X) ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X) assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2) # Compare to the Frobenius norm X_pca = pca.transform(X) X_ipca = ipca.transform(X) assert_array_almost_equal(np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro")**2.0, 12) assert_array_almost_equal(np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro")**2.0, 2) # Compare to the 2-norms of the score vectors assert_array_almost_equal(pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12) assert_array_almost_equal(ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2) # Set the singular values and see what we get back rng = np.random.RandomState(0) n_samples = 100 n_features = 110 X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng) pca = PCA(n_components=3, svd_solver='full', random_state=rng) ipca = IncrementalPCA(n_components=3, batch_size=100) X_pca = pca.fit_transform(X) X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) X_pca[:, 0] *= 3.142 X_pca[:, 1] *= 2.718 X_hat = np.dot(X_pca, pca.components_) pca.fit(X_hat) ipca.fit(X_hat) assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14) assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14) def test_whitening(): # Test that PCA and IncrementalPCA transforms match to sign flip. X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0., effective_rank=2, random_state=1999) prec = 3 n_samples, n_features = X.shape for nc in [None, 9]: pca = PCA(whiten=True, n_components=nc).fit(X) ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X) Xt_pca = pca.transform(X) Xt_ipca = ipca.transform(X) assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec) Xinv_ipca = ipca.inverse_transform(Xt_ipca) Xinv_pca = pca.inverse_transform(Xt_pca) assert_almost_equal(X, Xinv_ipca, decimal=prec) assert_almost_equal(X, Xinv_pca, decimal=prec) assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
10,272
36.221014
79
py
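As the partial_fit tests above indicate, streaming the same rows through IncrementalPCA in mini-batches approximates a single full fit. A short standalone sketch, assuming only toy Gaussian data:

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(100, 5)  # hypothetical data matrix

ipca = IncrementalPCA(n_components=2)
for batch in np.array_split(X, 10):  # stream the rows in 10 mini-batches
    ipca.partial_fit(batch)
print(ipca.explained_variance_ratio_)  # close to a one-shot fit(X)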
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_sparse_pca.py
# Author: Vlad Niculae # License: BSD 3 clause import sys import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import if_safe_multiprocessing_with_blas from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA from sklearn.utils import check_random_state def generate_toy_data(n_components, n_samples, image_size, random_state=None): n_features = image_size[0] * image_size[1] rng = check_random_state(random_state) U = rng.randn(n_samples, n_components) V = rng.randn(n_components, n_features) centers = [(3, 3), (6, 7), (8, 1)] sz = [1, 2, 1] for k in range(n_components): img = np.zeros(image_size) xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k] ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k] img[xmin:xmax][:, ymin:ymax] = 1.0 V[k, :] = img.ravel() # Y is defined by : Y = UV + noise Y = np.dot(U, V) Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise return Y, U, V # SparsePCA can be a bit slow. To avoid having test times go up, we # test different aspects of the code in the same test def test_correct_shapes(): rng = np.random.RandomState(0) X = rng.randn(12, 10) spca = SparsePCA(n_components=8, random_state=rng) U = spca.fit_transform(X) assert_equal(spca.components_.shape, (8, 10)) assert_equal(U.shape, (12, 8)) # test overcomplete decomposition spca = SparsePCA(n_components=13, random_state=rng) U = spca.fit_transform(X) assert_equal(spca.components_.shape, (13, 10)) assert_equal(U.shape, (12, 13)) def test_fit_transform(): alpha = 1 rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0) spca_lars.fit(Y) # Test that CD gives similar results spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0, alpha=alpha) spca_lasso.fit(Y) assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) # Test that deprecated ridge_alpha parameter throws warning warning_msg = "The ridge_alpha parameter on transform()" assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform, Y, ridge_alpha=0.01) assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform, Y, ridge_alpha=None) @if_safe_multiprocessing_with_blas def test_fit_transform_parallel(): alpha = 1 rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0) spca_lars.fit(Y) U1 = spca_lars.transform(Y) # Test multiple CPUs spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha, random_state=0).fit(Y) U2 = spca.transform(Y) assert_true(not np.all(spca_lars.components_ == 0)) assert_array_almost_equal(U1, U2) def test_transform_nan(): # Test that SparsePCA won't return NaN when there is 0 feature in all # samples. 
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    Y[:, 0] = 0
    estimator = SparsePCA(n_components=8)
    assert_false(np.any(np.isnan(estimator.fit_transform(Y))))


def test_fit_transform_tall():
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng)  # tall array
    spca_lars = SparsePCA(n_components=3, method='lars', random_state=rng)
    U1 = spca_lars.fit_transform(Y)
    spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
    U2 = spca_lasso.fit(Y).transform(Y)
    assert_array_almost_equal(U1, U2)


def test_initialization():
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init,
                      max_iter=0, random_state=rng)
    model.fit(rng.randn(5, 4))
    assert_array_equal(model.components_, V_init)


def test_mini_batch_correct_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
    U = pca.fit_transform(X)
    assert_equal(pca.components_.shape, (8, 10))
    assert_equal(U.shape, (12, 8))
    # test overcomplete decomposition
    pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
    U = pca.fit_transform(X)
    assert_equal(pca.components_.shape, (13, 10))
    assert_equal(U.shape, (12, 13))


def test_mini_batch_fit_transform():
    raise SkipTest("skipping mini_batch_fit_transform.")
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
                                   alpha=alpha).fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    if sys.platform == 'win32':  # fake parallelism for win32
        import sklearn.externals.joblib.parallel as joblib_par
        _mp = joblib_par.multiprocessing
        joblib_par.multiprocessing = None
        try:
            U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
                                    random_state=0).fit(Y).transform(Y)
        finally:
            joblib_par.multiprocessing = _mp
    else:  # we can efficiently use parallelism
        U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
                                random_state=0).fit(Y).transform(Y)
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2)
    # Test that CD gives similar results
    spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
                                    random_state=0).fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
6,459
36.777778
79
py
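For SparsePCA, the alpha penalty is what pushes exact zeros into components_. A quick hedged sketch on hypothetical random data (the resulting sparsity level depends on alpha and on the data):

import numpy as np
from sklearn.decomposition import SparsePCA

rng = np.random.RandomState(0)
Y = rng.randn(30, 16)  # hypothetical data
spca = SparsePCA(n_components=3, alpha=1, random_state=0)
spca.fit(Y)
# fraction of exactly-zero loadings; grows as alpha grows
print((spca.components_ == 0).mean())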
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_factor_analysis.py
# Author: Christian Osendorfer <[email protected]>
#         Alexandre Gramfort <[email protected]>
# License: BSD 3 clause

import numpy as np

from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings


# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variables of dim 3, 20 of them
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)

    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)

        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features,
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
3,203
36.255814
76
py
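The factor analysis test above draws X from the generative model h.W plus heteroscedastic noise and checks that the fitted model covariance tracks the sample covariance. A compact standalone sketch of the same idea, with an assumed larger sample size for a tighter match:

import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.RandomState(0)
n_samples, n_features, n_components = 200, 5, 3
W = rng.randn(n_components, n_features)          # loading matrix
h = rng.randn(n_samples, n_components)           # latent factors
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
X = np.dot(h, W) + noise                         # same generative model

fa = FactorAnalysis(n_components=n_components).fit(X)
scov = np.cov(X, rowvar=False, bias=True)        # sample covariance
mcov = fa.get_covariance()                       # model covariance
print(np.sum(np.abs(scov - mcov)) / W.size)      # small mean abs difference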
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_online_lda.py
import sys import numpy as np from scipy.linalg import block_diag from scipy.sparse import csr_matrix from scipy.special import psi from sklearn.decomposition import LatentDirichletAllocation from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d, _dirichlet_expectation_2d) from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import if_safe_multiprocessing_with_blas from sklearn.utils.testing import assert_warns from sklearn.exceptions import NotFittedError from sklearn.externals.six.moves import xrange from sklearn.externals.six import StringIO def _build_sparse_mtx(): # Create 3 topics and each topic has 3 distinct words. # (Each word only belongs to a single topic.) n_components = 3 block = n_components * np.ones((3, 3)) blocks = [block] * n_components X = block_diag(*blocks) X = csr_matrix(X) return (n_components, X) def test_lda_default_prior_params(): # default prior parameter should be `1 / topics` # and verbose params should not affect result n_components, X = _build_sparse_mtx() prior = 1. / n_components lda_1 = LatentDirichletAllocation(n_components=n_components, doc_topic_prior=prior, topic_word_prior=prior, random_state=0) lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0) topic_distr_1 = lda_1.fit_transform(X) topic_distr_2 = lda_2.fit_transform(X) assert_almost_equal(topic_distr_1, topic_distr_2) def test_lda_fit_batch(): # Test LDA batch learning_offset (`fit` method with 'batch' learning) rng = np.random.RandomState(0) n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, evaluate_every=1, learning_method='batch', random_state=rng) lda.fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_fit_online(): # Test LDA online learning (`fit` method with 'online' learning) rng = np.random.RandomState(0) n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, learning_offset=10., evaluate_every=1, learning_method='online', random_state=rng) lda.fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_partial_fit(): # Test LDA online learning (`partial_fit` method) # (same as test_lda_batch) rng = np.random.RandomState(0) n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, learning_offset=10., total_samples=100, random_state=rng) for i in xrange(3): lda.partial_fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_dense_input(): # Test LDA with dense input. 
rng = np.random.RandomState(0) n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, learning_method='batch', random_state=rng) lda.fit(X.toarray()) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_transform(): # Test LDA transform. # Transform result cannot be negative and should be normalized rng = np.random.RandomState(0) X = rng.randint(5, size=(20, 10)) n_components = 3 lda = LatentDirichletAllocation(n_components=n_components, random_state=rng) X_trans = lda.fit_transform(X) assert_true((X_trans > 0.0).any()) assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0])) def test_lda_fit_transform(): # Test LDA fit_transform & transform # fit_transform and transform result should be the same for method in ('online', 'batch'): rng = np.random.RandomState(0) X = rng.randint(10, size=(50, 20)) lda = LatentDirichletAllocation(n_components=5, learning_method=method, random_state=rng) X_fit = lda.fit_transform(X) X_trans = lda.transform(X) assert_array_almost_equal(X_fit, X_trans, 4) def test_lda_partial_fit_dim_mismatch(): # test `n_features` mismatch in `partial_fit` rng = np.random.RandomState(0) n_components = rng.randint(3, 6) n_col = rng.randint(6, 10) X_1 = np.random.randint(4, size=(10, n_col)) X_2 = np.random.randint(4, size=(10, n_col + 1)) lda = LatentDirichletAllocation(n_components=n_components, learning_offset=5., total_samples=20, random_state=rng) lda.partial_fit(X_1) assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2) def test_invalid_params(): # test `_check_params` method X = np.ones((5, 10)) invalid_models = ( ('n_components', LatentDirichletAllocation(n_components=0)), ('learning_method', LatentDirichletAllocation(learning_method='unknown')), ('total_samples', LatentDirichletAllocation(total_samples=0)), ('learning_offset', LatentDirichletAllocation(learning_offset=-1)), ) for param, model in invalid_models: regex = r"^Invalid %r parameter" % param assert_raises_regexp(ValueError, regex, model.fit, X) def test_lda_negative_input(): # test pass dense matrix with sparse negative input. 
X = -np.ones((5, 10)) lda = LatentDirichletAllocation() regex = r"^Negative values in data passed" assert_raises_regexp(ValueError, regex, lda.fit, X) def test_lda_no_component_error(): # test `transform` and `perplexity` before `fit` rng = np.random.RandomState(0) X = rng.randint(4, size=(20, 10)) lda = LatentDirichletAllocation() regex = r"^no 'components_' attribute" assert_raises_regexp(NotFittedError, regex, lda.transform, X) assert_raises_regexp(NotFittedError, regex, lda.perplexity, X) def test_lda_transform_mismatch(): # test `n_features` mismatch in partial_fit and transform rng = np.random.RandomState(0) X = rng.randint(4, size=(20, 10)) X_2 = rng.randint(4, size=(10, 8)) n_components = rng.randint(3, 6) lda = LatentDirichletAllocation(n_components=n_components, random_state=rng) lda.partial_fit(X) assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2) @if_safe_multiprocessing_with_blas def test_lda_multi_jobs(): n_components, X = _build_sparse_mtx() # Test LDA batch training with multi CPU for method in ('online', 'batch'): rng = np.random.RandomState(0) lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2, learning_method=method, evaluate_every=1, random_state=rng) lda.fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) @if_safe_multiprocessing_with_blas def test_lda_partial_fit_multi_jobs(): # Test LDA online training with multi CPU rng = np.random.RandomState(0) n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2, learning_offset=5., total_samples=30, random_state=rng) for i in range(2): lda.partial_fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_preplexity_mismatch(): # test dimension mismatch in `perplexity` method rng = np.random.RandomState(0) n_components = rng.randint(3, 6) n_samples = rng.randint(6, 10) X = np.random.randint(4, size=(n_samples, 10)) lda = LatentDirichletAllocation(n_components=n_components, learning_offset=5., total_samples=20, random_state=rng) lda.fit(X) # invalid samples invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components)) assert_raises_regexp(ValueError, r'Number of samples', lda._perplexity_precomp_distr, X, invalid_n_samples) # invalid topic number invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1)) assert_raises_regexp(ValueError, r'Number of topics', lda._perplexity_precomp_distr, X, invalid_n_components) def test_lda_perplexity(): # Test LDA perplexity for batch training # perplexity should be lower after each iteration n_components, X = _build_sparse_mtx() for method in ('online', 'batch'): lda_1 = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method=method, total_samples=100, random_state=0) lda_2 = LatentDirichletAllocation(n_components=n_components, max_iter=10, learning_method=method, total_samples=100, random_state=0) lda_1.fit(X) perp_1 = lda_1.perplexity(X, sub_sampling=False) lda_2.fit(X) perp_2 = lda_2.perplexity(X, sub_sampling=False) assert_greater_equal(perp_1, perp_2) perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True) perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True) assert_greater_equal(perp_1_subsampling, perp_2_subsampling) def test_lda_score(): # Test LDA score for batch 
training # score should be higher after each iteration n_components, X = _build_sparse_mtx() for method in ('online', 'batch'): lda_1 = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method=method, total_samples=100, random_state=0) lda_2 = LatentDirichletAllocation(n_components=n_components, max_iter=10, learning_method=method, total_samples=100, random_state=0) lda_1.fit_transform(X) score_1 = lda_1.score(X) lda_2.fit_transform(X) score_2 = lda_2.score(X) assert_greater_equal(score_2, score_1) def test_perplexity_input_format(): # Test LDA perplexity for sparse and dense input # score should be the same for both dense and sparse input n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method='batch', total_samples=100, random_state=0) lda.fit(X) perp_1 = lda.perplexity(X) perp_2 = lda.perplexity(X.toarray()) assert_almost_equal(perp_1, perp_2) def test_lda_score_perplexity(): # Test the relationship between LDA score and perplexity n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, max_iter=10, random_state=0) lda.fit(X) perplexity_1 = lda.perplexity(X, sub_sampling=False) score = lda.score(X) perplexity_2 = np.exp(-1. * (score / np.sum(X.data))) assert_almost_equal(perplexity_1, perplexity_2) def test_lda_fit_perplexity(): # Test that the perplexity computed during fit is consistent with what is # returned by the perplexity method n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method='batch', random_state=0, evaluate_every=1) lda.fit(X) # Perplexity computed at end of fit method perplexity1 = lda.bound_ # Result of perplexity method on the train set perplexity2 = lda.perplexity(X) assert_almost_equal(perplexity1, perplexity2) def test_doc_topic_distr_deprecation(): # Test that the appropriate warning message is displayed when a user # attempts to pass the doc_topic_distr argument to the perplexity method n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method='batch', total_samples=100, random_state=0) distr1 = lda.fit_transform(X) distr2 = None assert_warns(DeprecationWarning, lda.perplexity, X, distr1) assert_warns(DeprecationWarning, lda.perplexity, X, distr2) def test_lda_empty_docs(): """Test LDA on empty document (all-zero rows).""" Z = np.zeros((5, 4)) for X in [Z, csr_matrix(Z)]: lda = LatentDirichletAllocation(max_iter=750).fit(X) assert_almost_equal(lda.components_.sum(axis=0), np.ones(lda.components_.shape[1])) def test_dirichlet_expectation(): """Test Cython version of Dirichlet expectation calculation.""" x = np.logspace(-100, 10, 10000) expectation = np.empty_like(x) _dirichlet_expectation_1d(x, 0, expectation) assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19) x = x.reshape(100, 100) assert_allclose(_dirichlet_expectation_2d(x), psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]), rtol=1e-11, atol=3e-9) def check_verbosity(verbose, evaluate_every, expected_lines, expected_perplexities): n_components, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_components=n_components, max_iter=3, learning_method='batch', verbose=verbose, evaluate_every=evaluate_every, random_state=0) out = StringIO() old_out, sys.stdout = sys.stdout, out try: lda.fit(X) finally: sys.stdout = old_out n_lines = out.getvalue().count('\n') n_perplexity = out.getvalue().count('perplexity') 
    assert_equal(expected_lines, n_lines)
    assert_equal(expected_perplexities, n_perplexity)


def test_verbosity():
    for verbose, evaluate_every, expected_lines, expected_perplexities in [
            (False, 1, 0, 0),
            (False, 0, 0, 0),
            (True, 0, 3, 0),
            (True, 1, 3, 3),
            (True, 2, 3, 1),
    ]:
        yield (check_verbosity, verbose, evaluate_every, expected_lines,
               expected_perplexities)


def test_lda_n_topics_deprecation():
    n_components, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=10, learning_method='batch')
    assert_warns(DeprecationWarning, lda.fit, X)
16,445
38.064133
79
py
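The LDA tests above rely on a block-diagonal corpus in which each of three topics owns three disjoint words, so a correctly fitted model should put each topic's top-3 words inside one block. A standalone sketch of that construction:

import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from sklearn.decomposition import LatentDirichletAllocation

# three disjoint 3-word topics, as in _build_sparse_mtx above
X = csr_matrix(block_diag(*[3 * np.ones((3, 3))] * 3))
lda = LatentDirichletAllocation(n_components=3, learning_method='batch',
                                random_state=0).fit(X)
for comp in lda.components_:
    print(comp.argsort()[-3:][::-1])  # top-3 word indices per topic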
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_kernel_pca.py
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import (assert_array_almost_equal, assert_less, assert_equal, assert_not_equal, assert_raises) from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles from sklearn.linear_model import Perceptron from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.metrics.pairwise import rbf_kernel def test_kernel_pca(): rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) X_pred = rng.random_sample((2, 4)) def histogram(x, y, **kwargs): # Histogram kernel implemented as a callable. assert_equal(kwargs, {}) # no kernel_params that we didn't ask for return np.minimum(x, y).sum() for eigen_solver in ("auto", "dense", "arpack"): for kernel in ("linear", "rbf", "poly", histogram): # histogram kernel produces singular matrix inside linalg.solve # XXX use a least-squares approximation? inv = not callable(kernel) # transform fit data kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv) X_fit_transformed = kpca.fit_transform(X_fit) X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) assert_array_almost_equal(np.abs(X_fit_transformed), np.abs(X_fit_transformed2)) # non-regression test: previously, gamma would be 0 by default, # forcing all eigenvalues to 0 under the poly kernel assert_not_equal(X_fit_transformed.size, 0) # transform new data X_pred_transformed = kpca.transform(X_pred) assert_equal(X_pred_transformed.shape[1], X_fit_transformed.shape[1]) # inverse transform if inv: X_pred2 = kpca.inverse_transform(X_pred_transformed) assert_equal(X_pred2.shape, X_pred.shape) def test_kernel_pca_invalid_parameters(): assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True, kernel='precomputed') def test_kernel_pca_consistent_transform(): # X_fit_ needs to retain the old, unmodified copy of X state = np.random.RandomState(0) X = state.rand(10, 10) kpca = KernelPCA(random_state=state).fit(X) transformed1 = kpca.transform(X) X_copy = X.copy() X[:, 0] = 666 transformed2 = kpca.transform(X_copy) assert_array_almost_equal(transformed1, transformed2) def test_kernel_pca_sparse(): rng = np.random.RandomState(0) X_fit = sp.csr_matrix(rng.random_sample((5, 4))) X_pred = sp.csr_matrix(rng.random_sample((2, 4))) for eigen_solver in ("auto", "arpack"): for kernel in ("linear", "rbf", "poly"): # transform fit data kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=False) X_fit_transformed = kpca.fit_transform(X_fit) X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) assert_array_almost_equal(np.abs(X_fit_transformed), np.abs(X_fit_transformed2)) # transform new data X_pred_transformed = kpca.transform(X_pred) assert_equal(X_pred_transformed.shape[1], X_fit_transformed.shape[1]) # inverse transform # X_pred2 = kpca.inverse_transform(X_pred_transformed) # assert_equal(X_pred2.shape, X_pred.shape) def test_kernel_pca_linear_kernel(): rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) X_pred = rng.random_sample((2, 4)) # for a linear kernel, kernel PCA should find the same projection as PCA # modulo the sign (direction) # fit only the first four components: fifth is near zero eigenvalue, so # can be trimmed due to roundoff error assert_array_almost_equal( np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)), np.abs(PCA(4).fit(X_fit).transform(X_pred))) def test_kernel_pca_n_components(): rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) X_pred = 
rng.random_sample((2, 4)) for eigen_solver in ("dense", "arpack"): for c in [1, 2, 4]: kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver) shape = kpca.fit(X_fit).transform(X_pred).shape assert_equal(shape, (2, c)) def test_remove_zero_eig(): X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]]) # n_components=None (default) => remove_zero_eig is True kpca = KernelPCA() Xt = kpca.fit_transform(X) assert_equal(Xt.shape, (3, 0)) kpca = KernelPCA(n_components=2) Xt = kpca.fit_transform(X) assert_equal(Xt.shape, (3, 2)) kpca = KernelPCA(n_components=2, remove_zero_eig=True) Xt = kpca.fit_transform(X) assert_equal(Xt.shape, (3, 0)) def test_kernel_pca_precomputed(): rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) X_pred = rng.random_sample((2, 4)) for eigen_solver in ("dense", "arpack"): X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\ fit(X_fit).transform(X_pred) X_kpca2 = KernelPCA( 4, eigen_solver=eigen_solver, kernel='precomputed').fit( np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T)) X_kpca_train = KernelPCA( 4, eigen_solver=eigen_solver, kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T)) X_kpca_train2 = KernelPCA( 4, eigen_solver=eigen_solver, kernel='precomputed').fit( np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T)) assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2)) assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2)) def test_kernel_pca_invalid_kernel(): rng = np.random.RandomState(0) X_fit = rng.random_sample((2, 4)) kpca = KernelPCA(kernel="tototiti") assert_raises(ValueError, kpca.fit, X_fit) def test_gridsearch_pipeline(): # Test if we can do a grid-search to find parameters to separate # circles with a perceptron model. X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0) kpca = KernelPCA(kernel="rbf", n_components=2) pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2)) grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) grid_search.fit(X, y) assert_equal(grid_search.best_score_, 1) def test_gridsearch_pipeline_precomputed(): # Test if we can do a grid-search to find parameters to separate # circles with a perceptron model using a precomputed kernel. X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0) kpca = KernelPCA(kernel="precomputed", n_components=2) pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) param_grid = dict(Perceptron__max_iter=np.arange(1, 5)) grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) X_kernel = rbf_kernel(X, gamma=2.) grid_search.fit(X_kernel, y) assert_equal(grid_search.best_score_, 1) def test_nested_circles(): # Test the linear separability of the first 2D KPCA transform X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0) # 2D nested circles are not linearly separable train_score = Perceptron(max_iter=5).fit(X, y).score(X, y) assert_less(train_score, 0.8) # Project the circles data into the first 2 components of a RBF Kernel # PCA model. # Note that the gamma value is data dependent. If this test breaks # and the gamma value has to be updated, the Kernel PCA example will # have to be updated too. kpca = KernelPCA(kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.) 
    X_kpca = kpca.fit_transform(X)

    # The data is perfectly linearly separable in that space
    train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y)
    assert_equal(train_score, 1.0)
8,564
37.066667
77
py
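The nested-circles test above is the canonical KernelPCA demonstration: an RBF kernel with a data-dependent gamma makes the two circles linearly separable in the projected space. A trimmed standalone sketch (gamma=2. taken from the test, no Perceptron step):

import numpy as np
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA

X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0)
kpca = KernelPCA(kernel='rbf', n_components=2, gamma=2.)
X_kpca = kpca.fit_transform(X)
# the two classes separate along the first projected component
print(X_kpca[y == 0, 0].mean(), X_kpca[y == 1, 0].mean())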
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_fastica.py
""" Test the fastica algorithm. """ import itertools import warnings import numpy as np from scipy import stats from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_raises from sklearn.decomposition import FastICA, fastica, PCA from sklearn.decomposition.fastica_ import _gs_decorrelation from sklearn.externals.six import moves def center_and_norm(x, axis=-1): """ Centers and norms x **in place** Parameters ----------- x: ndarray Array with an axis of observations (statistical units) measured on random variables. axis: int, optional Axis along which the mean and variance are calculated. """ x = np.rollaxis(x, axis) x -= x.mean(axis=0) x /= x.std(axis=0) def test_gs(): # Test gram schmidt orthonormalization # generate a random orthogonal matrix rng = np.random.RandomState(0) W, _, _ = np.linalg.svd(rng.randn(10, 10)) w = rng.randn(10) _gs_decorrelation(w, W, 10) assert_less((w ** 2).sum(), 1.e-10) w = rng.randn(10) u = _gs_decorrelation(w, W, 5) tmp = np.dot(u, W.T) assert_less((tmp[:5] ** 2).sum(), 1.e-10) def test_fastica_simple(add_noise=False): # Test the FastICA algorithm on very simple data. rng = np.random.RandomState(0) # scipy.stats uses the global RNG: np.random.seed(0) n_samples = 1000 # Generate two sources: s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 s2 = stats.t.rvs(1, size=n_samples) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing angle phi = 0.6 mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(2, 1000) center_and_norm(m) # function as fun arg def g_test(x): return x ** 3, (3 * x ** 2).mean(axis=-1) algos = ['parallel', 'deflation'] nls = ['logcosh', 'exp', 'cube', g_test] whitening = [True, False] for algo, nl, whiten in itertools.product(algos, nls, whitening): if whiten: k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo) assert_raises(ValueError, fastica, m.T, fun=np.tanh, algorithm=algo) else: X = PCA(n_components=2, whiten=True).fit_transform(m.T) k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False) assert_raises(ValueError, fastica, X, fun=np.tanh, algorithm=algo) s_ = s_.T # Check that the mixing model described in the docstring holds: if whiten: assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m)) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2) else: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1) # Test FastICA class _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0) ica = FastICA(fun=nl, algorithm=algo, random_state=0) sources = ica.fit_transform(m.T) assert_equal(ica.components_.shape, (2, 2)) assert_equal(sources.shape, (1000, 2)) assert_array_almost_equal(sources_fun, sources) assert_array_almost_equal(sources, ica.transform(m.T)) assert_equal(ica.mixing_.shape, (2, 2)) for fn in 
[np.tanh, "exp(-.5(x^2))"]: ica = FastICA(fun=fn, algorithm=algo, random_state=0) assert_raises(ValueError, ica.fit, m.T) assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T) def test_fastica_nowhiten(): m = [[0, 1], [1, 0]] # test for issue #697 ica = FastICA(n_components=1, whiten=False, random_state=0) assert_warns(UserWarning, ica.fit, m) assert_true(hasattr(ica, 'mixing_')) def test_non_square_fastica(add_noise=False): # Test the FastICA algorithm on very simple data. rng = np.random.RandomState(0) n_samples = 1000 # Generate two sources: t = np.linspace(0, 100, n_samples) s1 = np.sin(t) s2 = np.ceil(np.sin(np.pi * t)) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing matrix mixing = rng.randn(6, 2) m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(6, n_samples) center_and_norm(m) k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng) s_ = s_.T # Check that the mixing model described in the docstring holds: assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m)) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3) def test_fit_transform(): # Test FastICA.fit_transform rng = np.random.RandomState(0) X = rng.random_sample((100, 10)) for whiten, n_components in [[True, 5], [False, None]]: n_components_ = (n_components if n_components is not None else X.shape[1]) ica = FastICA(n_components=n_components, whiten=whiten, random_state=0) Xt = ica.fit_transform(X) assert_equal(ica.components_.shape, (n_components_, 10)) assert_equal(Xt.shape, (100, n_components_)) ica = FastICA(n_components=n_components, whiten=whiten, random_state=0) ica.fit(X) assert_equal(ica.components_.shape, (n_components_, 10)) Xt2 = ica.transform(X) assert_array_almost_equal(Xt, Xt2) def test_inverse_transform(): # Test FastICA.inverse_transform n_features = 10 n_samples = 100 n1, n2 = 5, 10 rng = np.random.RandomState(0) X = rng.random_sample((n_samples, n_features)) expected = {(True, n1): (n_features, n1), (True, n2): (n_features, n2), (False, n1): (n_features, n2), (False, n2): (n_features, n2)} for whiten in [True, False]: for n_components in [n1, n2]: n_components_ = (n_components if n_components is not None else X.shape[1]) ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten) with warnings.catch_warnings(record=True): # catch "n_components ignored" warning Xt = ica.fit_transform(X) expected_shape = expected[(whiten, n_components_)] assert_equal(ica.mixing_.shape, expected_shape) X2 = ica.inverse_transform(Xt) assert_equal(X.shape, X2.shape) # reversibility test in non-reduction case if n_components == X.shape[1]: assert_array_almost_equal(X, X2)
7,808
32.088983
79
py
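FastICA recovers independent sources only up to sign, scale, and permutation, which is why the tests above realign and renormalize before comparing. A minimal standalone unmixing sketch on two synthetic sources (hypothetical mixing matrix):

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
t = np.linspace(0, 8, 1000)
S = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]  # two independent sources
A = np.array([[1., 1.], [0.5, 2.]])               # assumed mixing matrix
X = S.dot(A.T)                                    # observed mixtures

ica = FastICA(n_components=2, random_state=0)
S_est = ica.fit_transform(X)  # estimated sources, up to sign/scale/order
print(ica.mixing_.shape)      # (2, 2): estimated mixing matrix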
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_dict_learning.py
import numpy as np import itertools from sklearn.exceptions import ConvergenceWarning from sklearn.utils import check_array from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import TempMemmap from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_sparse_encode_shapes_omp(): rng = np.random.RandomState(0) algorithms = ['omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold'] for n_components, n_samples in itertools.product([1, 5], [1, 9]): X_ = rng.randn(n_samples, n_features) dictionary = rng.randn(n_components, n_features) for algorithm, n_jobs in itertools.product(algorithms, [1, 3]): code = sparse_encode(X_, dictionary, algorithm=algorithm, n_jobs=n_jobs) assert_equal(code.shape, (n_samples, n_components)) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_equal(dico.components_.shape, (n_components, n_features)) n_components = 1 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_equal(dico.components_.shape, (n_components, n_features)) assert_equal(dico.transform(X).shape, (X.shape[0], n_components)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. 
def test_dict_learning_reconstruction_parallel(): # regression test that parallel reconstruction works with n_jobs=-1 n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) def test_dict_learning_lassocd_readonly_data(): n_components = 12 with TempMemmap(X) as X_read_only: dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd', transform_alpha=0.001, random_state=0, n_jobs=-1) with ignore_warnings(category=ConvergenceWarning): code = dico.fit(X_read_only).transform(X_read_only) assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2) def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[np.newaxis, 1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[np.newaxis, 1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, (n_components, n_features)) assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) finally: sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): 
n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X), batch_size=1, alpha=1, shuffle=False, dict_init=V, random_state=0).fit(X) dict2 = MiniBatchDictionaryLearning(n_components, alpha=1, n_iter=1, dict_init=V, random_state=0) for i in range(10): for sample in X: dict2.partial_fit(sample[np.newaxis, :]) assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)) assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_input(): n_components = 100 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] Xf = check_array(X, order='F') for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): a = sparse_encode(X, V, algorithm=algo) b = sparse_encode(Xf, V, algorithm=algo) assert_array_almost_equal(a, b) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_sparse_encode_error_default_sparsity(): rng = np.random.RandomState(0) X = rng.randn(100, 64) D = rng.randn(2, 64) code = ignore_warnings(sparse_encode)(X, D, algorithm='omp', n_nonzero_coefs=None) assert_equal(code.shape, (100, 2)) def test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
10,084
37.056604
79
py
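A minimal sketch of the fit/encode/reconstruct round trip that the tests above exercise, assuming the scikit-learn 0.19-era API used in this repo; X_demo and its shape are hypothetical, not from the record:

import numpy as np
from sklearn.decomposition import DictionaryLearning

rng = np.random.RandomState(0)
X_demo = rng.randn(10, 8)  # hypothetical data: 10 samples, 8 features

# learn a 5-atom dictionary, then sparse-code X_demo against it with OMP
dico = DictionaryLearning(n_components=5, transform_algorithm='omp',
                          transform_alpha=0.001, random_state=0)
code = dico.fit(X_demo).transform(X_demo)
X_hat = np.dot(code, dico.components_)  # approximate reconstruction of X_demo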
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_pca.py
import numpy as np
import scipy as sp
from itertools import product

from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less

from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_

iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']


def test_pca():
    # PCA on dense arrays
    X = iris.data

    for n_comp in np.arange(X.shape[1]):
        pca = PCA(n_components=n_comp, svd_solver='full')

        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # Test get_covariance and get_precision
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)

    # test explained_variance_ratio_ == 1 with all components
    pca = PCA(svd_solver='full')
    pca.fit(X)
    assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)


def test_pca_arpack_solver():
    # PCA on dense arrays
    X = iris.data
    d = X.shape[1]

    # Loop excluding the extremes, invalid inputs for arpack
    for n_comp in np.arange(1, d):
        pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)

        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # Test get_covariance and get_precision
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(d), 12)

    pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    # Check internal state
    assert_equal(pca.n_components,
                 PCA(n_components=0,
                     svd_solver='arpack', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='arpack', random_state=0).svd_solver)

    pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    assert_equal(pca.n_components,
                 PCA(n_components=d,
                     svd_solver='arpack', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='arpack', random_state=0).svd_solver)


def test_pca_randomized_solver():
    # PCA on dense arrays
    X = iris.data

    # Loop excluding the 0, invalid for randomized
    for n_comp in np.arange(1, X.shape[1]):
        pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)

        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # Test get_covariance and get_precision
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)

    pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
    assert_raises(ValueError, pca.fit, X)

    pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    # Check internal state
    assert_equal(pca.n_components,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).svd_solver)


def test_no_empty_slice_warning():
    # test if we avoid numpy warnings for computing over empty arrays
    n_components = 10
    n_features = n_components + 2  # anything > n_comps triggered it in 0.16
    X = np.random.uniform(-1, 1, size=(n_components, n_features))
    pca = PCA(n_components=n_components)
    assert_no_warnings(pca.fit, X)


def test_whitening():
    # Check that PCA output has unit-variance
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    n_components = 30
    rank = 50

    # some low rank data with correlated features
    X = np.dot(rng.randn(n_samples, rank),
               np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
                      rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
    X[:, :50] *= 3

    assert_equal(X.shape, (n_samples, n_features))

    # the component-wise variance is thus highly varying:
    assert_greater(X.std(axis=0).std(), 43.8)

    for solver, copy in product(solver_list, (True, False)):
        # whiten the data while projecting to the lower dim subspace
        X_ = X.copy()  # make sure we keep an original across iterations.
        pca = PCA(n_components=n_components, whiten=True, copy=copy,
                  svd_solver=solver, random_state=0, iterated_power=7)
        # test fit_transform
        X_whitened = pca.fit_transform(X_.copy())
        assert_equal(X_whitened.shape, (n_samples, n_components))
        X_whitened2 = pca.transform(X_)
        assert_array_almost_equal(X_whitened, X_whitened2)

        assert_almost_equal(X_whitened.std(ddof=1, axis=0),
                            np.ones(n_components),
                            decimal=6)
        assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))

        X_ = X.copy()
        pca = PCA(n_components=n_components, whiten=False, copy=copy,
                  svd_solver=solver).fit(X_)
        X_unwhitened = pca.transform(X_)
        assert_equal(X_unwhitened.shape, (n_samples, n_components))

        # in that case the output components still have varying variances
        assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
        # we always center, so no test for non-centering.


# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
    # Check that the explained variances agree across solvers and match
    # the empirical variances of the projected data
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80

    X = rng.randn(n_samples, n_features)

    pca = PCA(n_components=2, svd_solver='full').fit(X)
    apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
    assert_array_almost_equal(pca.explained_variance_,
                              apca.explained_variance_, 1)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              apca.explained_variance_ratio_, 3)

    rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
    assert_array_almost_equal(pca.explained_variance_,
                              rpca.explained_variance_, 1)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 1)

    # compare to empirical variances
    expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
    expected_result = sorted(expected_result, reverse=True)[:2]

    X_pca = pca.transform(X)
    assert_array_almost_equal(pca.explained_variance_,
                              np.var(X_pca, ddof=1, axis=0))
    assert_array_almost_equal(pca.explained_variance_, expected_result)

    X_pca = apca.transform(X)
    assert_array_almost_equal(apca.explained_variance_,
                              np.var(X_pca, ddof=1, axis=0))
    assert_array_almost_equal(apca.explained_variance_, expected_result)

    X_rpca = rpca.transform(X)
    assert_array_almost_equal(rpca.explained_variance_,
                              np.var(X_rpca, ddof=1, axis=0),
                              decimal=1)
    assert_array_almost_equal(rpca.explained_variance_,
                              expected_result, decimal=1)

    # Same with correlated data
    X = datasets.make_classification(n_samples, n_features,
                                     n_informative=n_features-2,
                                     random_state=rng)[0]

    pca = PCA(n_components=2).fit(X)
    rpca = PCA(n_components=2, svd_solver='randomized',
               random_state=rng).fit(X)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 5)


def test_singular_values():
    # Check that the PCA output has the correct singular values
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80

    X = rng.randn(n_samples, n_features)

    pca = PCA(n_components=2, svd_solver='full', random_state=rng).fit(X)
    apca = PCA(n_components=2, svd_solver='arpack', random_state=rng).fit(X)
    rpca = PCA(n_components=2, svd_solver='randomized',
               random_state=rng).fit(X)
    assert_array_almost_equal(pca.singular_values_, apca.singular_values_, 12)
    assert_array_almost_equal(pca.singular_values_, rpca.singular_values_, 1)
    assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 1)

    # Compare to the Frobenius norm
    X_pca = pca.transform(X)
    X_apca = apca.transform(X)
    X_rpca = rpca.transform(X)
    assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
                              np.linalg.norm(X_pca, "fro")**2.0, 12)
    assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
                              np.linalg.norm(X_apca, "fro")**2.0, 9)
    assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
                              np.linalg.norm(X_rpca, "fro")**2.0, 0)

    # Compare to the 2-norms of the score vectors
    assert_array_almost_equal(pca.singular_values_,
                              np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
    assert_array_almost_equal(apca.singular_values_,
                              np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
    assert_array_almost_equal(rpca.singular_values_,
                              np.sqrt(np.sum(X_rpca**2.0, axis=0)), 2)

    # Set the singular values and see what we get back
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 110

    X = rng.randn(n_samples, n_features)

    pca = PCA(n_components=3, svd_solver='full', random_state=rng)
    apca = PCA(n_components=3, svd_solver='arpack', random_state=rng)
    rpca = PCA(n_components=3, svd_solver='randomized', random_state=rng)
    X_pca = pca.fit_transform(X)

    X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
    X_pca[:, 0] *= 3.142
    X_pca[:, 1] *= 2.718

    X_hat = np.dot(X_pca, pca.components_)
    pca.fit(X_hat)
    apca.fit(X_hat)
    rpca.fit(X_hat)
    assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
    assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
    assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)


def test_pca_check_projection():
    # Test that the projection of data is correct
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])

    for solver in solver_list:
        Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
        Yt /= np.sqrt((Yt ** 2).sum())

        assert_almost_equal(np.abs(Yt[0][0]), 1., 1)


def test_pca_inverse():
    # Test that the projection of data can be inverted
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    pca = PCA(n_components=2, svd_solver='full').fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)

    # same as above with whitening (approximate reconstruction)
    for solver in solver_list:
        pca = PCA(n_components=2, whiten=True, svd_solver=solver)
        pca.fit(X)
        Y = pca.transform(X)
        Y_inverse = pca.inverse_transform(Y)
        assert_almost_equal(X, Y_inverse, decimal=3)


def test_pca_validation():
    X = [[0, 1], [1, 0]]
    for solver in solver_list:
        for n_components in [-1, 3]:
            assert_raises(ValueError,
                          PCA(n_components, svd_solver=solver).fit, X)


def test_randomized_pca_check_projection():
    # Test that the projection by randomized PCA on dense data is correct
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])

    Yt = PCA(n_components=2, svd_solver='randomized',
             random_state=0).fit(X).transform(Xt)
    Yt /= np.sqrt((Yt ** 2).sum())

    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)


def test_randomized_pca_check_list():
    # Test that the projection by randomized PCA on list data is correct
    X = [[1.0, 0.0], [0.0, 1.0]]
    X_transformed = PCA(n_components=1,
                        svd_solver='randomized',
                        random_state=0).fit(X).transform(X)
    assert_equal(X_transformed.shape, (2, 1))
    assert_almost_equal(X_transformed.mean(), 0.00, 2)
    assert_almost_equal(X_transformed.std(), 0.71, 2)


def test_randomized_pca_inverse():
    # Test that randomized PCA is invertible on dense data
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # same check that we can find the original data from the transformed signal
    # (since the data is almost of rank n_components)
    pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=2)

    # same as above with whitening (approximate reconstruction)
    pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
              random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
    assert_less(relative_max_delta, 1e-5)


def test_pca_dim():
    # Check automated dimensionality setting
    rng = np.random.RandomState(0)
    n, p = 100, 5
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    pca = PCA(n_components='mle', svd_solver='full').fit(X)
    assert_equal(pca.n_components, 'mle')
    assert_equal(pca.n_components_, 1)


def test_infer_dim_1():
    # TODO: explain what this is testing
    # Or at least use explicit variable names...
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
         np.array([1, 0, 7, 4, 6]))
    pca = PCA(n_components=p, svd_solver='full')
    pca.fit(X)
    spect = pca.explained_variance_
    ll = []
    for k in range(p):
        ll.append(_assess_dimension_(spect, k, n, p))
    ll = np.array(ll)
    assert_greater(ll[1], ll.max() - .01 * n)


def test_infer_dim_2():
    # TODO: explain what this is testing
    # Or at least use explicit variable names...
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    pca = PCA(n_components=p, svd_solver='full')
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 1)


def test_infer_dim_3():
    n, p = 100, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
    pca = PCA(n_components=p, svd_solver='full')
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 2)


def test_infer_dim_by_explained_variance():
    X = iris.data
    pca = PCA(n_components=0.95, svd_solver='full')
    pca.fit(X)
    assert_equal(pca.n_components, 0.95)
    assert_equal(pca.n_components_, 2)

    pca = PCA(n_components=0.01, svd_solver='full')
    pca.fit(X)
    assert_equal(pca.n_components, 0.01)
    assert_equal(pca.n_components_, 1)

    rng = np.random.RandomState(0)
    # more features than samples
    X = rng.rand(5, 20)
    pca = PCA(n_components=.5, svd_solver='full').fit(X)
    assert_equal(pca.n_components, 0.5)
    assert_equal(pca.n_components_, 2)


def test_pca_score():
    # Test that probabilistic PCA scoring yields a reasonable score
    n, p = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    for solver in solver_list:
        pca = PCA(n_components=2, svd_solver=solver)
        pca.fit(X)
        ll1 = pca.score(X)
        h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
        np.testing.assert_almost_equal(ll1 / h, 1, 0)


def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    for solver in solver_list:
        pca = PCA(n_components=2, svd_solver=solver)
        pca.fit(X)
        ll1 = pca.score(X)
        ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
        assert_greater(ll1, ll2)

        # Test that it gives different scores if whiten=True
        pca = PCA(n_components=2, whiten=True, svd_solver=solver)
        pca.fit(X)
        ll2 = pca.score(X)
        assert_true(ll1 > ll2)


def test_pca_score3():
    # Check that probabilistic PCA selects the right model
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k, svd_solver='full')
        pca.fit(Xl)
        ll[k] = pca.score(Xt)

    assert_true(ll.argmax() == 1)


def test_pca_score_with_different_solvers():
    digits = datasets.load_digits()
    X_digits = digits.data

    pca_dict = {svd_solver: PCA(n_components=30, svd_solver=svd_solver,
                                random_state=0)
                for svd_solver in solver_list}

    for pca in pca_dict.values():
        pca.fit(X_digits)
        # Sanity check for the noise_variance_. For more details see
        # https://github.com/scikit-learn/scikit-learn/issues/7568
        # https://github.com/scikit-learn/scikit-learn/issues/8541
        # https://github.com/scikit-learn/scikit-learn/issues/8544
        assert np.all((pca.explained_variance_ -
                       pca.noise_variance_) >= 0)

    # Compare scores with different svd_solvers
    score_dict = {svd_solver: pca.score(X_digits)
                  for svd_solver, pca in pca_dict.items()}
    assert_almost_equal(score_dict['full'], score_dict['arpack'])
    assert_almost_equal(score_dict['full'], score_dict['randomized'],
                        decimal=3)


def test_pca_zero_noise_variance_edge_cases():
    # ensure that noise_variance_ is 0 in edge cases
    # when n_components == min(n_samples, n_features)
    n, p = 100, 3

    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    # arpack raises ValueError for n_components == min(n_samples,
    # n_features)
    svd_solvers = ['full', 'randomized']

    for svd_solver in svd_solvers:
        pca = PCA(svd_solver=svd_solver, n_components=p)
        pca.fit(X)
        assert pca.noise_variance_ == 0

        pca.fit(X.T)
        assert pca.noise_variance_ == 0


def test_svd_solver_auto():
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(1000, 50))

    # case: n_components in (0,1) => 'full'
    pca = PCA(n_components=.5)
    pca.fit(X)
    pca_test = PCA(n_components=.5, svd_solver='full')
    pca_test.fit(X)
    assert_array_almost_equal(pca.components_, pca_test.components_)

    # case: max(X.shape) <= 500 => 'full'
    pca = PCA(n_components=5, random_state=0)
    Y = X[:10, :]
    pca.fit(Y)
    pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
    pca_test.fit(Y)
    assert_array_almost_equal(pca.components_, pca_test.components_)

    # case: n_components >= .8 * min(X.shape) => 'full'
    pca = PCA(n_components=50)
    pca.fit(X)
    pca_test = PCA(n_components=50, svd_solver='full')
    pca_test.fit(X)
    assert_array_almost_equal(pca.components_, pca_test.components_)

    # n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
    pca = PCA(n_components=10, random_state=0)
    pca.fit(X)
    pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
    pca_test.fit(X)
    assert_array_almost_equal(pca.components_, pca_test.components_)


def test_deprecation_randomized_pca():
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))

    depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
                    "deprecated in 0.18 and will be "
                    "removed in 0.20. Use PCA(svd_solver='randomized') "
                    "instead. The new implementation DOES NOT store "
                    "whiten ``components_``. Apply transform to get them.")

    def fit_deprecated(X):
        global Y
        rpca = RandomizedPCA(random_state=0)
        Y = rpca.fit_transform(X)

    assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
    Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
    assert_array_almost_equal(Y, Y_pca)


def test_pca_sparse_input():
    X = np.random.RandomState(0).rand(5, 4)
    X = sp.sparse.csr_matrix(X)
    assert(sp.sparse.issparse(X))

    for svd_solver in solver_list:
        pca = PCA(n_components=3, svd_solver=svd_solver)

        assert_raises(TypeError, pca.fit, X)


def test_pca_bad_solver():
    X = np.random.RandomState(0).rand(5, 4)
    pca = PCA(n_components=3, svd_solver='bad_argument')
    assert_raises(ValueError, pca.fit, X)


def test_pca_dtype_preservation():
    for svd_solver in solver_list:
        yield check_pca_float_dtype_preservation, svd_solver
        yield check_pca_int_dtype_upcast_to_double, svd_solver


def check_pca_float_dtype_preservation(svd_solver):
    # Ensure that PCA does not upscale the dtype when input is float32
    X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64)
    X_32 = X_64.astype(np.float32)

    pca_64 = PCA(n_components=3, svd_solver=svd_solver,
                 random_state=0).fit(X_64)
    pca_32 = PCA(n_components=3, svd_solver=svd_solver,
                 random_state=0).fit(X_32)

    assert pca_64.components_.dtype == np.float64
    assert pca_32.components_.dtype == np.float32
    assert pca_64.transform(X_64).dtype == np.float64
    assert pca_32.transform(X_32).dtype == np.float32

    assert_array_almost_equal(pca_64.components_, pca_32.components_,
                              decimal=5)


def check_pca_int_dtype_upcast_to_double(svd_solver):
    # Ensure that all int types will be upcast to float64
    X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
    X_i64 = X_i64.astype(np.int64)
    X_i32 = X_i64.astype(np.int32)

    pca_64 = PCA(n_components=3, svd_solver=svd_solver,
                 random_state=0).fit(X_i64)
    pca_32 = PCA(n_components=3, svd_solver=svd_solver,
                 random_state=0).fit(X_i32)

    assert pca_64.components_.dtype == np.float64
    assert pca_32.components_.dtype == np.float64
    assert pca_64.transform(X_i64).dtype == np.float64
    assert pca_32.transform(X_i32).dtype == np.float64

    assert_array_almost_equal(pca_64.components_, pca_32.components_,
                              decimal=5)
24,916
35.111594
79
py
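The tests above repeatedly check that the 'full', 'arpack' and 'randomized' solvers agree; a minimal sketch of that pattern (X_demo is hypothetical, and arpack requires n_components strictly less than min(n_samples, n_features)):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X_demo = rng.randn(100, 20)  # hypothetical data
for solver in ('full', 'arpack', 'randomized'):
    pca = PCA(n_components=3, svd_solver=solver, random_state=0).fit(X_demo)
    # the leading explained-variance ratios should match across solvers
    print(solver, pca.explained_variance_ratio_)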
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/_function_transformer.py
import warnings

from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..externals.six import string_types


def _identity(X):
    """The identity function.
    """
    return X


class FunctionTransformer(BaseEstimator, TransformerMixin):
    """Constructs a transformer from an arbitrary callable.

    A FunctionTransformer forwards its X (and optionally y) arguments to a
    user-defined function or function object and returns the result of this
    function. This is useful for stateless transformations such as taking the
    log of frequencies, doing custom scaling, etc.

    A FunctionTransformer will not do any checks on its function's output.

    Note: If a lambda is used as the function, then the resulting
    transformer will not be pickleable.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <function_transformer>`.

    Parameters
    ----------
    func : callable, optional default=None
        The callable to use for the transformation. This will be passed
        the same arguments as transform, with args and kwargs forwarded.
        If func is None, then func will be the identity function.

    inverse_func : callable, optional default=None
        The callable to use for the inverse transformation. This will be
        passed the same arguments as inverse transform, with args and
        kwargs forwarded. If inverse_func is None, then inverse_func
        will be the identity function.

    validate : bool, optional default=True
        Indicate that the input X array should be checked before calling
        func. If validate is false, there will be no input validation.
        If it is true, then X will be converted to a 2-dimensional NumPy
        array or sparse matrix. If this conversion is not possible or X
        contains NaN or infinity, an exception is raised.

    accept_sparse : boolean, optional
        Indicate that func accepts a sparse matrix as input. If validate is
        False, this has no effect. Otherwise, if accept_sparse is false,
        sparse matrix inputs will cause an exception to be raised.

    pass_y : bool, optional default=False
        Indicate that transform should forward the y argument to the
        inner callable.

        .. deprecated:: 0.19

    kw_args : dict, optional
        Dictionary of additional keyword arguments to pass to func.

    inv_kw_args : dict, optional
        Dictionary of additional keyword arguments to pass to inverse_func.

    """
    def __init__(self, func=None, inverse_func=None, validate=True,
                 accept_sparse=False, pass_y='deprecated',
                 kw_args=None, inv_kw_args=None):
        self.func = func
        self.inverse_func = inverse_func
        self.validate = validate
        self.accept_sparse = accept_sparse
        self.pass_y = pass_y
        self.kw_args = kw_args
        self.inv_kw_args = inv_kw_args

    def fit(self, X, y=None):
        """Fit transformer by checking X.

        If ``validate`` is ``True``, ``X`` will be checked.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input array.

        Returns
        -------
        self
        """
        if self.validate:
            check_array(X, self.accept_sparse)
        return self

    def transform(self, X, y='deprecated'):
        """Transform X using the forward function.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input array.

        y : (ignored)
            .. deprecated:: 0.19

        Returns
        -------
        X_out : array-like, shape (n_samples, n_features)
            Transformed input.
        """
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        return self._transform(X, y=y, func=self.func, kw_args=self.kw_args)

    def inverse_transform(self, X, y='deprecated'):
        """Transform X using the inverse function.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input array.

        y : (ignored)
            .. deprecated:: 0.19

        Returns
        -------
        X_out : array-like, shape (n_samples, n_features)
            Transformed input.
        """
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on inverse_transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)
        return self._transform(X, y=y, func=self.inverse_func,
                               kw_args=self.inv_kw_args)

    def _transform(self, X, y=None, func=None, kw_args=None):
        if self.validate:
            X = check_array(X, self.accept_sparse)

        if func is None:
            func = _identity

        if (not isinstance(self.pass_y, string_types) or
                self.pass_y != 'deprecated'):
            # We do this to know if pass_y was set to False / True
            pass_y = self.pass_y
            warnings.warn("The parameter pass_y is deprecated since 0.19 and "
                          "will be removed in 0.21", DeprecationWarning)
        else:
            pass_y = False

        return func(X, *((y,) if pass_y else ()),
                    **(kw_args if kw_args else {}))
5,548
33.253086
78
py
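The docstring above describes stateless transforms such as taking the log of frequencies; a minimal usage sketch of that case (the variable names are illustrative, not from the record):

import numpy as np
from sklearn.preprocessing import FunctionTransformer

# a stateless log transform: fit() learns nothing, transform() applies func
transformer = FunctionTransformer(np.log1p, inverse_func=np.expm1)
X_demo = np.array([[0., 1.], [2., 3.]])
X_log = transformer.fit_transform(X_demo)
X_back = transformer.inverse_transform(X_log)  # recovers X_demo up to fp error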
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/imputation.py
# Authors: Nicolas Tresegnie <[email protected]>
# License: BSD 3 clause

import warnings

import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats

from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals import six

zip = six.moves.zip
map = six.moves.map

__all__ = [
    'Imputer',
]


def _get_mask(X, value_to_mask):
    """Compute the boolean mask X == missing_values."""
    if value_to_mask == "NaN" or np.isnan(value_to_mask):
        return np.isnan(X)
    else:
        return X == value_to_mask


def _most_frequent(array, extra_value, n_repeat):
    """Compute the most frequent value in a 1d array extended with
    [extra_value] * n_repeat, where extra_value is assumed to be not part
    of the array."""
    # Compute the most frequent value in array only
    if array.size > 0:
        mode = stats.mode(array)
        most_frequent_value = mode[0][0]
        most_frequent_count = mode[1][0]
    else:
        most_frequent_value = 0
        most_frequent_count = 0

    # Compare to array + [extra_value] * n_repeat
    if most_frequent_count == 0 and n_repeat == 0:
        return np.nan
    elif most_frequent_count < n_repeat:
        return extra_value
    elif most_frequent_count > n_repeat:
        return most_frequent_value
    elif most_frequent_count == n_repeat:
        # Tie breaking: copy the behaviour of scipy.stats.mode
        if most_frequent_value < extra_value:
            return most_frequent_value
        else:
            return extra_value


class Imputer(BaseEstimator, TransformerMixin):
    """Imputation transformer for completing missing values.

    Read more in the :ref:`User Guide <imputation>`.

    Parameters
    ----------
    missing_values : integer or "NaN", optional (default="NaN")
        The placeholder for the missing values. All occurrences of
        `missing_values` will be imputed. For missing values encoded as
        np.nan, use the string value "NaN".

    strategy : string, optional (default="mean")
        The imputation strategy.

        - If "mean", then replace missing values using the mean along
          the axis.
        - If "median", then replace missing values using the median along
          the axis.
        - If "most_frequent", then replace missing using the most frequent
          value along the axis.

    axis : integer, optional (default=0)
        The axis along which to impute.

        - If `axis=0`, then impute along columns.
        - If `axis=1`, then impute along rows.

    verbose : integer, optional (default=0)
        Controls the verbosity of the imputer.

    copy : boolean, optional (default=True)
        If True, a copy of X will be created. If False, imputation will
        be done in-place whenever possible. Note that, in the following
        cases, a new copy will always be made, even if `copy=False`:

        - If X is not an array of floating values;
        - If X is sparse and `missing_values=0`;
        - If `axis=0` and X is encoded as a CSR matrix;
        - If `axis=1` and X is encoded as a CSC matrix.

    Attributes
    ----------
    statistics_ : array of shape (n_features,)
        The imputation fill value for each feature if axis == 0.

    Notes
    -----
    - When ``axis=0``, columns which only contained missing values at `fit`
      are discarded upon `transform`.
    - When ``axis=1``, an exception is raised if there are rows for which it
      is not possible to fill in the missing values (e.g., because they only
      contain missing values).
    """
    def __init__(self, missing_values="NaN", strategy="mean",
                 axis=0, verbose=0, copy=True):
        self.missing_values = missing_values
        self.strategy = strategy
        self.axis = axis
        self.verbose = verbose
        self.copy = copy

    def fit(self, X, y=None):
        """Fit the imputer on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features.

        Returns
        -------
        self : Imputer
            Returns self.
        """
        # Check parameters
        allowed_strategies = ["mean", "median", "most_frequent"]
        if self.strategy not in allowed_strategies:
            raise ValueError("Can only use these strategies: {0} "
                             " got strategy={1}".format(allowed_strategies,
                                                        self.strategy))

        if self.axis not in [0, 1]:
            raise ValueError("Can only impute missing values on axis 0 and 1, "
                             " got axis={0}".format(self.axis))

        # Since two different arrays can be provided in fit(X) and
        # transform(X), the imputation data will be computed in transform()
        # when the imputation is done per sample (i.e., when axis=1).
        if self.axis == 0:
            X = check_array(X, accept_sparse='csc', dtype=np.float64,
                            force_all_finite=False)

            if sparse.issparse(X):
                self.statistics_ = self._sparse_fit(X,
                                                    self.strategy,
                                                    self.missing_values,
                                                    self.axis)
            else:
                self.statistics_ = self._dense_fit(X,
                                                   self.strategy,
                                                   self.missing_values,
                                                   self.axis)

        return self

    def _sparse_fit(self, X, strategy, missing_values, axis):
        """Fit the transformer on sparse data."""
        # Imputation is done "by column", so if we want to do it
        # by row we only need to convert the matrix to csr format.
        if axis == 1:
            X = X.tocsr()
        else:
            X = X.tocsc()

        # Count the zeros
        if missing_values == 0:
            n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
        else:
            n_zeros_axis = X.shape[axis] - np.diff(X.indptr)

        # Mean
        if strategy == "mean":
            if missing_values != 0:
                n_non_missing = n_zeros_axis

                # Mask the missing elements
                mask_missing_values = _get_mask(X.data, missing_values)
                mask_valids = np.logical_not(mask_missing_values)

                # Sum only the valid elements
                new_data = X.data.copy()
                new_data[mask_missing_values] = 0
                X = sparse.csc_matrix((new_data, X.indices, X.indptr),
                                      copy=False)
                sums = X.sum(axis=0)

                # Count the elements != 0
                mask_non_zeros = sparse.csc_matrix(
                    (mask_valids.astype(np.float64),
                     X.indices,
                     X.indptr), copy=False)
                s = mask_non_zeros.sum(axis=0)
                n_non_missing = np.add(n_non_missing, s)

            else:
                sums = X.sum(axis=axis)
                n_non_missing = np.diff(X.indptr)

            # Ignore the error, columns with a np.nan statistics_
            # are not an error at this point. These columns will
            # be removed in transform
            with np.errstate(all="ignore"):
                return np.ravel(sums) / np.ravel(n_non_missing)

        # Median + Most frequent
        else:
            # Remove the missing values, for each column
            columns_all = np.hsplit(X.data, X.indptr[1:-1])
            mask_missing_values = _get_mask(X.data, missing_values)
            mask_valids = np.hsplit(np.logical_not(mask_missing_values),
                                    X.indptr[1:-1])

            # astype necessary for bug in numpy.hsplit before v1.9
            columns = [col[mask.astype(bool, copy=False)]
                       for col, mask in zip(columns_all, mask_valids)]

            # Median
            if strategy == "median":
                median = np.empty(len(columns))
                for i, column in enumerate(columns):
                    median[i] = _get_median(column, n_zeros_axis[i])

                return median

            # Most frequent
            elif strategy == "most_frequent":
                most_frequent = np.empty(len(columns))

                for i, column in enumerate(columns):
                    most_frequent[i] = _most_frequent(column,
                                                      0,
                                                      n_zeros_axis[i])

                return most_frequent

    def _dense_fit(self, X, strategy, missing_values, axis):
        """Fit the transformer on dense data."""
        X = check_array(X, force_all_finite=False)
        mask = _get_mask(X, missing_values)
        masked_X = ma.masked_array(X, mask=mask)

        # Mean
        if strategy == "mean":
            mean_masked = np.ma.mean(masked_X, axis=axis)
            # Avoid the warning "Warning: converting a masked element to nan."
            mean = np.ma.getdata(mean_masked)
            mean[np.ma.getmask(mean_masked)] = np.nan

            return mean

        # Median
        elif strategy == "median":
            if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
                # In old versions of numpy, calling a median on an array
                # containing nans returns nan. This is different in
                # recent versions of numpy, which we want to mimic
                masked_X.mask = np.logical_or(masked_X.mask,
                                              np.isnan(X))
            median_masked = np.ma.median(masked_X, axis=axis)
            # Avoid the warning "Warning: converting a masked element to nan."
            median = np.ma.getdata(median_masked)
            median[np.ma.getmaskarray(median_masked)] = np.nan

            return median

        # Most frequent
        elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
            # is equal to the frequency of the most frequent valid element
            # See https://github.com/scipy/scipy/issues/2636

            # To be able to access the elements by columns
            if axis == 0:
                X = X.transpose()
                mask = mask.transpose()

            most_frequent = np.empty(X.shape[0])

            for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
                row_mask = np.logical_not(row_mask).astype(np.bool)
                row = row[row_mask]
                most_frequent[i] = _most_frequent(row, np.nan, 0)

            return most_frequent

    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The input data to complete.
        """
        if self.axis == 0:
            check_is_fitted(self, 'statistics_')
            X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES,
                            force_all_finite=False, copy=self.copy)
            statistics = self.statistics_
            if X.shape[1] != statistics.shape[0]:
                raise ValueError("X has %d features per sample, expected %d"
                                 % (X.shape[1], self.statistics_.shape[0]))

        # Since two different arrays can be provided in fit(X) and
        # transform(X), the imputation data needs to be recomputed
        # when the imputation is done per sample
        else:
            X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
                            force_all_finite=False, copy=self.copy)

            if sparse.issparse(X):
                statistics = self._sparse_fit(X,
                                              self.strategy,
                                              self.missing_values,
                                              self.axis)

            else:
                statistics = self._dense_fit(X,
                                             self.strategy,
                                             self.missing_values,
                                             self.axis)

        # Delete the invalid rows/columns
        invalid_mask = np.isnan(statistics)
        valid_mask = np.logical_not(invalid_mask)
        valid_statistics = statistics[valid_mask]
        valid_statistics_indexes = np.where(valid_mask)[0]
        missing = np.arange(X.shape[not self.axis])[invalid_mask]

        if self.axis == 0 and invalid_mask.any():
            if self.verbose:
                warnings.warn("Deleting features without "
                              "observed values: %s" % missing)
            X = X[:, valid_statistics_indexes]
        elif self.axis == 1 and invalid_mask.any():
            raise ValueError("Some rows only contain "
                             "missing values: %s" % missing)

        # Do actual imputation
        if sparse.issparse(X) and self.missing_values != 0:
            mask = _get_mask(X.data, self.missing_values)
            indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
                                np.diff(X.indptr))[mask]

            X.data[mask] = valid_statistics[indexes].astype(X.dtype,
                                                            copy=False)
        else:
            if sparse.issparse(X):
                X = X.toarray()

            mask = _get_mask(X, self.missing_values)
            n_missing = np.sum(mask, axis=self.axis)
            values = np.repeat(valid_statistics, n_missing)

            if self.axis == 0:
                coordinates = np.where(mask.transpose())[::-1]
            else:
                coordinates = mask

            X[coordinates] = values

        return X
14,286
36.896552
79
py
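The Imputer docstring carries no usage example; a minimal sketch of column-wise mean imputation under this 0.19-era API (X_demo is hypothetical; note that Imputer was deprecated in later scikit-learn releases):

import numpy as np
from sklearn.preprocessing import Imputer

X_demo = np.array([[1., 2.],
                   [np.nan, 3.],
                   [7., 6.]])
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
X_filled = imp.fit_transform(X_demo)  # NaN in column 0 becomes (1+7)/2 = 4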
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/data.py
# Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Andreas Mueller <[email protected]> # Eric Martin <[email protected]> # Giorgio Patrini <[email protected]> # License: BSD 3 clause from __future__ import division from itertools import chain, combinations import numbers import warnings from itertools import combinations_with_replacement as combinations_w_r import numpy as np from scipy import sparse from scipy import stats from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.six import string_types from ..utils import check_array from ..utils.extmath import row_norms from ..utils.extmath import _incremental_mean_and_var from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, incr_mean_variance_axis, min_max_axis) from ..utils.validation import (check_is_fitted, check_random_state, FLOAT_DTYPES) BOUNDS_THRESHOLD = 1e-7 zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'QuantileTransformer', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'quantile_transform', ] def _handle_zeros_in_scale(scale, copy=True): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, np.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSC matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. See also -------- StandardScaler: Performs scaling to unit variance using the``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.mean(X, axis) if with_std: scale_ = np.std(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # subtract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. data_min_ : ndarray, shape (n_features,) Per feature minimum seen in the data .. versionadded:: 0.17 *data_min_* data_max_ : ndarray, shape (n_features,) Per feature maximum seen in the data .. versionadded:: 0.17 *data_max_* data_range_ : ndarray, shape (n_features,) Per feature range ``(data_max_ - data_min_)`` seen in the data .. 
versionadded:: 0.17 *data_range_* Examples -------- >>> from sklearn.preprocessing import MinMaxScaler >>> >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]] >>> scaler = MinMaxScaler() >>> print(scaler.fit(data)) MinMaxScaler(copy=True, feature_range=(0, 1)) >>> print(scaler.data_max_) [ 1. 18.] >>> print(scaler.transform(data)) [[ 0. 0. ] [ 0.25 0.25] [ 0.5 0.5 ] [ 1. 1. ]] >>> print(scaler.transform([[2, 2]])) [[ 1.5 0. ]] See also -------- minmax_scale: Equivalent function without the estimator API. Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if sparse.issparse(X): raise TypeError("MinMaxScaler does no support sparse input. " "You may consider to use MaxAbsScaler instead.") X = check_array(X, copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) data_min = np.min(X, axis=0) data_max = np.max(X, axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next steps else: data_min = np.minimum(self.data_min_, data_min) data_max = np.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = ((feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(data_range)) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. It cannot be sparse. 
""" check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`. Parameters ---------- X : array-like, shape (n_samples, n_features) The data. feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MinMaxScaler: Performs scaling to a given range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. """ # noqa # Unlike the scaler object, this function allows 1d input. # If copy is required, it will be done inside the scaler object. X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. This scaler can also be applied to sparse CSR or CSC matrices by passing `with_mean=False` to avoid breaking the sparsity structure of the data. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. 
if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. var_ : array of floats with shape [n_features] The variance for each feature in the training set. Used to compute `scale_` n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. Examples -------- >>> from sklearn.preprocessing import StandardScaler >>> >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]] >>> scaler = StandardScaler() >>> print(scaler.fit(data)) StandardScaler(copy=True, with_mean=True, with_std=True) >>> print(scaler.mean_) [ 0.5 0.5] >>> print(scaler.transform(data)) [[-1. -1.] [-1. -1.] [ 1. 1.] [ 1. 1.]] >>> print(scaler.transform([[2, 2]])) [[ 3. 3.]] See also -------- scale: Equivalent function without the estimator API. :class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. """ # noqa def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_ def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. 
""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.with_std: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_, self.var_ = mean_variance_axis(X, axis=0) self.n_samples_seen_ = X.shape[0] # Next passes else: self.mean_, self.var_, self.n_samples_seen_ = \ incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_) else: self.mean_ = None self.var_ = None else: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_ = .0 self.n_samples_seen_ = 0 if self.with_std: self.var_ = .0 else: self.var_ = None self.mean_, self.var_, self.n_samples_seen_ = \ _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_) if self.with_std: self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_)) else: self.scale_ = None return self def transform(self, X, y='deprecated', copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : bool, optional (default: None) Copy the input X or not. """ if not isinstance(y, string_types) or y != 'deprecated': warnings.warn("The parameter y on transform() is " "deprecated since 0.19 and will be removed in 0.21", DeprecationWarning) check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. copy : bool, optional (default: None) Copy the input X or not. Returns ------- X_tr : array-like, shape [n_samples, n_features] Transformed array. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.scale_ is not None: inplace_column_scale(X, self.scale_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.scale_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. 
    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.

    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    See also
    --------
    maxabs_scale: Equivalent function without the estimator API.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, copy=True):
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """

        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.max_abs_

    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
            used for later scaling along the features axis.
        """

        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
            used for later scaling along the features axis.

        y : Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            mins, maxs = min_max_axis(X, axis=0)
            max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            max_abs = np.abs(X).max(axis=0)

        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next passes
        else:
            max_abs = np.maximum(self.max_abs_, max_abs)
            self.n_samples_seen_ += X.shape[0]

        self.max_abs_ = max_abs
        self.scale_ = _handle_zeros_in_scale(max_abs)
        return self

    def transform(self, X):
        """Scale the data

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
""" check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- X : array-like, shape (n_samples, n_features) The data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. """ # noqa # Unlike the scaler object, this function allows 1d input. # If copy is required, it will be done inside the scaler object. X = check_array(X, accept_sparse=('csr', 'csc'), copy=False, ensure_2d=False, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MaxAbsScaler(copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the ``axis`` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the ``transform`` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. .. versionadded:: 0.17 Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This will cause ``transform`` to raise an exception when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0 Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR Quantile range used to calculate ``scale_``. .. versionadded:: 0.18 copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. 
if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. .. versionadded:: 0.17 *scale_* attribute. See also -------- robust_scale: Equivalent function without the estimator API. :class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. https://en.wikipedia.org/wiki/Median_(statistics) https://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.quantile_range = quantile_range self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. """ if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q_min, q_max = self.quantile_range if not 0 <= q_min <= q_max <= 100: raise ValueError("Invalid quantile range: %s" % str(self.quantile_range)) q = np.percentile(X, self.quantile_range, axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) return self def transform(self, X): """Center and scale the data. Can be called on sparse input, provided that ``RobustScaler`` has been fitted to dense input and ``with_centering=False``. Parameters ---------- X : {array-like, sparse matrix} The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. 
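    Illustrative example (an editorial sketch, not from the original
    docstring; the toy column with one gross outlier is an assumption). The
    median (3) and IQR (4 - 2 = 2) set the centering and scaling, and the
    outlier does not disturb them:

    >>> import numpy as np
    >>> from sklearn.preprocessing import robust_scale
    >>> X = np.array([[1.], [2.], [3.], [4.], [1000.]])
    >>> bool(np.allclose(robust_scale(X).ravel(),
    ...                  [-1., -0.5, 0., 0.5, 498.5]))
    True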
    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like
        The data to center and scale.

    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.

    with_centering : boolean, True by default
        If True, center the data before scaling.

    with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate ``scale_``.

        .. versionadded:: 0.18

    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See also
    --------
    RobustScaler: Performs centering and scaling using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """
    s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
                     quantile_range=quantile_range, copy=copy)
    if axis == 0:
        return s.fit_transform(X)
    else:
        return s.fit_transform(X.T).T


class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.,   0.,   1.],
           [  1.,   2.,   3.,   4.,   6.,   9.],
           [  1.,   4.,   5.,  16.,  20.,  25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.],
           [  1.,   2.,   3.,   6.],
           [  1.,   4.,   5.,  20.]])

    Attributes
    ----------
    powers_ : array, shape (n_output_features, n_input_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    n_input_features_ : int
        The total number of input features.

    n_output_features_ : int
        The total number of polynomial output features.
The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(np.bincount(c, minlength=self.n_input_features_) for c in combinations) def get_feature_names(self, input_features=None): """ Return feature names for output features Parameters ---------- input_features : list of string, length n_features, optional String names for input features if available. By default, "x0", "x1", ... "xn_features" is used. Returns ------- output_feature_names : list of string, length n_output_features """ powers = self.powers_ if input_features is None: input_features = ['x%d' % i for i in range(powers.shape[1])] feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = " ".join("%s^%d" % (input_features[ind], exp) if exp != 1 else input_features[ind] for ind, exp in zip(inds, row[inds])) else: name = "1" feature_names.append(name) return feature_names def fit(self, X, y=None): """ Compute number of output features. Parameters ---------- X : array-like, shape (n_samples, n_features) The data. Returns ------- self : instance """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X): """Transform data to polynomial features Parameters ---------- X : array-like, shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. """ check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X, dtype=FLOAT_DTYPES) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True, return_norm=False): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, element by element. 
scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : boolean, default False whether to return the computed norms Returns ------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Normalized input X. norms : array, shape [n_samples] if axis=1 else [n_features] An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See also -------- Normalizer: Performs normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if return_norm and norm in ('l1', 'l2'): raise NotImplementedError("return_norm=True is not implemented " "for sparse matrices with norm 'l1' " "or norm 'l2'") if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms_elementwise = norms.repeat(np.diff(X.indptr)) mask = norms_elementwise != 0 X.data[mask] /= norms_elementwise[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). 
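    Illustrative example (an editorial addition; the toy rows are
    assumptions): each row is divided by its own l2 norm, so every non-zero
    sample ends up on the unit sphere.

    >>> import numpy as np
    >>> from sklearn.preprocessing import Normalizer
    >>> X = np.array([[3., 4.], [1., 0.]])
    >>> Normalizer(norm='l2').fit_transform(X)
    array([[ 0.6,  0.8],
           [ 1. ,  0. ]])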
Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. See also -------- normalize: Equivalent function without the estimator API. """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. Parameters ---------- X : array-like """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y='deprecated', copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : bool, optional (default: None) Copy the input X or not. """ if not isinstance(y, string_types) or y != 'deprecated': warnings.warn("The parameter y on transform() is " "deprecated since 0.19 and will be removed in 0.21", DeprecationWarning) copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- Binarizer: Performs binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. 
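    Illustrative example (an editorial sketch; the threshold and toy data
    are assumptions): values strictly above the threshold map to 1, the rest
    to 0. Since the estimator is stateless, ``transform`` can be called
    directly.

    >>> import numpy as np
    >>> from sklearn.preprocessing import Binarizer
    >>> X = np.array([[ 1., -1.,  2. ],
    ...               [ 0.,  0.,  0.5]])
    >>> Binarizer(threshold=0.4).transform(X)
    array([[ 1.,  0.,  1.],
           [ 0.,  0.,  1.]])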
Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- binarize: Equivalent function without the estimator API. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. Parameters ---------- X : array-like """ check_array(X, accept_sparse='csr') return self def transform(self, X, y='deprecated', copy=None): """Binarize each element of X Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : bool Copy the input X or not. """ if not isinstance(y, string_types) or y != 'deprecated': warnings.warn("The parameter y on transform() is " "deprecated since 0.19 and will be removed in 0.21", DeprecationWarning) copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K, dtype=FLOAT_DTYPES) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y='deprecated', copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ if not isinstance(y, string_types) or y != 'deprecated': warnings.warn("The parameter y on transform() is " "deprecated since 0.19 and will be removed in 0.21", DeprecationWarning) check_is_fitted(self, 'K_fit_all_') K = check_array(K, copy=copy, dtype=FLOAT_DTYPES) K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K @property def _pairwise(self): return True def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. 
This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : {array, sparse matrix}, shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES) if isinstance(selected, six.string_types) and selected == "all": return transform(X) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). 
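    An extra illustrative sketch (an editorial addition, not from the
    upstream docstring; the fixed ``n_values=3`` and the unseen value 5 are
    assumptions): with ``handle_unknown='ignore'``, categories unseen during
    fit encode as all-zero rows instead of raising an error.

    >>> from sklearn.preprocessing import OneHotEncoder
    >>> enc = OneHotEncoder(n_values=3, handle_unknown='ignore', sparse=False)
    >>> _ = enc.fit([[0], [1], [2]])
    >>> enc.transform([[1], [5]])
    array([[ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])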
This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Note: a one-hot encoding of y labels should use a LabelBinarizer instead. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : number of categorical values per feature. Each feature value should be in ``range(n_values)`` - array : ``n_values[i]`` is the number of categorical values in ``X[:, i]``. Each feature value should be in ``range(n_values[i])`` categorical_features : "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and four samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all fashion. sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of iterables and a multilabel format, e.g. a (samples x classes) binary matrix indicating the presence of a class label. sklearn.preprocessing.LabelEncoder : encodes labels with values between 0 and n_classes-1. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float64, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape [n_samples, n_feature] Input array of type int. 
        Returns
        -------
        self
        """
        self.fit_transform(X)
        return self

    def _fit_transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            n_values = np.max(X, axis=0) + 1
        elif isinstance(self.n_values, numbers.Integral):
            if (np.max(X, axis=0) >= self.n_values).any():
                raise ValueError("Feature out of bounds for n_values=%d"
                                 % self.n_values)
            n_values = np.empty(n_features, dtype=np.int)
            n_values.fill(self.n_values)
        else:
            try:
                n_values = np.asarray(self.n_values, dtype=int)
            except (ValueError, TypeError):
                raise TypeError("Wrong type for parameter `n_values`. Expected"
                                " 'auto', int or array of ints, got %r"
                                % type(self.n_values))
            if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
                raise ValueError("Shape mismatch: if n_values is an array,"
                                 " it has to be of shape (n_features,).")

        self.n_values_ = n_values
        n_values = np.hstack([[0], n_values])
        indices = np.cumsum(n_values)
        self.feature_indices_ = indices

        column_indices = (X + indices[:-1]).ravel()
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)
        data = np.ones(n_samples * n_features)
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()

        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            mask = np.array(out.sum(axis=0)).ravel() != 0
            active_features = np.where(mask)[0]
            out = out[:, active_features]
            self.active_features_ = active_features

        return out if self.sparse else out.toarray()

    def fit_transform(self, X, y=None):
        """Fit OneHotEncoder to X, then transform X.

        Equivalent to self.fit(X).transform(X), but more convenient and more
        efficient. See fit for the parameters, transform for the return value.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_feature]
            Input array of type int.
        """
        return _transform_selected(X, self._fit_transform,
                                   self.categorical_features, copy=True)

    def _transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape

        indices = self.feature_indices_
        if n_features != indices.shape[0] - 1:
            raise ValueError("X has different shape than during fitting."
                             " Expected %d, got %d."
                             % (indices.shape[0] - 1, n_features))

        # We use only those categorical features of X that are known using
        # fit, i.e. those that are less than n_values_, using mask.
        # This means, if self.handle_unknown is "ignore", the row_indices and
        # col_indices corresponding to the unknown categorical feature are
        # ignored.
        mask = (X < self.n_values_).ravel()
        if np.any(~mask):
            if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either 'error' or "
                                 "'ignore', got %s" % self.handle_unknown)

            if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature present %s "
                                 "during transform."
% X.ravel()[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if (isinstance(self.n_values, six.string_types) and self.n_values == 'auto'): out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape [n_samples, n_features] Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True) class QuantileTransformer(BaseEstimator, TransformerMixin): """Transform features using quantiles information. This method transforms the features to follow a uniform or a normal distribution. Therefore, for a given feature, this transformation tends to spread out the most frequent values. It also reduces the impact of (marginal) outliers: this is therefore a robust preprocessing scheme. The transformation is applied on each feature independently. The cumulative density function of a feature is used to project the original values. Features values of new/unseen data that fall below or above the fitted range will be mapped to the bounds of the output distribution. Note that this transform is non-linear. It may distort linear correlations between variables measured at the same scale but renders variables measured at different scales more directly comparable. Read more in the :ref:`User Guide <preprocessing_transformer>`. Parameters ---------- n_quantiles : int, optional (default=1000) Number of quantiles to be computed. It corresponds to the number of landmarks used to discretize the cumulative density function. output_distribution : str, optional (default='uniform') Marginal distribution for the transformed data. The choices are 'uniform' (default) or 'normal'. ignore_implicit_zeros : bool, optional (default=False) Only applies to sparse matrices. If True, the sparse entries of the matrix are discarded to compute the quantile statistics. If False, these entries are treated as zeros. subsample : int, optional (default=1e5) Maximum number of samples used to estimate the quantiles for computational efficiency. Note that the subsampling procedure may differ for value-identical sparse and dense matrices. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by np.random. Note that this is used by subsampling and smoothing noise. copy : boolean, optional, (default=True) Set to False to perform inplace transformation and avoid a copy (if the input is already a numpy array). Attributes ---------- quantiles_ : ndarray, shape (n_quantiles, n_features) The values corresponding the quantiles of reference. references_ : ndarray, shape(n_quantiles, ) Quantiles of references. 
Examples -------- >>> import numpy as np >>> from sklearn.preprocessing import QuantileTransformer >>> rng = np.random.RandomState(0) >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0) >>> qt = QuantileTransformer(n_quantiles=10, random_state=0) >>> qt.fit_transform(X) # doctest: +ELLIPSIS array([...]) See also -------- quantile_transform : Equivalent function without the estimator API. StandardScaler : perform standardization that is faster, but less robust to outliers. RobustScaler : perform robust standardization that removes the influence of outliers but does not put outliers and inliers on the same scale. Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`. """ def __init__(self, n_quantiles=1000, output_distribution='uniform', ignore_implicit_zeros=False, subsample=int(1e5), random_state=None, copy=True): self.n_quantiles = n_quantiles self.output_distribution = output_distribution self.ignore_implicit_zeros = ignore_implicit_zeros self.subsample = subsample self.random_state = random_state self.copy = copy def _dense_fit(self, X, random_state): """Compute percentiles for dense matrices. Parameters ---------- X : ndarray, shape (n_samples, n_features) The data used to scale along the features axis. """ if self.ignore_implicit_zeros: warnings.warn("'ignore_implicit_zeros' takes effect only with" " sparse matrix. This parameter has no effect.") n_samples, n_features = X.shape # for compatibility issue with numpy<=1.8.X, references # need to be a list scaled between 0 and 100 references = (self.references_ * 100).tolist() self.quantiles_ = [] for col in X.T: if self.subsample < n_samples: subsample_idx = random_state.choice(n_samples, size=self.subsample, replace=False) col = col.take(subsample_idx, mode='clip') self.quantiles_.append(np.percentile(col, references)) self.quantiles_ = np.transpose(self.quantiles_) def _sparse_fit(self, X, random_state): """Compute percentiles for sparse matrices. Parameters ---------- X : sparse matrix CSC, shape (n_samples, n_features) The data used to scale along the features axis. The sparse matrix needs to be nonnegative. """ n_samples, n_features = X.shape # for compatibility issue with numpy<=1.8.X, references # need to be a list scaled between 0 and 100 references = list(map(lambda x: x * 100, self.references_)) self.quantiles_ = [] for feature_idx in range(n_features): column_nnz_data = X.data[X.indptr[feature_idx]: X.indptr[feature_idx + 1]] if len(column_nnz_data) > self.subsample: column_subsample = (self.subsample * len(column_nnz_data) // n_samples) if self.ignore_implicit_zeros: column_data = np.zeros(shape=column_subsample, dtype=X.dtype) else: column_data = np.zeros(shape=self.subsample, dtype=X.dtype) column_data[:column_subsample] = random_state.choice( column_nnz_data, size=column_subsample, replace=False) else: if self.ignore_implicit_zeros: column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype) else: column_data = np.zeros(shape=n_samples, dtype=X.dtype) column_data[:len(column_nnz_data)] = column_nnz_data if not column_data.size: # if no nnz, an error will be raised for computing the # quantiles. Force the quantiles to be zeros. 
                self.quantiles_.append([0] * len(references))
            else:
                self.quantiles_.append(
                    np.percentile(column_data, references))
        self.quantiles_ = np.transpose(self.quantiles_)

    def fit(self, X, y=None):
        """Compute the quantiles used for transforming.

        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.

        Returns
        -------
        self : object
            Returns self
        """
        if self.n_quantiles <= 0:
            raise ValueError("Invalid value for 'n_quantiles': %d. "
                             "The number of quantiles must be at least one."
                             % self.n_quantiles)

        if self.subsample <= 0:
            raise ValueError("Invalid value for 'subsample': %d. "
                             "The number of subsamples must be at least one."
                             % self.subsample)

        if self.n_quantiles > self.subsample:
            raise ValueError("The number of quantiles cannot be greater than"
                             " the number of samples used. Got {} quantiles"
                             " and {} samples.".format(self.n_quantiles,
                                                       self.subsample))

        X = self._check_inputs(X)
        rng = check_random_state(self.random_state)

        # Create the quantiles of reference
        self.references_ = np.linspace(0, 1, self.n_quantiles,
                                       endpoint=True)
        if sparse.issparse(X):
            self._sparse_fit(X, rng)
        else:
            self._dense_fit(X, rng)

        return self

    def _transform_col(self, X_col, quantiles, inverse):
        """Private function to transform a single feature"""

        if self.output_distribution == 'normal':
            output_distribution = 'norm'
        else:
            output_distribution = self.output_distribution
        output_distribution = getattr(stats, output_distribution)

        # older versions of scipy do not handle tuple as fill_value;
        # clipping the value before transform solves the issue
        if not inverse:
            lower_bound_x = quantiles[0]
            upper_bound_x = quantiles[-1]
            lower_bound_y = 0
            upper_bound_y = 1
        else:
            lower_bound_x = 0
            upper_bound_x = 1
            lower_bound_y = quantiles[0]
            upper_bound_y = quantiles[-1]
            # for inverse transform, match a uniform PDF
            X_col = output_distribution.cdf(X_col)
        # find index for lower and higher bounds
        lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
                            lower_bound_x)
        upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
                            upper_bound_x)

        if not inverse:
            # Interpolate in one direction and in the other and take the
            # mean. This is in case of repeated values in the features
            # and hence repeated quantiles
            #
            # If we don't do this, only one extreme of the duplicated is
            # used (the upper when we do ascending, and the
            # lower for descending). We take the mean of these two
            X_col = .5 * (np.interp(X_col, quantiles, self.references_)
                          - np.interp(-X_col, -quantiles[::-1],
                                      -self.references_[::-1]))
        else:
            X_col = np.interp(X_col, self.references_, quantiles)

        X_col[upper_bounds_idx] = upper_bound_y
        X_col[lower_bounds_idx] = lower_bound_y
        # for forward transform, match the output PDF
        if not inverse:
            X_col = output_distribution.ppf(X_col)
            # find the value to clip the data to avoid mapping to
            # infinity. Clip such that the inverse transform will be
            # consistent
            clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
                                               np.spacing(1))
            clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
                                                    np.spacing(1)))
            X_col = np.clip(X_col, clip_min, clip_max)

        return X_col

    def _check_inputs(self, X, accept_sparse_negative=False):
        """Check inputs before fit and transform"""
        X = check_array(X, accept_sparse='csc', copy=self.copy,
                        dtype=[np.float64, np.float32])
        # we only accept a positive sparse matrix when ignore_implicit_zeros
        # is false and we call fit or transform.
if (not accept_sparse_negative and not self.ignore_implicit_zeros and (sparse.issparse(X) and np.any(X.data < 0))): raise ValueError('QuantileTransformer only accepts non-negative' ' sparse matrices.') # check the output PDF if self.output_distribution not in ('normal', 'uniform'): raise ValueError("'output_distribution' has to be either 'normal'" " or 'uniform'. Got '{}' instead.".format( self.output_distribution)) return X def _check_is_fitted(self, X): """Check the inputs before transforming""" check_is_fitted(self, 'quantiles_') # check that the dimension of X are adequate with the fitted data if X.shape[1] != self.quantiles_.shape[1]: raise ValueError('X does not have the same number of features as' ' the previously fitted data. Got {} instead of' ' {}.'.format(X.shape[1], self.quantiles_.shape[1])) def _transform(self, X, inverse=False): """Forward and inverse transform. Parameters ---------- X : ndarray, shape (n_samples, n_features) The data used to scale along the features axis. inverse : bool, optional (default=False) If False, apply forward transform. If True, apply inverse transform. Returns ------- X : ndarray, shape (n_samples, n_features) Projected data """ if sparse.issparse(X): for feature_idx in range(X.shape[1]): column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1]) X.data[column_slice] = self._transform_col( X.data[column_slice], self.quantiles_[:, feature_idx], inverse) else: for feature_idx in range(X.shape[1]): X[:, feature_idx] = self._transform_col( X[:, feature_idx], self.quantiles_[:, feature_idx], inverse) return X def transform(self, X): """Feature-wise transformation of the data. Parameters ---------- X : ndarray or sparse matrix, shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse ``csc_matrix``. Additionally, the sparse matrix needs to be nonnegative if `ignore_implicit_zeros` is False. Returns ------- Xt : ndarray or sparse matrix, shape (n_samples, n_features) The projected data. """ X = self._check_inputs(X) self._check_is_fitted(X) return self._transform(X, inverse=False) def inverse_transform(self, X): """Back-projection to the original space. Parameters ---------- X : ndarray or sparse matrix, shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse ``csc_matrix``. Additionally, the sparse matrix needs to be nonnegative if `ignore_implicit_zeros` is False. Returns ------- Xt : ndarray or sparse matrix, shape (n_samples, n_features) The projected data. """ X = self._check_inputs(X, accept_sparse_negative=True) self._check_is_fitted(X) return self._transform(X, inverse=True) def quantile_transform(X, axis=0, n_quantiles=1000, output_distribution='uniform', ignore_implicit_zeros=False, subsample=int(1e5), random_state=None, copy=False): """Transform features using quantiles information. This method transforms the features to follow a uniform or a normal distribution. Therefore, for a given feature, this transformation tends to spread out the most frequent values. It also reduces the impact of (marginal) outliers: this is therefore a robust preprocessing scheme. The transformation is applied on each feature independently. The cumulative density function of a feature is used to project the original values. Features values of new/unseen data that fall below or above the fitted range will be mapped to the bounds of the output distribution. 
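    Illustrative sketch of the bounds behaviour described above (an
    editorial addition; the Gaussian toy data, ``n_quantiles=50`` and the
    tolerance are assumptions): far out-of-range values are clipped to the
    bounds, and the inverse transform recovers in-range data.

    >>> import numpy as np
    >>> from sklearn.preprocessing import QuantileTransformer
    >>> rng = np.random.RandomState(0)
    >>> X = rng.normal(size=(200, 1))
    >>> qt = QuantileTransformer(n_quantiles=50, random_state=0).fit(X)
    >>> qt.transform(np.array([[1e6]]))   # far above the fitted range
    array([[ 1.]])
    >>> Xt = qt.transform(X)
    >>> bool(np.allclose(qt.inverse_transform(Xt), X))
    True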
    Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    X : array-like, sparse matrix
        The data to transform.

    axis : int, (default=0)
        Axis used to compute the quantiles along. If 0, transform each
        feature, otherwise (if 1) transform each sample.

    n_quantiles : int, optional (default=1000)
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative density function.

    output_distribution : str, optional (default='uniform')
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.

    ignore_implicit_zeros : bool, optional (default=False)
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.

    subsample : int, optional (default=1e5)
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. Note that this is used by subsampling and smoothing
        noise.

    copy : boolean, optional, (default=False)
        Set to False to perform inplace transformation and avoid a copy (if
        the input is already a numpy array). Note that, unlike the
        ``QuantileTransformer`` class, this function defaults to
        ``copy=False``, so pass ``copy=True`` explicitly if the input must
        not be modified.

    Attributes
    ----------
    quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.

    references_ : ndarray, shape(n_quantiles, )
        Quantiles of references.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import quantile_transform
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> quantile_transform(X, n_quantiles=10, random_state=0)
    ... # doctest: +ELLIPSIS
    array([...])

    See also
    --------
    QuantileTransformer : Performs quantile-based scaling using the
        ``Transformer`` API (e.g. as part of a preprocessing
        :class:`sklearn.pipeline.Pipeline`).
    scale : perform standardization that is faster, but less robust
        to outliers.
    robust_scale : perform robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    n = QuantileTransformer(n_quantiles=n_quantiles,
                            output_distribution=output_distribution,
                            subsample=subsample,
                            ignore_implicit_zeros=ignore_implicit_zeros,
                            random_state=random_state,
                            copy=copy)
    if axis == 0:
        return n.fit_transform(X)
    elif axis == 1:
        return n.fit_transform(X.T).T
    else:
        raise ValueError("axis should be either equal to 0 or 1. Got"
                         " axis={}".format(axis))
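# Editorial usage sketch (an addition by the editor, not part of the upstream
# module): it exercises quantile_transform, defined above, on a skewed toy
# feature. The log-normal data and the 0.1 tolerance are illustrative
# assumptions; the __main__ guard keeps importing the module side-effect free.
if __name__ == '__main__':
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.lognormal(size=(300, 1))            # heavily right-skewed
    X_norm = quantile_transform(X_demo, n_quantiles=100,
                                output_distribution='normal', copy=True)
    # after the mapping the median sits near 0, as for a standard normal
    print(abs(float(np.median(X_norm))) < 0.1)       # True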
94,481
35.935887
84
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/label.py
# Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Andreas Mueller <[email protected]> # Joel Nothman <[email protected]> # Hamzeh Alsalhi <[email protected]> # License: BSD 3 clause from collections import defaultdict import itertools import array import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..utils.fixes import sparse_min_max from ..utils import column_or_1d from ..utils.validation import check_array from ..utils.validation import check_is_fitted from ..utils.validation import _num_samples from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..externals import six zip = six.moves.zip map = six.moves.map __all__ = [ 'label_binarize', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', ] class LabelEncoder(BaseEstimator, TransformerMixin): """Encode labels with value between 0 and n_classes-1. Read more in the :ref:`User Guide <preprocessing_targets>`. Attributes ---------- classes_ : array of shape (n_class,) Holds the label for each class. Examples -------- `LabelEncoder` can be used to normalize labels. >>> from sklearn import preprocessing >>> le = preprocessing.LabelEncoder() >>> le.fit([1, 2, 2, 6]) LabelEncoder() >>> le.classes_ array([1, 2, 6]) >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS array([0, 0, 1, 2]...) >>> le.inverse_transform([0, 0, 1, 2]) array([1, 1, 2, 6]) It can also be used to transform non-numerical labels (as long as they are hashable and comparable) to numerical labels. >>> le = preprocessing.LabelEncoder() >>> le.fit(["paris", "paris", "tokyo", "amsterdam"]) LabelEncoder() >>> list(le.classes_) ['amsterdam', 'paris', 'tokyo'] >>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS array([2, 2, 1]...) >>> list(le.inverse_transform([2, 2, 1])) ['tokyo', 'tokyo', 'paris'] See also -------- sklearn.preprocessing.OneHotEncoder : encode categorical integer features using a one-hot aka one-of-K scheme. """ def fit(self, y): """Fit label encoder Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. """ y = column_or_1d(y, warn=True) self.classes_ = np.unique(y) return self def fit_transform(self, y): """Fit label encoder and return encoded labels Parameters ---------- y : array-like of shape [n_samples] Target values. Returns ------- y : array-like of shape [n_samples] """ y = column_or_1d(y, warn=True) self.classes_, y = np.unique(y, return_inverse=True) return y def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : array-like of shape [n_samples] Target values. Returns ------- y : array-like of shape [n_samples] """ check_is_fitted(self, 'classes_') y = column_or_1d(y, warn=True) classes = np.unique(y) if len(np.intersect1d(classes, self.classes_)) < len(classes): diff = np.setdiff1d(classes, self.classes_) raise ValueError("y contains new labels: %s" % str(diff)) return np.searchsorted(self.classes_, y) def inverse_transform(self, y): """Transform labels back to original encoding. Parameters ---------- y : numpy array of shape [n_samples] Target values. 
Returns ------- y : numpy array of shape [n_samples] """ check_is_fitted(self, 'classes_') diff = np.setdiff1d(y, np.arange(len(self.classes_))) if len(diff): raise ValueError("y contains new labels: %s" % str(diff)) y = np.asarray(y) return self.classes_[y] class LabelBinarizer(BaseEstimator, TransformerMixin): """Binarize labels in a one-vs-all fashion Several regression and binary classification algorithms are available in scikit-learn. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. At learning time, this simply consists of learning one regressor or binary classifier per class. In doing so, one needs to convert multi-class labels to binary labels (belongs or does not belong to the class). LabelBinarizer makes this process easy with the transform method. At prediction time, one assigns the class for which the corresponding model gave the greatest confidence. LabelBinarizer makes this easy with the inverse_transform method. Read more in the :ref:`User Guide <preprocessing_targets>`. Parameters ---------- neg_label : int (default: 0) Value with which negative labels must be encoded. pos_label : int (default: 1) Value with which positive labels must be encoded. sparse_output : boolean (default: False) True if the returned array from transform is desired to be in sparse CSR format. Attributes ---------- classes_ : array of shape [n_class] Holds the label for each class. y_type_ : str Represents the type of the target data as evaluated by utils.multiclass.type_of_target. Possible types are 'continuous', 'continuous-multioutput', 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'. sparse_input_ : boolean True if the input data to transform is given as a sparse matrix, False otherwise. Examples -------- >>> from sklearn import preprocessing >>> lb = preprocessing.LabelBinarizer() >>> lb.fit([1, 2, 6, 4, 2]) LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False) >>> lb.classes_ array([1, 2, 4, 6]) >>> lb.transform([1, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) Binary targets transform to a column vector >>> lb = preprocessing.LabelBinarizer() >>> lb.fit_transform(['yes', 'no', 'no', 'yes']) array([[1], [0], [0], [1]]) Passing a 2D matrix for multilabel classification >>> import numpy as np >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]])) LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False) >>> lb.classes_ array([0, 1, 2]) >>> lb.transform([0, 1, 2, 1]) array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) See also -------- label_binarize : function to perform the transform operation of LabelBinarizer with fixed classes. sklearn.preprocessing.OneHotEncoder : encode categorical integer features using a one-hot aka one-of-K scheme. """ def __init__(self, neg_label=0, pos_label=1, sparse_output=False): if neg_label >= pos_label: raise ValueError("neg_label={0} must be strictly less than " "pos_label={1}.".format(neg_label, pos_label)) if sparse_output and (pos_label == 0 or neg_label != 0): raise ValueError("Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label)) self.neg_label = neg_label self.pos_label = pos_label self.sparse_output = sparse_output def fit(self, y): """Fit label binarizer Parameters ---------- y : array of shape [n_samples,] or [n_samples, n_classes] Target values. The 2-d matrix should only contain 0 and 1, representing multilabel classification.
Returns ------- self : returns an instance of self. """ self.y_type_ = type_of_target(y) if 'multioutput' in self.y_type_: raise ValueError("Multioutput target data is not supported with " "label binarization") if _num_samples(y) == 0: raise ValueError('y has 0 samples: %r' % y) self.sparse_input_ = sp.issparse(y) self.classes_ = unique_labels(y) return self def fit_transform(self, y): """Fit label binarizer and transform multi-class labels to binary labels. The output of transform is sometimes referred to as the 1-of-K coding scheme. Parameters ---------- y : array or sparse matrix of shape [n_samples,] or \ [n_samples, n_classes] Target values. The 2-d matrix should only contain 0 and 1, representing multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. Returns ------- Y : array or CSR matrix of shape [n_samples, n_classes] Shape will be [n_samples, 1] for binary problems. """ return self.fit(y).transform(y) def transform(self, y): """Transform multi-class labels to binary labels The output of transform is sometimes referred to by some authors as the 1-of-K coding scheme. Parameters ---------- y : array or sparse matrix of shape [n_samples,] or \ [n_samples, n_classes] Target values. The 2-d matrix should only contain 0 and 1, representing multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. Returns ------- Y : numpy array or CSR matrix of shape [n_samples, n_classes] Shape will be [n_samples, 1] for binary problems. """ check_is_fitted(self, 'classes_') y_is_multilabel = type_of_target(y).startswith('multilabel') if y_is_multilabel and not self.y_type_.startswith('multilabel'): raise ValueError("The object was not fitted with multilabel" " input.") return label_binarize(y, self.classes_, pos_label=self.pos_label, neg_label=self.neg_label, sparse_output=self.sparse_output) def inverse_transform(self, Y, threshold=None): """Transform binary labels back to multi-class labels Parameters ---------- Y : numpy array or sparse matrix with shape [n_samples, n_classes] Target values. All sparse matrices are converted to CSR before the inverse transformation. threshold : float or None Threshold used in the binary and multi-label cases. Use 0 when ``Y`` contains the output of decision_function (classifier). Use 0.5 when ``Y`` contains the output of predict_proba. If None, the threshold is assumed to be halfway between neg_label and pos_label. Returns ------- y : numpy array or CSR matrix of shape [n_samples] Target values. Notes ----- In the case when the binary labels are fractional (probabilistic), inverse_transform chooses the class with the greatest value. Typically, this allows using the output of a linear model's decision_function method directly as the input of inverse_transform. """ check_is_fitted(self, 'classes_') if threshold is None: threshold = (self.pos_label + self.neg_label) / 2. if self.y_type_ == "multiclass": y_inv = _inverse_binarize_multiclass(Y, self.classes_) else: y_inv = _inverse_binarize_thresholding(Y, self.y_type_, self.classes_, threshold) if self.sparse_input_: y_inv = sp.csr_matrix(y_inv) elif sp.issparse(y_inv): y_inv = y_inv.toarray() return y_inv def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False): """Binarize labels in a one-vs-all fashion Several regression and binary classification algorithms are available in scikit-learn. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme.
This function makes it possible to compute this transformation for a fixed set of class labels known ahead of time. Parameters ---------- y : array-like Sequence of integer labels or multilabel data to encode. classes : array-like of shape [n_classes] Uniquely holds the label for each class. neg_label : int (default: 0) Value with which negative labels must be encoded. pos_label : int (default: 1) Value with which positive labels must be encoded. sparse_output : boolean (default: False) Set to true if the output binary array is desired in CSR sparse format. Returns ------- Y : numpy array or CSR matrix of shape [n_samples, n_classes] Shape will be [n_samples, 1] for binary problems. Examples -------- >>> from sklearn.preprocessing import label_binarize >>> label_binarize([1, 6], classes=[1, 2, 4, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) The class ordering is preserved: >>> label_binarize([1, 6], classes=[1, 6, 4, 2]) array([[1, 0, 0, 0], [0, 1, 0, 0]]) Binary targets transform to a column vector >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes']) array([[1], [0], [0], [1]]) See also -------- LabelBinarizer : class used to wrap the functionality of label_binarize and allow for fitting to classes independently of the transform operation """ if not isinstance(y, list): # XXX Workaround that will be removed when list of list format is # dropped y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None) else: if _num_samples(y) == 0: raise ValueError('y has 0 samples: %r' % y) if neg_label >= pos_label: raise ValueError("neg_label={0} must be strictly less than " "pos_label={1}.".format(neg_label, pos_label)) if (sparse_output and (pos_label == 0 or neg_label != 0)): raise ValueError("Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label)) # To account for pos_label == 0 in the dense case pos_switch = pos_label == 0 if pos_switch: pos_label = -neg_label y_type = type_of_target(y) if 'multioutput' in y_type: raise ValueError("Multioutput target data is not supported with label " "binarization") if y_type == 'unknown': raise ValueError("The type of target data is not known") n_samples = y.shape[0] if sp.issparse(y) else len(y) n_classes = len(classes) classes = np.asarray(classes) if y_type == "binary": if n_classes == 1: if sparse_output: return sp.csr_matrix((n_samples, 1), dtype=int) else: Y = np.zeros((len(y), 1), dtype=np.int) Y += neg_label return Y elif len(classes) >= 3: y_type = "multiclass" sorted_class = np.sort(classes) if (y_type == "multilabel-indicator" and classes.size != y.shape[1]): raise ValueError("classes {0} mismatch with the labels {1}" " found in the data".format(classes, unique_labels(y))) if y_type in ("binary", "multiclass"): y = column_or_1d(y) # pick out the known labels from y y_in_classes = np.in1d(y, classes) y_seen = y[y_in_classes] indices = np.searchsorted(sorted_class, y_seen) indptr = np.hstack((0, np.cumsum(y_in_classes))) data = np.empty_like(indices) data.fill(pos_label) Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes)) elif y_type == "multilabel-indicator": Y = sp.csr_matrix(y) if pos_label != 1: data = np.empty_like(Y.data) data.fill(pos_label) Y.data = data else: raise ValueError("%s target data is not supported with label " "binarization" % y_type) if not sparse_output: Y = Y.toarray() Y = Y.astype(int, copy=False) if neg_label != 0: Y[Y == 0] = neg_label if pos_switch: Y[Y == pos_label] = 0 else: Y.data =
Y.data.astype(int, copy=False) # preserve label ordering if np.any(classes != sorted_class): indices = np.searchsorted(sorted_class, classes) Y = Y[:, indices] if y_type == "binary": if sparse_output: Y = Y.getcol(-1) else: Y = Y[:, -1].reshape((-1, 1)) return Y def _inverse_binarize_multiclass(y, classes): """Inverse label binarization transformation for multiclass. Multiclass uses the maximal score instead of a threshold. """ classes = np.asarray(classes) if sp.issparse(y): # Find the argmax for each row in y where y is a CSR matrix y = y.tocsr() n_samples, n_outputs = y.shape outputs = np.arange(n_outputs) row_max = sparse_min_max(y, 1)[1] row_nnz = np.diff(y.indptr) y_data_repeated_max = np.repeat(row_max, row_nnz) # picks out all indices obtaining the maximum per row y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data) # For the corner case where the last row has a max of 0 if row_max[-1] == 0: y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)]) # Gets the index of the first argmax in each row from y_i_all_argmax index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1]) # first argmax of each row y_ind_ext = np.append(y.indices, [0]) y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]] # Handle rows of all 0 y_i_argmax[np.where(row_nnz == 0)[0]] = 0 # Handles rows with a max of 0 that contain negative numbers samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)] for i in samples: ind = y.indices[y.indptr[i]:y.indptr[i + 1]] y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0] return classes[y_i_argmax] else: return classes.take(y.argmax(axis=1), mode="clip") def _inverse_binarize_thresholding(y, output_type, classes, threshold): """Inverse label binarization transformation using thresholding.""" if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2: raise ValueError("output_type='binary', but y.shape = {0}". format(y.shape)) if output_type != "binary" and y.shape[1] != len(classes): raise ValueError("The number of classes is not equal to the number of " "columns of y.") classes = np.asarray(classes) # Perform thresholding if sp.issparse(y): if threshold > 0: if y.format not in ('csr', 'csc'): y = y.tocsr() y.data = np.array(y.data > threshold, dtype=np.int) y.eliminate_zeros() else: y = np.array(y.toarray() > threshold, dtype=np.int) else: y = np.array(y > threshold, dtype=np.int) # Inverse transform data if output_type == "binary": if sp.issparse(y): y = y.toarray() if y.ndim == 2 and y.shape[1] == 2: return classes[y[:, 1]] else: if len(classes) == 1: return np.repeat(classes[0], len(y)) else: return classes[y.ravel()] elif output_type == "multilabel-indicator": return y else: raise ValueError("{0} format is not supported".format(output_type)) class MultiLabelBinarizer(BaseEstimator, TransformerMixin): """Transform between iterable of iterables and a multilabel format Although a list of sets or tuples is a very intuitive format for multilabel data, it is unwieldy to process. This transformer converts between this intuitive format and the supported multilabel format: a (samples x classes) binary matrix indicating the presence of a class label. Parameters ---------- classes : array-like of shape [n_classes] (optional) Indicates an ordering for the class labels sparse_output : boolean (default: False) Set to true if the output binary array is desired in CSR sparse format. Attributes ---------- classes_ : array of labels A copy of the `classes` parameter where provided, or otherwise, the sorted set of classes found when fitting.
Examples -------- >>> from sklearn.preprocessing import MultiLabelBinarizer >>> mlb = MultiLabelBinarizer() >>> mlb.fit_transform([(1, 2), (3,)]) array([[1, 1, 0], [0, 0, 1]]) >>> mlb.classes_ array([1, 2, 3]) >>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])]) array([[0, 1, 1], [1, 0, 0]]) >>> list(mlb.classes_) ['comedy', 'sci-fi', 'thriller'] See also -------- sklearn.preprocessing.OneHotEncoder : encode categorical integer features using a one-hot aka one-of-K scheme. """ def __init__(self, classes=None, sparse_output=False): self.classes = classes self.sparse_output = sparse_output def fit(self, y): """Fit the label sets binarizer, storing `classes_` Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- self : returns this MultiLabelBinarizer instance """ if self.classes is None: classes = sorted(set(itertools.chain.from_iterable(y))) else: classes = self.classes dtype = np.int if all(isinstance(c, int) for c in classes) else object self.classes_ = np.empty(len(classes), dtype=dtype) self.classes_[:] = classes return self def fit_transform(self, y): """Fit the label sets binarizer and transform the given label sets Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. """ if self.classes is not None: return self.fit(y).transform(y) # Automatically increment on new class class_mapping = defaultdict(int) class_mapping.default_factory = class_mapping.__len__ yt = self._transform(y, class_mapping) # sort classes and reorder columns tmp = sorted(class_mapping, key=class_mapping.get) # (make safe for tuples) dtype = np.int if all(isinstance(c, int) for c in tmp) else object class_mapping = np.empty(len(tmp), dtype=dtype) class_mapping[:] = tmp self.classes_, inverse = np.unique(class_mapping, return_inverse=True) # ensure yt.indices keeps its current dtype yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype, copy=False) if not self.sparse_output: yt = yt.toarray() return yt def transform(self, y): """Transform the given label sets Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. 
""" check_is_fitted(self, 'classes_') class_to_index = dict(zip(self.classes_, range(len(self.classes_)))) yt = self._transform(y, class_to_index) if not self.sparse_output: yt = yt.toarray() return yt def _transform(self, y, class_mapping): """Transforms the label sets with a given mapping Parameters ---------- y : iterable of iterables class_mapping : Mapping Maps from label to column index in label indicator matrix Returns ------- y_indicator : sparse CSR matrix, shape (n_samples, n_classes) Label indicator matrix """ indices = array.array('i') indptr = array.array('i', [0]) for labels in y: indices.extend(set(class_mapping[label] for label in labels)) indptr.append(len(indices)) data = np.ones(len(indices), dtype=int) return sp.csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping))) def inverse_transform(self, yt): """Transform the given indicator matrix into label sets Parameters ---------- yt : array or sparse matrix of shape (n_samples, n_classes) A matrix containing only 1s ands 0s. Returns ------- y : list of tuples The set of labels for each sample such that `y[i]` consists of `classes_[j]` for each `yt[i, j] == 1`. """ check_is_fitted(self, 'classes_') if yt.shape[1] != len(self.classes_): raise ValueError('Expected indicator for {0} classes, but got {1}' .format(len(self.classes_), yt.shape[1])) if sp.issparse(yt): yt = yt.tocsr() if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0: raise ValueError('Expected only 0s and 1s in label indicator.') return [tuple(self.classes_.take(yt.indices[start:end])) for start, end in zip(yt.indptr[:-1], yt.indptr[1:])] else: unexpected = np.setdiff1d(yt, [0, 1]) if len(unexpected) > 0: raise ValueError('Expected only 0s and 1s in label indicator. ' 'Also got {0}'.format(unexpected)) return [tuple(self.classes_.compress(indicators)) for indicators in yt]
27,529
32.208685
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/__init__.py
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from ._function_transformer import FunctionTransformer from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import QuantileTransformer from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import minmax_scale from .data import quantile_transform from .data import OneHotEncoder from .data import PolynomialFeatures from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from .imputation import Imputer __all__ = [ 'Binarizer', 'FunctionTransformer', 'Imputer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'QuantileTransformer', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'label_binarize', 'quantile_transform', ]
1,447
22.354839
68
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/tests/test_data.py
# Authors: # # Giorgio Patrini # # License: BSD 3 clause from __future__ import division import warnings import numpy as np import numpy.linalg as la from scipy import sparse from distutils.version import LooseVersion from sklearn.utils import gen_batches from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import clean_warning_registry from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_less from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import skip_if_32bit from sklearn.utils.sparsefuncs import mean_variance_axis from sklearn.preprocessing.data import _transform_selected from sklearn.preprocessing.data import _handle_zeros_in_scale from sklearn.preprocessing.data import Binarizer from sklearn.preprocessing.data import KernelCenterer from sklearn.preprocessing.data import Normalizer from sklearn.preprocessing.data import normalize from sklearn.preprocessing.data import OneHotEncoder from sklearn.preprocessing.data import StandardScaler from sklearn.preprocessing.data import scale from sklearn.preprocessing.data import MinMaxScaler from sklearn.preprocessing.data import minmax_scale from sklearn.preprocessing.data import QuantileTransformer from sklearn.preprocessing.data import quantile_transform from sklearn.preprocessing.data import MaxAbsScaler from sklearn.preprocessing.data import maxabs_scale from sklearn.preprocessing.data import RobustScaler from sklearn.preprocessing.data import robust_scale from sklearn.preprocessing.data import add_dummy_feature from sklearn.preprocessing.data import PolynomialFeatures from sklearn.exceptions import DataConversionWarning from sklearn.pipeline import Pipeline from sklearn.model_selection import cross_val_predict from sklearn.svm import SVR from sklearn import datasets iris = datasets.load_iris() # Make some data to be used many times rng = np.random.RandomState(0) n_features = 30 n_samples = 1000 offsets = rng.uniform(-1, 1, size=n_features) scales = rng.uniform(1, 10, size=n_features) X_2d = rng.randn(n_samples, n_features) * scales + offsets X_1row = X_2d[0, :].reshape(1, n_features) X_1col = X_2d[:, 0].reshape(n_samples, 1) X_list_1row = X_1row.tolist() X_list_1col = X_1col.tolist() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def _check_dim_1axis(a): if isinstance(a, list): return np.array(a).shape[0] return a.shape[0] def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size, n_samples_seen): if batch_stop != n: assert_equal((i + 1) * chunk_size, n_samples_seen) else: assert_equal(i * chunk_size + (batch_stop - batch_start), n_samples_seen) def test_polynomial_features(): # Test Polynomial Features X1 = np.arange(6)[:, np.newaxis] P1 = np.hstack([np.ones_like(X1), X1, X1 ** 2, X1 ** 3]) deg1 = 3 X2 = np.arange(6).reshape((3, 2)) x1 = X2[:, :1] x2 = X2[:, 1:] P2 = np.hstack([x1 ** 0 * x2 ** 0, x1 ** 1 * x2 ** 0, x1 ** 0 * x2 ** 1, x1 ** 2 * x2 ** 0, x1 ** 1 * x2 ** 1, x1 ** 0 * x2 ** 2]) deg2 = 2 for 
(deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]: P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X) assert_array_almost_equal(P_test, P) P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X) assert_array_almost_equal(P_test, P[:, 1:]) interact = PolynomialFeatures(2, interaction_only=True, include_bias=True) X_poly = interact.fit_transform(X) assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]]) assert_equal(interact.powers_.shape, (interact.n_output_features_, interact.n_input_features_)) def test_polynomial_feature_names(): X = np.arange(30).reshape(10, 3) poly = PolynomialFeatures(degree=2, include_bias=True).fit(X) feature_names = poly.get_feature_names() assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1', 'x0 x2', 'x1^2', 'x1 x2', 'x2^2'], feature_names) poly = PolynomialFeatures(degree=3, include_bias=False).fit(X) feature_names = poly.get_feature_names(["a", "b", "c"]) assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2', 'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c', 'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c', 'b c^2', 'c^3'], feature_names) # test some unicode poly = PolynomialFeatures(degree=1, include_bias=True).fit(X) feature_names = poly.get_feature_names( [u"\u0001F40D", u"\u262E", u"\u05D0"]) assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"], feature_names) def test_standard_scaler_1d(): # Test scaling of dataset along single axis for X in [X_1row, X_1col, X_list_1row, X_list_1col]: scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) if isinstance(X, list): X = np.array(X) # cast only after scaling done if _check_dim_1axis(X) == 1: assert_almost_equal(scaler.mean_, X.ravel()) assert_almost_equal(scaler.scale_, np.ones(n_features)) assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) assert_array_almost_equal(X_scaled.std(axis=0), np.zeros_like(n_features)) else: assert_almost_equal(scaler.mean_, X.mean()) assert_almost_equal(scaler.scale_, X.std()) assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) assert_array_almost_equal(X_scaled.mean(axis=0), .0) assert_array_almost_equal(X_scaled.std(axis=0), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X) # Constant feature X = np.ones(5).reshape(5, 1) scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_almost_equal(scaler.mean_, 1.) assert_almost_equal(scaler.scale_, 1.) assert_array_almost_equal(X_scaled.mean(axis=0), .0) assert_array_almost_equal(X_scaled.std(axis=0), .0) assert_equal(scaler.n_samples_seen_, X.shape[0]) def test_scale_1d(): # 1-d inputs X_list = [1., 3., 5., 0.] X_arr = np.array(X_list) for X in [X_list, X_arr]: X_scaled = scale(X) assert_array_almost_equal(X_scaled.mean(), 0.0) assert_array_almost_equal(X_scaled.std(), 1.0) assert_array_equal(scale(X, with_mean=False, with_std=False), X) @skip_if_32bit def test_standard_scaler_numerical_stability(): # Test numerical stability of scaling # np.log(1e-5) is taken because its floating point representation # was empirically found to cause numerical problems with np.mean & np.std.
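# Editor's note (added comments, not upstream code): with a constant input the
# true standard deviation is exactly 0, but computing it in float64 subtracts
# two nearly equal quantities and the cancellation can leave a tiny nonzero
# residue; the warnings asserted below are scikit-learn's guard against
# dividing by such a near-zero scale.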
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64) if LooseVersion(np.__version__) >= LooseVersion('1.9'): # This does not raise a warning as the number of samples is too low # to trigger the problem in recent numpy x_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(scale(x), np.zeros(8)) else: w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(8)) # with 2 more samples, the std computation runs into numerical issues: x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64) w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(10)) x = np.ones(10, dtype=np.float64) * 1e-100 x_small_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(x_small_scaled, np.zeros(10)) # Large values can cause (often recoverable) numerical stability issues: x_big = np.ones(10, dtype=np.float64) * 1e100 w = "Dataset may contain too large values" x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big) assert_array_almost_equal(x_big_scaled, np.zeros(10)) assert_array_almost_equal(x_big_scaled, x_small_scaled) x_big_centered = assert_warns_message(UserWarning, w, scale, x_big, with_std=False) assert_array_almost_equal(x_big_centered, np.zeros(10)) assert_array_almost_equal(x_big_centered, x_small_scaled) def test_scaler_2d_arrays(): # Test scaling of 2d array along first axis rng = np.random.RandomState(0) n_features = 5 n_samples = 4 X = rng.randn(n_samples, n_features) X[:, 0] = 0.0 # first feature is always zero scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_equal(scaler.n_samples_seen_, n_samples) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has been copied assert_true(X_scaled is not X) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_scaled = scale(X, axis=1, with_std=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) X_scaled = scale(X, axis=1, with_std=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0]) # Check that the data hasn't been modified assert_true(X_scaled is not X) X_scaled = scaler.fit(X).transform(X, copy=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is X) X = rng.randn(4, 5) X[:, 0] = 1.0 # first feature is a constant, non-zero feature scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has been copied assert_true(X_scaled is not X) def test_handle_zeros_in_scale(): s1 = np.array([0, 1, 2, 3]) s2 = _handle_zeros_in_scale(s1, copy=True) assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3])) assert_array_equal(s2, np.array([1, 1, 2, 3])) def test_minmax_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = MinMaxScaler().fit(X) scaler_incr = MinMaxScaler() for batch in gen_batches(n_samples, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) # Test std after 1 step batch0 = slice(0, chunk_size) scaler_batch = MinMaxScaler().fit(X[batch0]) scaler_incr = MinMaxScaler().partial_fit(X[batch0]) assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) # Test std until the end of the partial fits scaler_batch = MinMaxScaler().fit(X) scaler_incr = MinMaxScaler() # Clean estimator for i, batch in enumerate(gen_batches(n_samples, chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr(i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_) def test_standard_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = StandardScaler(with_std=False).fit(X) scaler_incr = StandardScaler(with_std=False) for batch in gen_batches(n_samples, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_) assert_equal(scaler_batch.var_, scaler_incr.var_) # both are None assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) # Test std after 1 step batch0 = slice(0, chunk_size) scaler_incr = StandardScaler().partial_fit(X[batch0]) if chunk_size == 1: assert_array_almost_equal(np.zeros(n_features, dtype=np.float64), scaler_incr.var_) assert_array_almost_equal(np.ones(n_features, dtype=np.float64), scaler_incr.scale_) else: assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_) assert_array_almost_equal(np.std(X[batch0], axis=0), scaler_incr.scale_) # no constants # Test std until the end of the partial fits scaler_batch = StandardScaler().fit(X) scaler_incr = StandardScaler() # Clean estimator for i, batch in enumerate(gen_batches(n_samples, chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr(i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) def test_standard_scaler_partial_fit_numerical_stability(): # Test
if the incremental computation introduces significant errors # for large datasets with values of large magnitude rng = np.random.RandomState(0) n_features = 2 n_samples = 100 offsets = rng.uniform(-1e15, 1e15, size=n_features) scales = rng.uniform(1e3, 1e6, size=n_features) X = rng.randn(n_samples, n_features) * scales + offsets scaler_batch = StandardScaler().fit(X) scaler_incr = StandardScaler() for chunk in X: scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features)) # Regardless of absolute values, they must not differ by more than 6 # significant digits tol = 10 ** (-6) assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol) assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol) assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol) # NOTE Be aware that for much larger offsets the std is very unstable (last # assert) while the mean is OK. # Sparse input size = (100, 3) scale = 1e20 X = rng.randint(0, 2, size).astype(np.float64) * scale X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) for X in [X_csr, X_csc]: # with_mean=False is required with sparse input scaler = StandardScaler(with_mean=False).fit(X) scaler_incr = StandardScaler(with_mean=False) for chunk in X: # chunk = sparse.csr_matrix(data_chunks) scaler_incr = scaler_incr.partial_fit(chunk) # Regardless of magnitude, they must not differ by more than 6 digits tol = 10 ** (-6) assert_true(scaler.mean_ is not None) assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol) assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol) def test_partial_fit_sparse_input(): # Check that sparsity is not destroyed X = np.array([[1.], [0.], [0.], [5.]]) X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) for X in [X_csr, X_csc]: X_null = null_transform.partial_fit(X).transform(X) assert_array_equal(X_null.data, X.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_null.data) assert_array_equal(X_orig.data, X.data) def test_standard_scaler_transform_with_partial_fit(): # Check some postconditions after applying partial_fit and transform X = X_2d[:100, :] scaler_incr = StandardScaler() for i, batch in enumerate(gen_batches(X.shape[0], 1)): X_sofar = X[:(i + 1), :] chunks_copy = X_sofar.copy() scaled_batch = StandardScaler().fit_transform(X_sofar) scaler_incr = scaler_incr.partial_fit(X[batch]) scaled_incr = scaler_incr.transform(X_sofar) assert_array_almost_equal(scaled_batch, scaled_incr) assert_array_almost_equal(X_sofar, chunks_copy) # No change right_input = scaler_incr.inverse_transform(scaled_incr) assert_array_almost_equal(X_sofar, right_input) zero = np.zeros(X.shape[1]) epsilon = np.nextafter(0, 1) assert_array_less(zero, scaler_incr.var_ + epsilon) # i.e. less than or equal assert_array_less(zero, scaler_incr.scale_ + epsilon) # (i+1) because the Scaler has already been fitted assert_equal((i + 1), scaler_incr.n_samples_seen_) def test_min_max_scaler_iris(): X = iris.data scaler = MinMaxScaler() # default params X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.max(axis=0), 1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # not default params: min=1, max=2 scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 1) assert_array_almost_equal(X_trans.max(axis=0), 2) X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv) # min=-.5, max=.6 scaler = MinMaxScaler(feature_range=(-.5, .6)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), -.5) assert_array_almost_equal(X_trans.max(axis=0), .6) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # raises on invalid range scaler = MinMaxScaler(feature_range=(2, 1)) assert_raises(ValueError, scaler.fit, X) def test_min_max_scaler_zero_variance_features(): # Check min max scaler on toy data with zero variance features X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] # default params scaler = MinMaxScaler() X_trans = scaler.fit_transform(X) X_expected_0_1 = [[0., 0., 0.5], [0., 0., 0.0], [0., 0., 1.0]] assert_array_almost_equal(X_trans, X_expected_0_1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) X_trans_new = scaler.transform(X_new) X_expected_0_1_new = [[+0., 1., 0.500], [-1., 0., 0.083], [+0., 0., 1.333]] assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) # not default params scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) X_expected_1_2 = [[1., 1., 1.5], [1., 1., 1.0], [1., 1., 2.0]] assert_array_almost_equal(X_trans, X_expected_1_2) # function interface X_trans = minmax_scale(X) assert_array_almost_equal(X_trans, X_expected_0_1) X_trans = minmax_scale(X, feature_range=(1, 2)) assert_array_almost_equal(X_trans, X_expected_1_2) def test_minmax_scale_axis1(): X = iris.data X_trans = minmax_scale(X, axis=1) assert_array_almost_equal(np.min(X_trans, axis=1), 0) assert_array_almost_equal(np.max(X_trans, axis=1), 1) def test_min_max_scaler_1d(): # Test scaling of dataset along single axis for X in [X_1row, X_1col, X_list_1row, X_list_1col]: scaler = MinMaxScaler(copy=True) X_scaled = scaler.fit(X).transform(X) if isinstance(X, list): X = np.array(X) # cast only after scaling done if _check_dim_1axis(X) == 1: assert_array_almost_equal(X_scaled.min(axis=0), np.zeros(n_features)) assert_array_almost_equal(X_scaled.max(axis=0), np.zeros(n_features)) else: assert_array_almost_equal(X_scaled.min(axis=0), .0) assert_array_almost_equal(X_scaled.max(axis=0), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X) # Constant feature X = np.ones(5).reshape(5, 1) scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_greater_equal(X_scaled.min(), 0.) assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0]) # Function interface X_1d = X_1row.ravel() min_ = X_1d.min() max_ = X_1d.max() assert_array_almost_equal((X_1d - min_) / (max_ - min_), minmax_scale(X_1d, copy=True)) def test_scaler_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) assert_raises(ValueError, StandardScaler().fit, X_csr) assert_raises(ValueError, StandardScaler().fit, X_csc) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csc.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_array_almost_equal(scaler.mean_, scaler_csr.mean_) assert_array_almost_equal(scaler.var_, scaler_csr.var_) assert_array_almost_equal(scaler.scale_, scaler_csr.scale_) assert_array_almost_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.var_, scaler_csc.var_) assert_array_almost_equal(scaler.scale_, scaler_csc.scale_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_int(): # test that scaler converts integer input to floating point # for both sparse and dense matrices rng = np.random.RandomState(42) X = rng.randint(20, size=(4, 5)) X[:, 0] = 0 # first feature is always zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) clean_warning_registry() with warnings.catch_warnings(record=True): X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) clean_warning_registry() with warnings.catch_warnings(record=True): scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) clean_warning_registry() with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csc.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_array_almost_equal(scaler.mean_, scaler_csr.mean_) assert_array_almost_equal(scaler.var_, scaler_csr.var_) assert_array_almost_equal(scaler.scale_, scaler_csr.scale_) assert_array_almost_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.var_, scaler_csc.var_) assert_array_almost_equal(scaler.scale_, scaler_csc.scale_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., 1.109, 1.856, 21., 1.559], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis( X_csr_scaled.astype(np.float), 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_without_copy(): # Check that StandardScaler.fit does not change input rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_copy = X.copy() StandardScaler(copy=False).fit(X) assert_array_equal(X, X_copy) X_csr_copy = X_csr.copy() StandardScaler(with_mean=False, copy=False).fit(X_csr) assert_array_equal(X_csr.toarray(), X_csr_copy.toarray()) X_csc_copy = X_csc.copy() StandardScaler(with_mean=False, copy=False).fit(X_csc) assert_array_equal(X_csc.toarray(), X_csc_copy.toarray()) def test_scale_sparse_with_mean_raise_exception(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) # check scaling and fit with direct calls on sparse data assert_raises(ValueError, scale, X_csr, with_mean=True) assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr) assert_raises(ValueError, scale, X_csc, with_mean=True) assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc) # check transform and inverse_transform after a fit on a dense array scaler = StandardScaler(with_mean=True).fit(X) assert_raises(ValueError, scaler.transform, X_csr) assert_raises(ValueError, scaler.transform, X_csc) X_transformed_csr = sparse.csr_matrix(scaler.transform(X)) assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr) X_transformed_csc = sparse.csc_matrix(scaler.transform(X)) assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc) def test_scale_input_finiteness_validation(): # Check that non-finite inputs raise ValueError X = [[np.nan, 5, 6, 7, 8]] assert_raises_regex(ValueError, "Input
contains NaN, infinity or a value too large", scale, X) X = [[np.inf, 5, 6, 7, 8]] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) def test_robust_scaler_2d_arrays(): # Test robust scaling of 2d array along first axis rng = np.random.RandomState(0) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always zero scaler = RobustScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0)[0], 0) def test_robust_scaler_transform_one_row_csr(): # Check RobustScaler on transforming csr matrix with one row rng = np.random.RandomState(0) X = rng.randn(4, 5) single_row = np.array([[0.1, 1., 2., 0., -1.]]) scaler = RobustScaler(with_centering=False) scaler = scaler.fit(X) row_trans = scaler.transform(sparse.csr_matrix(single_row)) row_expected = single_row / scaler.scale_ assert_array_almost_equal(row_trans.toarray(), row_expected) row_scaled_back = scaler.inverse_transform(row_trans) assert_array_almost_equal(single_row, row_scaled_back.toarray()) def test_robust_scaler_iris(): X = iris.data scaler = RobustScaler() X_trans = scaler.fit_transform(X) assert_array_almost_equal(np.median(X_trans, axis=0), 0) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) q = np.percentile(X_trans, q=(25, 75), axis=0) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_robust_scaler_iris_quantiles(): X = iris.data scaler = RobustScaler(quantile_range=(10, 90)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(np.median(X_trans, axis=0), 0) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) q = np.percentile(X_trans, q=(10, 90), axis=0) q_range = q[1] - q[0] assert_array_almost_equal(q_range, 1) def test_quantile_transform_iris(): X = iris.data # uniform output distribution transformer = QuantileTransformer(n_quantiles=30) X_trans = transformer.fit_transform(X) X_trans_inv = transformer.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # normal output distribution transformer = QuantileTransformer(n_quantiles=30, output_distribution='normal') X_trans = transformer.fit_transform(X) X_trans_inv = transformer.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure it is possible to take the inverse of a sparse matrix # which contains negative values; this is the case in the iris dataset X_sparse = sparse.csc_matrix(X) X_sparse_tran = transformer.fit_transform(X_sparse) X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran) assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A) def test_quantile_transform_check_error(): X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100], [2, 4, 0, 0, 6, 8, 0, 10, 0, 0], [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]]) X = sparse.csc_matrix(X) X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100], [-2, 4, 0, 0, 6, 8, 0, 10, 0, 0], [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]]) X_neg = sparse.csc_matrix(X_neg) assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.", QuantileTransformer(n_quantiles=0).fit, X) assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.", QuantileTransformer(subsample=0).fit, X) assert_raises_regex(ValueError, "The number of quantiles cannot be" " greater than the number of samples used.
Got" " 1000 quantiles and 10 samples.", QuantileTransformer(subsample=10).fit, X) transformer = QuantileTransformer(n_quantiles=10) assert_raises_regex(ValueError, "QuantileTransformer only accepts " "non-negative sparse matrices.", transformer.fit, X_neg) transformer.fit(X) assert_raises_regex(ValueError, "QuantileTransformer only accepts " "non-negative sparse matrices.", transformer.transform, X_neg) X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100], [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]]) assert_raises_regex(ValueError, "X does not have the same number of " "features as the previously fitted data. Got 2" " instead of 3.", transformer.transform, X_bad_feat) assert_raises_regex(ValueError, "X does not have the same number of " "features as the previously fitted data. Got 2" " instead of 3.", transformer.inverse_transform, X_bad_feat) transformer = QuantileTransformer(n_quantiles=10, output_distribution='rnd') # check that an error is raised at fit time assert_raises_regex(ValueError, "'output_distribution' has to be either" " 'normal' or 'uniform'. Got 'rnd' instead.", transformer.fit, X) # check that an error is raised at transform time transformer.output_distribution = 'uniform' transformer.fit(X) X_tran = transformer.transform(X) transformer.output_distribution = 'rnd' assert_raises_regex(ValueError, "'output_distribution' has to be either" " 'normal' or 'uniform'. Got 'rnd' instead.", transformer.transform, X) # check that an error is raised at inverse_transform time assert_raises_regex(ValueError, "'output_distribution' has to be either" " 'normal' or 'uniform'. Got 'rnd' instead.", transformer.inverse_transform, X_tran) def test_quantile_transform_sparse_ignore_zeros(): X = np.array([[0, 1], [0, 0], [0, 2], [0, 2], [0, 1]]) X_sparse = sparse.csc_matrix(X) transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5) # dense case -> warning raise assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect" " only with sparse matrix. 
This parameter has no" " effect.", transformer.fit, X) X_expected = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [0, 0]]) X_trans = transformer.fit_transform(X_sparse) assert_almost_equal(X_expected, X_trans.A) # consider the case where sparse entries are missing values and user-given # zeros are to be considered X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0]) X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]) X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8]) X_sparse = sparse.csc_matrix((X_data, (X_row, X_col))) X_trans = transformer.fit_transform(X_sparse) X_expected = np.array([[0., 0.5], [0., 0.], [0., 1.], [0., 1.], [0., 0.5], [0., 0.], [0., 0.5], [0., 1.], [0., 0.]]) assert_almost_equal(X_expected, X_trans.A) transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5) X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1]) X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1]) X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6]) X_sparse = sparse.csc_matrix((X_data, (X_row, X_col))) X_trans = transformer.fit_transform(X_sparse) X_expected = np.array([[0, 1], [0, 0.375], [0, 0.375], [0, 0.375], [0, 1], [0, 0], [0, 1]]) assert_almost_equal(X_expected, X_trans.A) assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A) # check in conjunction with subsampling transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5, subsample=8, random_state=0) X_trans = transformer.fit_transform(X_sparse) assert_almost_equal(X_expected, X_trans.A) assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A) def test_quantile_transform_dense_toy(): X = np.array([[0, 2, 2.6], [25, 4, 4.1], [50, 6, 2.3], [75, 8, 9.5], [100, 10, 0.1]]) transformer = QuantileTransformer(n_quantiles=5) transformer.fit(X) # using a uniform output, each entry of X should be mapped between 0 and 1 # and be equally spaced X_trans = transformer.fit_transform(X) X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T assert_almost_equal(np.sort(X_trans, axis=0), X_expected) X_test = np.array([ [-1, 1, 0], [101, 11, 10], ]) X_expected = np.array([ [0, 0, 0], [1, 1, 1], ]) assert_array_almost_equal(transformer.transform(X_test), X_expected) X_trans_inv = transformer.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) def test_quantile_transform_subsampling(): # Test that subsampling the input yields consistent results. We check # that the computed quantiles are almost mapped to a [0, 1] vector where # values are equally spaced. The infinite norm is checked to be smaller # than a given threshold. This is repeated 5 times. # dense support n_samples = 1000000 n_quantiles = 1000 X = np.sort(np.random.sample((n_samples, 1)), axis=0) ROUND = 5 inf_norm_arr = [] for random_state in range(ROUND): transformer = QuantileTransformer(random_state=random_state, n_quantiles=n_quantiles, subsample=n_samples // 10) transformer.fit(X) diff = (np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_)) inf_norm = np.max(np.abs(diff)) assert_true(inf_norm < 1e-2) inf_norm_arr.append(inf_norm) # each random subsampling yields a unique approximation to the expected # linspace CDF assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr)) # sparse support # TODO: rng should be seeded once we drop support for older versions of # scipy (< 0.13) that don't support seeding.
X = sparse.rand(n_samples, 1, density=.99, format='csc') inf_norm_arr = [] for random_state in range(ROUND): transformer = QuantileTransformer(random_state=random_state, n_quantiles=n_quantiles, subsample=n_samples // 10) transformer.fit(X) diff = (np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_)) inf_norm = np.max(np.abs(diff)) assert_true(inf_norm < 1e-1) inf_norm_arr.append(inf_norm) # each random subsampling yields a unique approximation to the expected # linspace CDF assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr)) def test_quantile_transform_sparse_toy(): X = np.array([[0., 2., 0.], [25., 4., 0.], [50., 0., 2.6], [0., 0., 4.1], [0., 6., 0.], [0., 8., 0.], [75., 0., 2.3], [0., 10., 0.], [0., 0., 9.5], [100., 0., 0.1]]) X = sparse.csc_matrix(X) transformer = QuantileTransformer(n_quantiles=10) transformer.fit(X) X_trans = transformer.fit_transform(X) assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.) assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.) X_trans_inv = transformer.inverse_transform(X_trans) assert_array_almost_equal(X.toarray(), X_trans_inv.toarray()) transformer_dense = QuantileTransformer(n_quantiles=10).fit( X.toarray()) X_trans = transformer_dense.transform(X) assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.) assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.) X_trans_inv = transformer_dense.inverse_transform(X_trans) assert_array_almost_equal(X.toarray(), X_trans_inv.toarray()) def test_quantile_transform_axis1(): X = np.array([[0, 25, 50, 75, 100], [2, 4, 6, 8, 10], [2.6, 4.1, 2.3, 9.5, 0.1]]) X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5) X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5) assert_array_almost_equal(X_trans_a0, X_trans_a1.T) def test_quantile_transform_bounds(): # Lower and upper bounds are manually mapped. We check that in the case # of a constant feature and a binary feature, the bounds are properly # mapped. X_dense = np.array([[0, 0], [0, 0], [1, 0]]) X_sparse = sparse.csc_matrix(X_dense) # check sparse and dense are consistent X_trans = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform(X_dense) assert_array_almost_equal(X_trans, X_dense) X_trans_sp = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform(X_sparse) assert_array_almost_equal(X_trans_sp.A, X_dense) assert_array_almost_equal(X_trans, X_trans_sp.A) # check the consistency of the bounds by learning on 1 matrix # and transforming another X = np.array([[0, 1], [0, 0.5], [1, 0]]) X1 = np.array([[0, 0.1], [0, 0.5], [1, 0.1]]) transformer = QuantileTransformer(n_quantiles=3).fit(X) X_trans = transformer.transform(X1) assert_array_almost_equal(X_trans, X1) # check that values outside of the range learned will be mapped properly.
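# Editor's note (added comments, not upstream code): QuantileTransformer
# saturates outside the fitted range -- transform() maps anything below the
# smallest (above the largest) training value to the lowest (highest)
# quantile, and inverse_transform() likewise clips to the extremes of
# references_; the assertions below exercise exactly that clipping behaviour.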
X = np.random.random((1000, 1)) transformer = QuantileTransformer() transformer.fit(X) assert_equal(transformer.transform(-10), transformer.transform(np.min(X))) assert_equal(transformer.transform(10), transformer.transform(np.max(X))) assert_equal(transformer.inverse_transform(-10), transformer.inverse_transform( np.min(transformer.references_))) assert_equal(transformer.inverse_transform(10), transformer.inverse_transform( np.max(transformer.references_))) def test_quantile_transform_and_inverse(): # iris dataset X = iris.data transformer = QuantileTransformer(n_quantiles=1000, random_state=0) X_trans = transformer.fit_transform(X) X_trans_inv = transformer.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) def test_robust_scaler_invalid_range(): for range_ in [ (-1, 90), (-2, -3), (10, 101), (100.5, 101), (90, 50), ]: scaler = RobustScaler(quantile_range=range_) assert_raises_regex(ValueError, 'Invalid quantile range: \(', scaler.fit, iris.data) def test_scale_function_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_scaled = scale(X, with_mean=False) assert_false(np.any(np.isnan(X_scaled))) X_csr_scaled = scale(X_csr, with_mean=False) assert_false(np.any(np.isnan(X_csr_scaled.data))) # test csc has same outcome X_csc_scaled = scale(X_csr.tocsc(), with_mean=False) assert_array_almost_equal(X_scaled, X_csc_scaled.toarray()) # raises value error on axis != 0 assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1) assert_array_almost_equal(X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # null scale X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True) assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray()) def test_robust_scale_axis1(): X = iris.data X_trans = robust_scale(X, axis=1) assert_array_almost_equal(np.median(X_trans, axis=1), 0) q = np.percentile(X_trans, q=(25, 75), axis=1) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_robust_scaler_zero_variance_features(): # Check RobustScaler on toy data with zero variance features X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] scaler = RobustScaler() X_trans = scaler.fit_transform(X) # NOTE: for such a small sample size, what we expect in the third column # depends HEAVILY on the method used to calculate quantiles. The values # here were calculated to fit the quantiles produces by np.percentile # using numpy 1.9 Calculating quantiles with # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles # would yield very different results! 
X_expected = [[0., 0., +0.0], [0., 0., -1.0], [0., 0., +1.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 1., +0.], [-1., 0., -0.83333], [+0., 0., +1.66667]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3) def test_maxabs_scaler_zero_variance_features(): # Check MaxAbsScaler on toy data with zero variance features X = [[0., 1., +0.5], [0., 1., -0.3], [0., 1., +1.5], [0., 0., +0.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 2.0, 1.0 / 3.0], [-1., 1.0, 0.0], [+0., 1.0, 1.0]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2) # function interface X_trans = maxabs_scale(X) assert_array_almost_equal(X_trans, X_expected) # sparse data X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_trans_csr = scaler.fit_transform(X_csr) X_trans_csc = scaler.fit_transform(X_csc) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans_csr.A, X_expected) assert_array_almost_equal(X_trans_csc.A, X_expected) X_trans_csr_inv = scaler.inverse_transform(X_trans_csr) X_trans_csc_inv = scaler.inverse_transform(X_trans_csc) assert_array_almost_equal(X, X_trans_csr_inv.A) assert_array_almost_equal(X, X_trans_csc_inv.A) def test_maxabs_scaler_large_negative_value(): # Check MaxAbsScaler on toy data with a large negative value X = [[0., 1., +0.5, -1.0], [0., 1., -0.3, -0.5], [0., 1., -100.0, 0.0], [0., 0., +0.0, -2.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 0.005, -0.5], [0., 1., -0.003, -0.25], [0., 1., -1.0, 0.0], [0., 0., 0.0, -1.0]] assert_array_almost_equal(X_trans, X_expected) def test_maxabs_scaler_transform_one_row_csr(): # Check MaxAbsScaler on transforming csr matrix with one row X = sparse.csr_matrix([[0.5, 1., 1.]]) scaler = MaxAbsScaler() scaler = scaler.fit(X) X_trans = scaler.transform(X) X_expected = sparse.csr_matrix([[1., 1., 1.]]) assert_array_almost_equal(X_trans.toarray(), X_expected.toarray()) X_scaled_back = scaler.inverse_transform(X_trans) assert_array_almost_equal(X.toarray(), X_scaled_back.toarray()) def test_warning_scaling_integers(): # Check warning when scaling integer data X = np.array([[1, 2, 0], [0, 0, 0]], dtype=np.uint8) w = "Data with input dtype uint8 was converted to float64" clean_warning_registry() assert_warns_message(DataConversionWarning, w, scale, X) assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X) assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X) def test_maxabs_scaler_1d(): # Test scaling of dataset along single axis for X in [X_1row, X_1col, X_list_1row, X_list_1col]: scaler = MaxAbsScaler(copy=True) X_scaled = scaler.fit(X).transform(X) if isinstance(X, list): X = np.array(X) # cast only after scaling done if _check_dim_1axis(X) == 1: assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), np.ones(n_features)) else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X) # Constant feature X = np.ones(5).reshape(5, 1) scaler = MaxAbsScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # function interface X_1d = X_1row.ravel() max_abs = np.abs(X_1d).max() assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True)) def test_maxabs_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d[:100, :] n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = MaxAbsScaler().fit(X) scaler_incr = MaxAbsScaler() scaler_incr_csr = MaxAbsScaler() scaler_incr_csc = MaxAbsScaler() for batch in gen_batches(n, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) X_csr = sparse.csr_matrix(X[batch]) scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr) X_csc = sparse.csc_matrix(X[batch]) scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csr.max_abs_) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csc.max_abs_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr_csr.n_samples_seen_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr_csc.n_samples_seen_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_) assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) # Test std after 1 step batch0 = slice(0, chunk_size) scaler_batch = MaxAbsScaler().fit(X[batch0]) scaler_incr = MaxAbsScaler().partial_fit(X[batch0]) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) # Test std until the end of partial fits, and scaler_batch = MaxAbsScaler().fit(X) scaler_incr = MaxAbsScaler() # Clean estimator for i, batch in enumerate(gen_batches(n, chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr(i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_) def test_normalizer_l1(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) assert_true(X_norm is not X) X_norm1 = toarray(X_norm) normalizer = Normalizer(norm='l1', copy=False) 
X_norm = normalizer.transform(X) assert_true(X_norm is X) X_norm2 = toarray(X_norm) for X_norm in (X_norm1, X_norm2): row_sums = np.abs(X_norm).sum(axis=1) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(row_sums[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_l2(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l2', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='l2', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_max(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='max', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='max', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): row_maxs = X_norm.max(axis=1) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(row_maxs[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def 
test_normalize(): # Test normalize function # Only tests functionality not used by the tests for Normalizer. X = np.random.RandomState(37).randn(3, 2) assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T) assert_raises(ValueError, normalize, [[0]], axis=2) assert_raises(ValueError, normalize, [[0]], norm='l3') rs = np.random.RandomState(0) X_dense = rs.randn(10, 5) X_sparse = sparse.csr_matrix(X_dense) ones = np.ones((10)) for X in (X_dense, X_sparse): for dtype in (np.float32, np.float64): for norm in ('l1', 'l2'): X = X.astype(dtype) X_norm = normalize(X, norm=norm) assert_equal(X_norm.dtype, dtype) X_norm = toarray(X_norm) if norm == 'l1': row_sums = np.abs(X_norm).sum(axis=1) else: X_norm_squared = X_norm**2 row_sums = X_norm_squared.sum(axis=1) assert_array_almost_equal(row_sums, ones) # Test return_norm X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]]) for norm in ('l1', 'l2', 'max'): _, norms = normalize(X_dense, norm=norm, return_norm=True) if norm == 'l1': assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0])) elif norm == 'l2': assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127])) else: assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0])) X_sparse = sparse.csr_matrix(X_dense) for norm in ('l1', 'l2'): assert_raises(NotImplementedError, normalize, X_sparse, norm=norm, return_norm=True) _, norms = normalize(X_sparse, norm='max', return_norm=True) assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0])) def test_binarizer(): X_ = np.array([[1, 0, 5], [2, 3, -1]]) for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix): X = init(X_.copy()) binarizer = Binarizer(threshold=2.0, copy=True) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 4) assert_equal(np.sum(X_bin == 1), 2) X_bin = binarizer.transform(X) assert_equal(sparse.issparse(X), sparse.issparse(X_bin)) binarizer = Binarizer(copy=True).fit(X) X_bin = toarray(binarizer.transform(X)) assert_true(X_bin is not X) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=True) X_bin = binarizer.transform(X) assert_true(X_bin is not X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=False) X_bin = binarizer.transform(X) if init is not list: assert_true(X_bin is X) binarizer = Binarizer(copy=False) X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64) X_bin = binarizer.transform(X_float) if init is not list: assert_true(X_bin is X_float) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(threshold=-0.5, copy=True) for init in (np.array, list): X = init(X_.copy()) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 1) assert_equal(np.sum(X_bin == 1), 5) X_bin = binarizer.transform(X) # Cannot use threshold < 0 for sparse assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X)) def test_center_kernel(): # Test that KernelCenterer is equivalent to StandardScaler # in feature space rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) scaler = StandardScaler(with_std=False) scaler.fit(X_fit) X_fit_centered = scaler.transform(X_fit) K_fit = np.dot(X_fit, X_fit.T) # center fit time matrix centerer = KernelCenterer() K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T) K_fit_centered2 = centerer.fit_transform(K_fit) assert_array_almost_equal(K_fit_centered, K_fit_centered2) # center predict time 
matrix X_pred = rng.random_sample((2, 4)) K_pred = np.dot(X_pred, X_fit.T) X_pred_centered = scaler.transform(X_pred) K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T) K_pred_centered2 = centerer.transform(K_pred) assert_array_almost_equal(K_pred_centered, K_pred_centered2) def test_cv_pipeline_precomputed(): # Cross-validate a regression on four coplanar points with the same # value. Use precomputed kernel to ensure Pipeline with KernelCenterer # is treated as a _pairwise operation. X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]]) y_true = np.ones((4,)) K = X.dot(X.T) kcent = KernelCenterer() pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())]) # did the pipeline set the _pairwise attribute? assert_true(pipeline._pairwise) # test cross-validation, score should be almost perfect # NB: this test is pretty vacuous -- it's mainly to test integration # of Pipeline and KernelCenterer y_pred = cross_val_predict(pipeline, K, y_true, cv=2) assert_array_almost_equal(y_true, y_pred) def test_fit_transform(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for obj in ((StandardScaler(), Normalizer(), Binarizer())): X_transformed = obj.fit(X).transform(X) X_transformed2 = obj.fit_transform(X) assert_array_equal(X_transformed, X_transformed2) def test_add_dummy_feature(): X = [[1, 0], [0, 1], [0, 1]] X = add_dummy_feature(X) assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_coo(): X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_coo(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csc(): X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csc(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csr(): X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csr(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_one_hot_encoder_sparse(): # Test OneHotEncoder's fit and transform. X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder() # discover max values automatically X_trans = enc.fit_transform(X).toarray() assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, [[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]]) # max value given as 3 enc = OneHotEncoder(n_values=4) X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 4 * 3)) assert_array_equal(enc.feature_indices_, [0, 4, 8, 12]) # max value given per feature enc = OneHotEncoder(n_values=[3, 2, 2]) X = [[1, 0, 1], [0, 1, 1]] X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 3 + 2 + 2)) assert_array_equal(enc.n_values_, [3, 2, 2]) # check that testing with larger feature works: X = np.array([[2, 0, 1], [0, 1, 1]]) enc.transform(X) # test that an error is raised when out of bounds: X_too_large = [[0, 2, 1], [0, 1, 1]] assert_raises(ValueError, enc.transform, X_too_large) error_msg = "unknown categorical feature present \[2\] during transform." 
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large) assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X) # test that error is raised when wrong number of features assert_raises(ValueError, enc.transform, X[:, :-1]) # test that error is raised when wrong number of features in fit # with prespecified n_values assert_raises(ValueError, enc.fit, X[:, :-1]) # test exception on wrong init param assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X) enc = OneHotEncoder() # test negative input to fit assert_raises(ValueError, enc.fit, [[0], [-1]]) # test negative input to transform enc.fit([[0], [1]]) assert_raises(ValueError, enc.transform, [[0], [-1]]) def test_one_hot_encoder_dense(): # check for sparse=False X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder(sparse=False) # discover max values automatically X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, np.array([[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]])) def _check_transform_selected(X, X_expected, sel): for M in (X, sparse.csr_matrix(X)): Xtr = _transform_selected(M, Binarizer().transform, sel) assert_array_equal(toarray(Xtr), X_expected) def test_transform_selected(): X = [[3, 2, 1], [0, 1, 1]] X_expected = [[1, 2, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0]) _check_transform_selected(X, X_expected, [True, False, False]) X_expected = [[1, 1, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0, 1, 2]) _check_transform_selected(X, X_expected, [True, True, True]) _check_transform_selected(X, X_expected, "all") _check_transform_selected(X, X, []) _check_transform_selected(X, X, [False, False, False]) def test_transform_selected_copy_arg(): # transformer that alters X def _mutating_transformer(X): X[0, 0] = X[0, 0] + 1 return X original_X = np.asarray([[1, 2], [3, 4]]) expected_Xtr = [[2, 2], [3, 4]] X = original_X.copy() Xtr = _transform_selected(X, _mutating_transformer, copy=True, selected='all') assert_array_equal(toarray(X), toarray(original_X)) assert_array_equal(toarray(Xtr), expected_Xtr) def _run_one_hot(X, X2, cat): enc = OneHotEncoder(categorical_features=cat) Xtr = enc.fit_transform(X) X2tr = enc.transform(X2) return Xtr, X2tr def _check_one_hot(X, X2, cat, n_features): ind = np.where(cat)[0] # With mask A, B = _run_one_hot(X, X2, cat) # With indices C, D = _run_one_hot(X, X2, ind) # Check shape assert_equal(A.shape, (2, n_features)) assert_equal(B.shape, (1, n_features)) assert_equal(C.shape, (2, n_features)) assert_equal(D.shape, (1, n_features)) # Check that mask and indices give the same results assert_array_equal(toarray(A), toarray(C)) assert_array_equal(toarray(B), toarray(D)) def test_one_hot_encoder_categorical_features(): X = np.array([[3, 2, 1], [0, 1, 1]]) X2 = np.array([[1, 1, 1]]) cat = [True, False, False] _check_one_hot(X, X2, cat, 4) # Edge case: all non-categorical cat = [False, False, False] _check_one_hot(X, X2, cat, 3) # Edge case: all categorical cat = [True, True, True] _check_one_hot(X, X2, cat, 5) def test_one_hot_encoder_unknown_transform(): X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) y = np.array([[4, 1, 1]]) # Test that one hot encoder raises error for unknown features # present during transform. 
oh = OneHotEncoder(handle_unknown='error') oh.fit(X) assert_raises(ValueError, oh.transform, y) # Test the ignore option, ignores unknown features. oh = OneHotEncoder(handle_unknown='ignore') oh.fit(X) assert_array_equal( oh.transform(y).toarray(), np.array([[0., 0., 0., 0., 1., 0., 0.]])) # Raise an error if handle_unknown is neither 'ignore' nor 'error'. oh = OneHotEncoder(handle_unknown='42') oh.fit(X) assert_raises(ValueError, oh.transform, y) def test_fit_cold_start(): X = iris.data X_2d = X[:, :2] # Scalers that have a partial_fit method scalers = [StandardScaler(with_mean=False, with_std=False), MinMaxScaler(), MaxAbsScaler()] for scaler in scalers: scaler.fit_transform(X) # with a different shape, this may break the scaler unless the internal # state is reset scaler.fit_transform(X_2d) def test_quantile_transform_valid_axis(): X = np.array([[0, 25, 50, 75, 100], [2, 4, 6, 8, 10], [2.6, 4.1, 2.3, 9.5, 0.1]]) assert_raises_regex(ValueError, "axis should be either equal to 0 or 1" ". Got axis=2", quantile_transform, X.T, axis=2)
75601
37.221436
79
py
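The test file above pins down QuantileTransformer's contract. As a minimal hedged sketch (not part of the dumped file; the toy data is an assumption), each feature is mapped through its empirical CDF onto [0, 1], and inverse_transform approximately round-trips:

import numpy as np
from sklearn.preprocessing import QuantileTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(100, 1))          # toy skewed feature; any 2-D float array works
qt = QuantileTransformer(n_quantiles=100, random_state=0)
X_uniform = qt.fit_transform(X)           # values mapped through the empirical CDF into [0, 1]
X_back = qt.inverse_transform(X_uniform)  # approximate round-trip, as test_quantile_transform_and_inverse asserts
print(X_uniform.min(), X_uniform.max())   # 0.0 1.0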
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/tests/test_function_transformer.py
import numpy as np

from sklearn.preprocessing import FunctionTransformer
from sklearn.utils.testing import assert_equal, assert_array_equal
from sklearn.utils.testing import assert_warns_message


def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
    def _func(X, *args, **kwargs):
        args_store.append(X)
        args_store.extend(args)
        kwargs_store.update(kwargs)
        return func(X)

    return _func


def test_delegate_to_func():
    # (args|kwargs)_store will hold the positional and keyword arguments
    # passed to the function inside the FunctionTransformer.
    args_store = []
    kwargs_store = {}
    X = np.arange(10).reshape((5, 2))
    assert_array_equal(
        FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
        X,
        'transform should have returned X unchanged',
    )

    # The function should only have received X.
    assert_equal(
        args_store,
        [X],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )

    # reset the argument stores.
    args_store[:] = []  # python2 compatible inplace list clear.
    kwargs_store.clear()
    y = object()

    transformed = assert_warns_message(
        DeprecationWarning, "pass_y is deprecated",
        FunctionTransformer(
            _make_func(args_store, kwargs_store),
            pass_y=True).transform, X, y)

    assert_array_equal(transformed, X,
                       err_msg='transform should have returned X unchanged')

    # The function should have received X and y.
    assert_equal(
        args_store,
        [X, y],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )


def test_np_log():
    X = np.arange(10).reshape((5, 2))

    # Test that the numpy.log example still works.
    assert_array_equal(
        FunctionTransformer(np.log1p).transform(X),
        np.log1p(X),
    )


def test_kw_arg():
    X = np.linspace(0, 1, num=10).reshape((5, 2))

    F = FunctionTransformer(np.around, kw_args=dict(decimals=3))

    # Test that rounding is correct
    assert_array_equal(F.transform(X), np.around(X, decimals=3))


def test_kw_arg_update():
    X = np.linspace(0, 1, num=10).reshape((5, 2))

    F = FunctionTransformer(np.around, kw_args=dict(decimals=3))

    F.kw_args['decimals'] = 1

    # Test that rounding is correct
    assert_array_equal(F.transform(X), np.around(X, decimals=1))


def test_kw_arg_reset():
    X = np.linspace(0, 1, num=10).reshape((5, 2))

    F = FunctionTransformer(np.around, kw_args=dict(decimals=3))

    F.kw_args = dict(decimals=1)

    # Test that rounding is correct
    assert_array_equal(F.transform(X), np.around(X, decimals=1))


def test_inverse_transform():
    X = np.array([1, 4, 9, 16]).reshape((2, 2))

    # Test that inverse_transform works correctly
    F = FunctionTransformer(
        func=np.sqrt,
        inverse_func=np.around, inv_kw_args=dict(decimals=3),
    )
    assert_array_equal(
        F.inverse_transform(F.transform(X)),
        np.around(np.sqrt(X), decimals=3),
    )
3503
26.162791
79
py
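test_np_log and test_inverse_transform above exercise FunctionTransformer with forward and inverse callables. A minimal sketch (not from the dumped file) using the exact log1p/expm1 inverse pair:

import numpy as np
from sklearn.preprocessing import FunctionTransformer

ft = FunctionTransformer(func=np.log1p, inverse_func=np.expm1)
X = np.arange(10, dtype=float).reshape((5, 2))
X_trans = ft.fit_transform(X)                                 # elementwise log(1 + x)
np.testing.assert_allclose(ft.inverse_transform(X_trans), X)  # expm1 undoes log1p exactly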
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/tests/test_imputation.py
import numpy as np from scipy import sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_false from sklearn.preprocessing.imputation import Imputer from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn import tree from sklearn.random_projection import sparse_random_matrix def _check_statistics(X, X_true, strategy, statistics, missing_values): """Utility function for testing imputation for a given strategy. Test: - along the two axes - with dense and sparse arrays Check that: - the statistics (mean, median, mode) are correct - the missing values are imputed correctly""" err_msg = "Parameters: strategy = %s, missing_values = %s, " \ "axis = {0}, sparse = {1}" % (strategy, missing_values) # Normal matrix, axis = 0 imputer = Imputer(missing_values, strategy=strategy, axis=0) X_trans = imputer.fit(X).transform(X.copy()) assert_array_equal(imputer.statistics_, statistics, err_msg.format(0, False)) assert_array_equal(X_trans, X_true, err_msg.format(0, False)) # Normal matrix, axis = 1 imputer = Imputer(missing_values, strategy=strategy, axis=1) imputer.fit(X.transpose()) if np.isnan(statistics).any(): assert_raises(ValueError, imputer.transform, X.copy().transpose()) else: X_trans = imputer.transform(X.copy().transpose()) assert_array_equal(X_trans, X_true.transpose(), err_msg.format(1, False)) # Sparse matrix, axis = 0 imputer = Imputer(missing_values, strategy=strategy, axis=0) imputer.fit(sparse.csc_matrix(X)) X_trans = imputer.transform(sparse.csc_matrix(X.copy())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_array_equal(imputer.statistics_, statistics, err_msg.format(0, True)) assert_array_equal(X_trans, X_true, err_msg.format(0, True)) # Sparse matrix, axis = 1 imputer = Imputer(missing_values, strategy=strategy, axis=1) imputer.fit(sparse.csc_matrix(X.transpose())) if np.isnan(statistics).any(): assert_raises(ValueError, imputer.transform, sparse.csc_matrix(X.copy().transpose())) else: X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_array_equal(X_trans, X_true.transpose(), err_msg.format(1, True)) def test_imputation_shape(): # Verify the shapes of the imputed matrix for different strategies. X = np.random.randn(10, 2) X[::2] = np.nan for strategy in ['mean', 'median', 'most_frequent']: imputer = Imputer(strategy=strategy) X_imputed = imputer.fit_transform(X) assert_equal(X_imputed.shape, (10, 2)) X_imputed = imputer.fit_transform(sparse.csr_matrix(X)) assert_equal(X_imputed.shape, (10, 2)) def test_imputation_mean_median_only_zero(): # Test imputation using the mean and median strategies, when # missing_values == 0. X = np.array([ [np.nan, 0, 0, 0, 5], [np.nan, 1, 0, np.nan, 3], [np.nan, 2, 0, 0, 0], [np.nan, 6, 0, 5, 13], ]) X_imputed_mean = np.array([ [3, 5], [1, 3], [2, 7], [6, 13], ]) statistics_mean = [np.nan, 3, np.nan, np.nan, 7] # Behaviour of median with NaN is undefined, e.g. 
different results in # np.median and np.ma.median X_for_median = X[:, [0, 1, 2, 4]] X_imputed_median = np.array([ [2, 5], [1, 3], [2, 5], [6, 13], ]) statistics_median = [np.nan, 2, np.nan, 5] _check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0) _check_statistics(X_for_median, X_imputed_median, "median", statistics_median, 0) def safe_median(arr, *args, **kwargs): # np.median([]) raises a TypeError for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.median(arr, *args, **kwargs) def safe_mean(arr, *args, **kwargs): # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.mean(arr, *args, **kwargs) def test_imputation_mean_median(): # Test imputation using the mean and median strategies, when # missing_values != 0. rng = np.random.RandomState(0) dim = 10 dec = 10 shape = (dim * dim, dim + dec) zeros = np.zeros(shape[0]) values = np.arange(1, shape[0] + 1) values[4::2] = - values[4::2] tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))), ("mean", 0, lambda z, v, p: np.mean(v)), ("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))), ("median", 0, lambda z, v, p: np.median(v))] for strategy, test_missing_values, true_value_fun in tests: X = np.empty(shape) X_true = np.empty(shape) true_statistics = np.empty(shape[1]) # Create a matrix X with columns # - with only zeros, # - with only missing values # - with zeros, missing values and values # And a matrix X_true containing all true values for j in range(shape[1]): nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) nb_values = shape[0] - nb_zeros - nb_missing_values z = zeros[:nb_zeros] p = np.repeat(test_missing_values, nb_missing_values) v = values[rng.permutation(len(values))[:nb_values]] true_statistics[j] = true_value_fun(z, v, p) # Create the columns X[:, j] = np.hstack((v, z, p)) if 0 == test_missing_values: X_true[:, j] = np.hstack((v, np.repeat( true_statistics[j], nb_missing_values + nb_zeros))) else: X_true[:, j] = np.hstack((v, z, np.repeat(true_statistics[j], nb_missing_values))) # Shuffle them the same way np.random.RandomState(j).shuffle(X[:, j]) np.random.RandomState(j).shuffle(X_true[:, j]) # Mean doesn't support columns containing NaNs, median does if strategy == "median": cols_to_keep = ~np.isnan(X_true).any(axis=0) else: cols_to_keep = ~np.isnan(X_true).all(axis=0) X_true = X_true[:, cols_to_keep] _check_statistics(X, X_true, strategy, true_statistics, test_missing_values) def test_imputation_median_special_cases(): # Test median imputation with sparse boundary cases X = np.array([ [0, np.nan, np.nan], # odd: implicit zero [5, np.nan, np.nan], # odd: explicit nonzero [0, 0, np.nan], # even: average two zeros [-5, 0, np.nan], # even: avg zero and neg [0, 5, np.nan], # even: avg zero and pos [4, 5, np.nan], # even: avg nonzeros [-4, -5, np.nan], # even: avg negatives [-1, 2, np.nan], # even: crossing neg and pos ]).transpose() X_imputed_median = np.array([ [0, 0, 0], [5, 5, 5], [0, 0, 0], [-5, 0, -2.5], [0, 5, 2.5], [4, 5, 4.5], [-4, -5, -4.5], [-1, 2, .5], ]).transpose() statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5] _check_statistics(X, X_imputed_median, "median", statistics_median, 'NaN') def test_imputation_most_frequent(): # Test imputation using the most-frequent strategy. 
X = np.array([ [-1, -1, 0, 5], [-1, 2, -1, 3], [-1, 1, 3, -1], [-1, 2, 3, 7], ]) X_true = np.array([ [2, 0, 5], [2, 3, 3], [1, 3, 3], [2, 3, 7], ]) # scipy.stats.mode, used in Imputer, doesn't return the first most # frequent as promised in the doc but the lowest most frequent. When this # test will fail after an update of scipy, Imputer will need to be updated # to be consistent with the new (correct) behaviour _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1) def test_imputation_pipeline_grid_search(): # Test imputation within a pipeline + gridsearch. pipeline = Pipeline([('imputer', Imputer(missing_values=0)), ('tree', tree.DecisionTreeRegressor(random_state=0))]) parameters = { 'imputer__strategy': ["mean", "median", "most_frequent"], 'imputer__axis': [0, 1] } l = 100 X = sparse_random_matrix(l, l, density=0.10) Y = sparse_random_matrix(l, 1, density=0.10).toarray() gs = GridSearchCV(pipeline, parameters) gs.fit(X, Y) def test_imputation_pickle(): # Test for pickling imputers. import pickle l = 100 X = sparse_random_matrix(l, l, density=0.10) for strategy in ["mean", "median", "most_frequent"]: imputer = Imputer(missing_values=0, strategy=strategy) imputer.fit(X) imputer_pickled = pickle.loads(pickle.dumps(imputer)) assert_array_equal(imputer.transform(X.copy()), imputer_pickled.transform(X.copy()), "Fail to transform the data after pickling " "(strategy = %s)" % (strategy)) def test_imputation_copy(): # Test imputation with copy X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0) # copy=True, dense => copy X = X_orig.copy().toarray() imputer = Imputer(missing_values=0, strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_false(np.all(X == Xt)) # copy=True, sparse csr => copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, dense => no copy X = X_orig.copy().toarray() imputer = Imputer(missing_values=0, strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_array_equal(X, Xt) # copy=False, sparse csr, axis=1 => no copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_array_equal(X.data, Xt.data) # copy=False, sparse csc, axis=0 => no copy X = X_orig.copy().tocsc() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=0) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_array_equal(X.data, Xt.data) # copy=False, sparse csr, axis=0 => copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=0) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, sparse csc, axis=1 => copy X = X_orig.copy().tocsc() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, sparse csr, axis=1, missing_values=0 => copy X = X_orig.copy() imputer = Imputer(missing_values=0, strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) assert_false(sparse.issparse(Xt)) # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is # made, even if copy=False.
12300
33.169444
79
py
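_check_statistics above verifies mean, median, and most_frequent imputation along both axes. A minimal sketch (not from the dumped file) of the mean strategy on a toy matrix:

import numpy as np
from sklearn.preprocessing import Imputer

X = np.array([[1.0, 2.0],
              [np.nan, 3.0],
              [7.0, np.nan]])
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)  # column-wise means
print(imp.fit_transform(X))
# [[ 1.   2. ]
#  [ 4.   3. ]   <- NaN replaced by mean(1, 7) = 4
#  [ 7.   2.5]]  <- NaN replaced by mean(2, 3) = 2.5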
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/preprocessing/tests/test_label.py
import numpy as np from scipy.sparse import issparse from scipy.sparse import coo_matrix from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix from sklearn.utils.multiclass import type_of_target from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import ignore_warnings from sklearn.preprocessing.label import LabelBinarizer from sklearn.preprocessing.label import MultiLabelBinarizer from sklearn.preprocessing.label import LabelEncoder from sklearn.preprocessing.label import label_binarize from sklearn.preprocessing.label import _inverse_binarize_thresholding from sklearn.preprocessing.label import _inverse_binarize_multiclass from sklearn import datasets iris = datasets.load_iris() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def test_label_binarizer(): # one-class case defaults to negative label # For dense case: inp = ["pos", "pos", "pos", "pos"] lb = LabelBinarizer(sparse_output=False) expected = np.array([[0, 0, 0, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["pos"]) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) # For sparse case: lb = LabelBinarizer(sparse_output=True) got = lb.fit_transform(inp) assert_true(issparse(got)) assert_array_equal(lb.classes_, ["pos"]) assert_array_equal(expected, got.toarray()) assert_array_equal(lb.inverse_transform(got.toarray()), inp) lb = LabelBinarizer(sparse_output=False) # two-class case inp = ["neg", "pos", "pos", "neg"] expected = np.array([[0, 1, 1, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["neg", "pos"]) assert_array_equal(expected, got) to_invert = np.array([[1, 0], [0, 1], [0, 1], [1, 0]]) assert_array_equal(lb.inverse_transform(to_invert), inp) # multi-class case inp = ["spam", "ham", "eggs", "ham", "0"] expected = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]) got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam']) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) def test_label_binarizer_unseen_labels(): lb = LabelBinarizer() expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) got = lb.fit_transform(['b', 'd', 'e']) assert_array_equal(expected, got) expected = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]) got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f']) assert_array_equal(expected, got) def test_label_binarizer_set_label_encoding(): lb = LabelBinarizer(neg_label=-2, pos_label=0) # two-class case with pos_label=0 inp = np.array([0, 1, 1, 0]) expected = np.array([[-2, 0, 0, -2]]).T got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) lb = LabelBinarizer(neg_label=-2, pos_label=2) # multi-class case inp = np.array([3, 2, 1, 2, 0]) expected = np.array([[-2, -2, -2, +2], [-2, -2, +2, -2], [-2, +2, -2, -2], [-2, -2, +2, -2], [+2, -2, -2, -2]]) got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) @ignore_warnings def test_label_binarizer_errors(): # Check that invalid arguments yield ValueError one_class = np.array([0, 0, 0, 0]) lb = LabelBinarizer().fit(one_class) multi_label = 
[(2, 3), (0,), (0, 2)] assert_raises(ValueError, lb.transform, multi_label) lb = LabelBinarizer() assert_raises(ValueError, lb.transform, []) assert_raises(ValueError, lb.inverse_transform, []) assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1) assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2) assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2, sparse_output=True) # Fail on y_type assert_raises(ValueError, _inverse_binarize_thresholding, y=csr_matrix([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2], threshold=0) # Sequence of seq type should raise ValueError y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs) # Fail on the number of classes assert_raises(ValueError, _inverse_binarize_thresholding, y=csr_matrix([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2, 3], threshold=0) # Fail on the dimension of 'binary' assert_raises(ValueError, _inverse_binarize_thresholding, y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary", classes=[1, 2, 3], threshold=0) # Fail on multioutput data assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]])) assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]), [1, 2, 3]) def test_label_encoder(): # Test LabelEncoder's transform and inverse_transform methods le = LabelEncoder() le.fit([1, 1, 4, 5, -1, 0]) assert_array_equal(le.classes_, [-1, 0, 1, 4, 5]) assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]), [1, 2, 3, 3, 4, 0, 0]) assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]), [0, 1, 4, 4, 5, -1, -1]) assert_raises(ValueError, le.transform, [0, 6]) le.fit(["apple", "orange"]) msg = "bad input shape" assert_raise_message(ValueError, msg, le.transform, "apple") def test_label_encoder_fit_transform(): # Test fit_transform le = LabelEncoder() ret = le.fit_transform([1, 1, 4, 5, -1, 0]) assert_array_equal(ret, [2, 2, 3, 4, 0, 1]) le = LabelEncoder() ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"]) assert_array_equal(ret, [1, 1, 2, 0]) def test_label_encoder_errors(): # Check that invalid arguments yield ValueError le = LabelEncoder() assert_raises(ValueError, le.transform, []) assert_raises(ValueError, le.inverse_transform, []) # Fail on unseen labels le = LabelEncoder() le.fit([1, 2, 3, 1, -1]) assert_raises(ValueError, le.inverse_transform, [-1]) def test_sparse_output_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: (set([2, 3]), set([1]), set([1, 2])), lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for sparse_output in [True, False]: for inp in inputs: # With fit_transform mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit_transform(inp()) assert_equal(issparse(got), sparse_output) if sparse_output: # verify CSR assumption that indices and indptr have same dtype assert_equal(got.indices.dtype, got.indptr.dtype) got = got.toarray() assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) # With fit mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit(inp()).transform(inp()) assert_equal(issparse(got), sparse_output) if sparse_output: # verify CSR assumption that indices and indptr have same dtype assert_equal(got.indices.dtype, got.indptr.dtype) got = got.toarray() assert_array_equal(indicator_mat, got) 
assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) assert_raises(ValueError, mlb.inverse_transform, csr_matrix(np.array([[0, 1, 1], [2, 0, 0], [1, 1, 0]]))) def test_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: (set([2, 3]), set([1]), set([1, 2])), lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for inp in inputs: # With fit_transform mlb = MultiLabelBinarizer() got = mlb.fit_transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) # With fit mlb = MultiLabelBinarizer() got = mlb.fit(inp()).transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert_equal(mlb.inverse_transform(got), inverse) def test_multilabel_binarizer_empty_sample(): mlb = MultiLabelBinarizer() y = [[1, 2], [1], []] Y = np.array([[1, 1], [1, 0], [0, 0]]) assert_array_equal(mlb.fit_transform(y), Y) def test_multilabel_binarizer_unknown_class(): mlb = MultiLabelBinarizer() y = [[1, 2]] assert_raises(KeyError, mlb.fit(y).transform, [[0]]) mlb = MultiLabelBinarizer(classes=[1, 2]) assert_raises(KeyError, mlb.fit_transform, [[0]]) def test_multilabel_binarizer_given_classes(): inp = [(2, 3), (1,), (1, 2)] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # fit().transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # ensure works with extra class mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), np.hstack(([[0], [0], [0]], indicator_mat))) assert_array_equal(mlb.classes_, [4, 1, 3, 2]) # ensure fit is no-op as iterable is not consumed inp = iter(inp) mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) def test_multilabel_binarizer_same_length_sequence(): # Ensure sequences of the same length are not interpreted as a 2-d array inp = [[1], [0], [2]] indicator_mat = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) def test_multilabel_binarizer_non_integer_labels(): tuple_classes = np.empty(3, dtype=object) tuple_classes[:] = [(1,), (2,), (3,)] inputs = [ ([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']), ([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']), ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) for inp, classes in inputs: # fit_transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) 
assert_array_equal(mlb.inverse_transform(indicator_mat), inp) mlb = MultiLabelBinarizer() assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})]) def test_multilabel_binarizer_non_unique(): inp = [(1, 1, 1, 0)] indicator_mat = np.array([[1, 1]]) mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) def test_multilabel_binarizer_inverse_validation(): inp = [(1, 1, 1, 0)] mlb = MultiLabelBinarizer() mlb.fit_transform(inp) # Not binary assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]])) # The following binary cases are fine, however mlb.inverse_transform(np.array([[0, 0]])) mlb.inverse_transform(np.array([[1, 1]])) mlb.inverse_transform(np.array([[1, 0]])) # Wrong shape assert_raises(ValueError, mlb.inverse_transform, np.array([[1]])) assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]])) def test_label_binarize_with_class_order(): out = label_binarize([1, 6], classes=[1, 2, 4, 6]) expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) assert_array_equal(out, expected) # Modified class order out = label_binarize([1, 6], classes=[1, 6, 4, 2]) expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) assert_array_equal(out, expected) out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]) expected = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]) assert_array_equal(out, expected) def check_binarized_results(y, classes, pos_label, neg_label, expected): for sparse_output in [True, False]: if ((pos_label == 0 or neg_label != 0) and sparse_output): assert_raises(ValueError, label_binarize, y, classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) continue # check label_binarize binarized = label_binarize(y, classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) assert_array_equal(toarray(binarized), expected) assert_equal(issparse(binarized), sparse_output) # check inverse y_type = type_of_target(y) if y_type == "multiclass": inversed = _inverse_binarize_multiclass(binarized, classes=classes) else: inversed = _inverse_binarize_thresholding(binarized, output_type=y_type, classes=classes, threshold=((neg_label + pos_label) / 2.)) assert_array_equal(toarray(inversed), toarray(y)) # Check label binarizer lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) binarized = lb.fit_transform(y) assert_array_equal(toarray(binarized), expected) assert_equal(issparse(binarized), sparse_output) inverse_output = lb.inverse_transform(binarized) assert_array_equal(toarray(inverse_output), toarray(y)) assert_equal(issparse(inverse_output), issparse(y)) def test_label_binarize_binary(): y = [0, 1, 0] classes = [0, 1] pos_label = 2 neg_label = -1 expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1)) yield check_binarized_results, y, classes, pos_label, neg_label, expected # Binary case where sparse_output = True will not result in a ValueError y = [0, 1, 0] classes = [0, 1] pos_label = 3 neg_label = 0 expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1)) yield check_binarized_results, y, classes, pos_label, neg_label, expected def test_label_binarize_multiclass(): y = [0, 1, 2] classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = 2 * np.eye(3) yield check_binarized_results, y, classes, pos_label, neg_label, expected assert_raises(ValueError, label_binarize, y, classes, neg_label=-1, pos_label=pos_label, sparse_output=True) def test_label_binarize_multilabel(): y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 
0]]) classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = pos_label * y_ind y_sparse = [sparse_matrix(y_ind) for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix]] for y in [y_ind] + y_sparse: yield (check_binarized_results, y, classes, pos_label, neg_label, expected) assert_raises(ValueError, label_binarize, y, classes, neg_label=-1, pos_label=pos_label, sparse_output=True) def test_invalid_input_label_binarize(): assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2], pos_label=0, neg_label=1) def test_inverse_binarize_multiclass(): got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0], [-1, 0, -1], [0, 0, 0]]), np.arange(3)) assert_array_equal(got, np.array([1, 1, 0]))
18521
35.035019
79
py
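test_label_binarizer above asserts a one-column-per-class indicator encoding with an exact inverse. A minimal sketch (not from the dumped file):

from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
Y = lb.fit_transform(['spam', 'ham', 'eggs', 'ham'])
print(lb.classes_)              # ['eggs' 'ham' 'spam'] -- classes stored in sorted order
print(Y)                        # one indicator column per class, a single 1 per row
print(lb.inverse_transform(Y))  # recovers the original labels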
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/__main__.py
from __future__ import absolute_import

import os
import sys

# If we are running from a wheel, add the wheel to sys.path
# This allows the usage: python pip-*.whl/pip install pip-*.whl
if __package__ == '':
    # __file__ is pip-*.whl/pip/__main__.py
    # first dirname call strips off '/__main__.py', second strips off '/pip'
    # Resulting path is the name of the wheel itself
    # Add that to sys.path so we can import pip
    path = os.path.dirname(os.path.dirname(__file__))
    sys.path.insert(0, path)

from pip._internal import main as _main  # noqa

if __name__ == '__main__':
    sys.exit(_main())
610
29.55
75
py
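The shim above locates the wheel by stripping two path components from __file__. A hedged sketch of that double-dirname trick on a hypothetical wheel path (the filename below is illustrative only, not taken from the repository):

import os

file_in_wheel = "pip-10.0.1-py3.6.whl/pip/__main__.py"   # hypothetical __file__ inside a wheel
wheel = os.path.dirname(os.path.dirname(file_in_wheel))  # strips '/__main__.py', then '/pip'
print(wheel)  # pip-10.0.1-py3.6.whl -- prepended to sys.path so the wheel itself is importable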
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/__init__.py
__version__ = "10.0.1"
23
11
22
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/appdirs.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2005-2010 ActiveState Software Inc. # Copyright (c) 2013 Eddy Petrișor """Utilities for determining application-specific dirs. See <http://github.com/ActiveState/appdirs> for details and usage. """ # Dev Notes: # - MSDN on where to store app data files: # http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html # - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html __version_info__ = (1, 4, 3) __version__ = '.'.join(map(str, __version_info__)) import sys import os PY3 = sys.version_info[0] == 3 if PY3: unicode = str if sys.platform.startswith('java'): import platform os_name = platform.java_ver()[3][0] if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. system = 'win32' elif os_name.startswith('Mac'): # "Mac OS X", etc. system = 'darwin' else: # "Linux", "SunOS", "FreeBSD", etc. # Setting this to "linux2" is not ideal, but only Windows or Mac # are actually checked for and the rest of the module expects # *sys.platform* style strings. system = 'linux2' else: system = sys.platform def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user data directories are: Mac OS X: ~/Library/Application Support/<AppName> Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/<AppName>". 
""" if system == "win32": if appauthor is None: appauthor = appname const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" path = os.path.normpath(_get_win_folder(const)) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('~/Library/Application Support/') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): r"""Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/<AppName>', if XDG_DATA_DIRS is not set Typical site data directories are: Mac OS X: /Library/Application Support/<AppName> Unix: /usr/local/share/<AppName> or /usr/share/<AppName> Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('/Library/Application Support') if appname: path = os.path.join(path, appname) else: # XDG default for $XDG_DATA_DIRS # only first, if multipath is False path = os.getenv('XDG_DATA_DIRS', os.pathsep.join(['/usr/local/share', '/usr/share'])) pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path if appname and version: path = os.path.join(path, version) return path def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. 
You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user config directories are: Mac OS X: same as user_data_dir Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by default "~/.config/<AppName>". """ if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): r"""Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set Typical site config directories are: Mac OS X: same as site_data_dir Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """ if system in ["win32", "darwin"]: path = site_data_dir(appname, appauthor) if appname and version: path = os.path.join(path, version) else: # XDG default for $XDG_CONFIG_DIRS # only first, if multipath is False path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. 
You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/<AppName> Unix: ~/.cache/<AppName> (XDG default) Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) if opinion: path = os.path.join(path, "Cache") elif system == 'darwin': path = os.path.expanduser('~/Library/Caches') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific state dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user state directories are: Mac OS X: same as user_data_dir Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined Win *: same as user_data_dir For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state> to extend the XDG spec and support $XDG_STATE_HOME. That means, by default "~/.local/state/<AppName>". """ if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific log dir for this application. 
"appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. Typical user log directories are: Mac OS X: ~/Library/Logs/<AppName> Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. """ if system == "darwin": path = os.path.join( os.path.expanduser('~/Library/Logs'), appname) elif system == "win32": path = user_data_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "Logs") else: path = user_cache_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "log") if appname and version: path = os.path.join(path, version) return path class AppDirs(object): """Convenience wrapper for getting application dirs.""" def __init__(self, appname=None, appauthor=None, version=None, roaming=False, multipath=False): self.appname = appname self.appauthor = appauthor self.version = version self.roaming = roaming self.multipath = multipath @property def user_data_dir(self): return user_data_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_data_dir(self): return site_data_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_config_dir(self): return user_config_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_config_dir(self): return site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_cache_dir(self): return user_cache_dir(self.appname, self.appauthor, version=self.version) @property def user_state_dir(self): return user_state_dir(self.appname, self.appauthor, version=self.version) @property def user_log_dir(self): return user_log_dir(self.appname, self.appauthor, version=self.version) #---- internal support stuff def _get_win_folder_from_registry(csidl_name): """This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. 
""" if PY3: import winreg as _winreg else: import _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) dir, type = _winreg.QueryValueEx(key, shell_folder_name) return dir def _get_win_folder_with_pywin32(csidl_name): from win32com.shell import shellcon, shell dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) # Try to make this a unicode path because SHGetFolderPath does # not return unicode strings when there is unicode data in the # path. try: dir = unicode(dir) # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: try: import win32api dir = win32api.GetShortPathName(dir) except ImportError: pass except UnicodeError: pass return dir def _get_win_folder_with_ctypes(csidl_name): import ctypes csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value def _get_win_folder_with_jna(csidl_name): import array from com.sun import jna from com.sun.jna.platform import win32 buf_size = win32.WinDef.MAX_PATH * 2 buf = array.zeros('c', buf_size) shell = win32.Shell32.INSTANCE shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) dir = jna.Native.toString(buf.tostring()).rstrip("\0") # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. 
has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: buf = array.zeros('c', buf_size) kernel = win32.Kernel32.INSTANCE if kernel.GetShortPathName(dir, buf, buf_size): dir = jna.Native.toString(buf.tostring()).rstrip("\0") return dir if system == "win32": try: from ctypes import windll _get_win_folder = _get_win_folder_with_ctypes except ImportError: try: import com.sun.jna _get_win_folder = _get_win_folder_with_jna except ImportError: _get_win_folder = _get_win_folder_from_registry #---- self test code if __name__ == "__main__": appname = "MyApp" appauthor = "MyCompany" props = ("user_data_dir", "user_config_dir", "user_cache_dir", "user_state_dir", "user_log_dir", "site_data_dir", "site_config_dir") print("-- app dirs %s --" % __version__) print("-- app dirs (with optional 'version')") dirs = AppDirs(appname, appauthor, version="1.0") for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'version')") dirs = AppDirs(appname, appauthor) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'appauthor')") dirs = AppDirs(appname) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (with disabled 'appauthor')") dirs = AppDirs(appname, appauthor=False) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop)))
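The module-level functions plus the AppDirs wrapper above form the whole public API. A minimal usage sketch under the vendored import path shown in this record (the standalone appdirs package exposes the same names); the app name and author below are placeholders.

from pip._vendor import appdirs

# "MyApp"/"MyCompany" are placeholder values; results vary by platform,
# as the docstrings above describe.
dirs = appdirs.AppDirs("MyApp", "MyCompany", version="1.0")
print(dirs.user_data_dir)   # e.g. ~/.local/share/MyApp/1.0 on Linux
print(dirs.user_cache_dir)  # e.g. ~/.cache/MyApp/1.0 on Linux

# multipath=True returns every XDG config dir, joined with os.pathsep
print(appdirs.site_config_dir("MyApp", multipath=True))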
24,546
39.573554
122
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/six.py
# Copyright (c) 2010-2017 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Utilities for writing code that runs on Python 2 and 3""" from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <[email protected]>" __version__ = "1.11.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. 
delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", 
"Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("splitvalue", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", 
"urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), MovedAttribute("parse_http_list", "urllib2", "urllib.request"), MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in 
_urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return 
iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): try: if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value finally: value = None tb = None else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): try: raise tp, value, tb finally: tb = None """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): try: if from_value is None: raise value raise value from from_value finally: value = None """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): try: raise value from from_value finally: value = None """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) @classmethod def __prepare__(cls, name, this_bases): return meta.__prepare__(name, bases) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) 
if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
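six's main conveniences implemented above are the lazily resolved moves namespace, the dict-iterator helpers, and with_metaclass. A short sketch that runs unchanged on Python 2 and 3, using the vendored import path from this record:

from pip._vendor import six
from pip._vendor.six.moves import range  # lazily resolved MovedAttribute

class Meta(type):
    pass

# with_metaclass() sidesteps the incompatible 2.x/3.x metaclass syntax by
# building a temporary metaclass that replaces itself, as defined above.
class Base(six.with_metaclass(Meta)):
    pass

d = {"a": 1, "b": 2}
for key, value in six.iteritems(d):  # d.iteritems() on 2, iter(d.items()) on 3
    print(key, value)

print(six.text_type("text"), list(range(3)))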
30,888
33.628924
98
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/pyparsing.py
# module pyparsing.py # # Copyright (c) 2003-2016 Paul T. McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __doc__ = \ """ pyparsing module - Classes and methods to define and execute parsing grammars The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you don't need to learn a new syntax for defining grammars or matching expressions - the parsing module provides a library of classes that you use to construct the grammar directly in Python. Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements (L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to L{Literal} expressions):: from pip._vendor.pyparsing import Word, alphas # define grammar of a greeting greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) The program outputs the following:: Hello, World! -> ['Hello', ',', 'World', '!'] The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) 
- quoted strings - embedded comments """ __version__ = "2.2.0" __versionTime__ = "06 Mar 2017 02:06 UTC" __author__ = "Paul McGuire <[email protected]>" import string from weakref import ref as wkref import copy import sys import warnings import re import sre_constants import collections import pprint import traceback import types from datetime import datetime try: from _thread import RLock except ImportError: from threading import RLock try: from collections import OrderedDict as _OrderedDict except ImportError: try: from ordereddict import OrderedDict as _OrderedDict except ImportError: _OrderedDict = None #~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) __all__ = [ 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', 'CloseMatch', 'tokenMap', 'pyparsing_common', ] system_version = tuple(sys.version_info)[:3] PY_3 = system_version[0] == 3 if PY_3: _MAX_INT = sys.maxsize basestring = str unichr = chr _ustr = str # build list of single arg builtins, that can be used as parse actions singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] else: _MAX_INT = sys.maxint range = xrange def _ustr(obj): """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It then < returns the unicode object | encodes it with the default encoding | ... >. """ if isinstance(obj,unicode): return obj try: # If this works, then _ustr(obj) has the same behaviour as str(obj), so # it won't break any existing code. 
return str(obj) except UnicodeEncodeError: # Else encode it ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') xmlcharref = Regex(r'&#\d+;') xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) return xmlcharref.transformString(ret) # build list of single arg builtins, tolerant of Python version, that can be used as parse actions singleArgBuiltins = [] import __builtin__ for fname in "sum len sorted reversed list tuple set any all min max".split(): try: singleArgBuiltins.append(getattr(__builtin__,fname)) except AttributeError: continue _generatorType = type((y for y in range(1))) def _xml_escape(data): """Escape &, <, >, ", ', etc. in a string of data.""" # ampersand must be replaced first from_symbols = '&><"\'' to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) for from_,to_ in zip(from_symbols, to_symbols): data = data.replace(from_, to_) return data class _Constants(object): pass alphas = string.ascii_uppercase + string.ascii_lowercase nums = "0123456789" hexnums = nums + "ABCDEFabcdef" alphanums = alphas + nums _bslash = chr(92) printables = "".join(c for c in string.printable if c not in string.whitespace) class ParseBaseException(Exception): """base exception class for all parsing runtime exceptions""" # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible def __init__( self, pstr, loc=0, msg=None, elem=None ): self.loc = loc if msg is None: self.msg = pstr self.pstr = "" else: self.msg = msg self.pstr = pstr self.parserElement = elem self.args = (pstr, loc, msg) @classmethod def _from_exception(cls, pe): """ internal factory method to simplify creating one type of ParseException from another - avoids having __init__ signature conflicts among subclasses """ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) def __getattr__( self, aname ): """supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text """ if( aname == "lineno" ): return lineno( self.loc, self.pstr ) elif( aname in ("col", "column") ): return col( self.loc, self.pstr ) elif( aname == "line" ): return line( self.loc, self.pstr ) else: raise AttributeError(aname) def __str__( self ): return "%s (at char %d), (line:%d, col:%d)" % \ ( self.msg, self.loc, self.lineno, self.column ) def __repr__( self ): return _ustr(self) def markInputline( self, markerString = ">!<" ): """Extracts the exception line from the input string, and marks the location of the exception with a special symbol. 
""" line_str = self.line line_column = self.column - 1 if markerString: line_str = "".join((line_str[:line_column], markerString, line_str[line_column:])) return line_str.strip() def __dir__(self): return "lineno col line".split() + dir(type(self)) class ParseException(ParseBaseException): """ Exception thrown when parse expressions don't match class; supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text Example:: try: Word(nums).setName("integer").parseString("ABC") except ParseException as pe: print(pe) print("column: {}".format(pe.col)) prints:: Expected integer (at char 0), (line:1, col:1) column: 1 """ pass class ParseFatalException(ParseBaseException): """user-throwable exception thrown when inconsistent parse content is found; stops all parsing immediately""" pass class ParseSyntaxException(ParseFatalException): """just like L{ParseFatalException}, but thrown internally when an L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop immediately because an unbacktrackable syntax error has been found""" pass #~ class ReparseException(ParseBaseException): #~ """Experimental class - parse actions can raise this exception to cause #~ pyparsing to reparse the input string: #~ - with a modified input string, and/or #~ - with a modified start location #~ Set the values of the ReparseException in the constructor, and raise the #~ exception in a parse action to cause pyparsing to use the new string/location. #~ Setting the values as None causes no change to be made. #~ """ #~ def __init_( self, newstring, restartLoc ): #~ self.newParseText = newstring #~ self.reparseLoc = restartLoc class RecursiveGrammarException(Exception): """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" def __init__( self, parseElementList ): self.parseElementTrace = parseElementList def __str__( self ): return "RecursiveGrammarException: %s" % self.parseElementTrace class _ParseResultsWithOffset(object): def __init__(self,p1,p2): self.tup = (p1,p2) def __getitem__(self,i): return self.tup[i] def __repr__(self): return repr(self.tup[0]) def setOffset(self,i): self.tup = (self.tup[0],i) class ParseResults(object): """ Structured parse results, to provide multiple means of access to the parsed data: - as a list (C{len(results)}) - by list index (C{results[0], results[1]}, etc.) 
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName}) Example:: integer = Word(nums) date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") # parseString returns a ParseResults object result = date_str.parseString("1999/12/31") def test(s, fn=repr): print("%s -> %s" % (s, fn(eval(s)))) test("list(result)") test("result[0]") test("result['month']") test("result.day") test("'month' in result") test("'minutes' in result") test("result.dump()", str) prints:: list(result) -> ['1999', '/', '12', '/', '31'] result[0] -> '1999' result['month'] -> '12' result.day -> '31' 'month' in result -> True 'minutes' in result -> False result.dump() -> ['1999', '/', '12', '/', '31'] - day: 31 - month: 12 - year: 1999 """ def __new__(cls, toklist=None, name=None, asList=True, modal=True ): if isinstance(toklist, cls): return toklist retobj = object.__new__(cls) retobj.__doinit = True return retobj # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): if self.__doinit: self.__doinit = False self.__name = None self.__parent = None self.__accumNames = {} self.__asList = asList self.__modal = modal if toklist is None: toklist = [] if isinstance(toklist, list): self.__toklist = toklist[:] elif isinstance(toklist, _generatorType): self.__toklist = list(toklist) else: self.__toklist = [toklist] self.__tokdict = dict() if name is not None and name: if not modal: self.__accumNames[name] = 0 if isinstance(name,int): name = _ustr(name) # will always return a str, but use _ustr for consistency self.__name = name if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): if isinstance(toklist,basestring): toklist = [ toklist ] if asList: if isinstance(toklist,ParseResults): self[name] = _ParseResultsWithOffset(toklist.copy(),0) else: self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) self[name].__name = name else: try: self[name] = toklist[0] except (KeyError,TypeError,IndexError): self[name] = toklist def __getitem__( self, i ): if isinstance( i, (int,slice) ): return self.__toklist[i] else: if i not in self.__accumNames: return self.__tokdict[i][-1][0] else: return ParseResults([ v[0] for v in self.__tokdict[i] ]) def __setitem__( self, k, v, isinstance=isinstance ): if isinstance(v,_ParseResultsWithOffset): self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] sub = v[0] elif isinstance(k,(int,slice)): self.__toklist[k] = v sub = v else: self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] sub = v if isinstance(sub,ParseResults): sub.__parent = wkref(self) def __delitem__( self, i ): if isinstance(i,(int,slice)): mylen = len( self.__toklist ) del self.__toklist[i] # convert int to slice if isinstance(i, int): if i < 0: i += mylen i = slice(i, i+1) # get removed indices removed = list(range(*i.indices(mylen))) removed.reverse() # fixup indices in token dictionary for name,occurrences in self.__tokdict.items(): for j in removed: for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) else: del self.__tokdict[i] def __contains__( self, k ): return k in self.__tokdict def __len__( self ): return len( self.__toklist ) def __bool__(self): return ( 
not not self.__toklist ) __nonzero__ = __bool__ def __iter__( self ): return iter( self.__toklist ) def __reversed__( self ): return iter( self.__toklist[::-1] ) def _iterkeys( self ): if hasattr(self.__tokdict, "iterkeys"): return self.__tokdict.iterkeys() else: return iter(self.__tokdict) def _itervalues( self ): return (self[k] for k in self._iterkeys()) def _iteritems( self ): return ((k, self[k]) for k in self._iterkeys()) if PY_3: keys = _iterkeys """Returns an iterator of all named result keys (Python 3.x only).""" values = _itervalues """Returns an iterator of all named result values (Python 3.x only).""" items = _iteritems """Returns an iterator of all named result key-value tuples (Python 3.x only).""" else: iterkeys = _iterkeys """Returns an iterator of all named result keys (Python 2.x only).""" itervalues = _itervalues """Returns an iterator of all named result values (Python 2.x only).""" iteritems = _iteritems """Returns an iterator of all named result key-value tuples (Python 2.x only).""" def keys( self ): """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.iterkeys()) def values( self ): """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.itervalues()) def items( self ): """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" return list(self.iteritems()) def haskeys( self ): """Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.""" return bool(self.__tokdict) def pop( self, *args, **kwargs): """ Removes and returns item at specified index (default=C{last}). Supports both C{list} and C{dict} semantics for C{pop()}. If passed no argument or an integer argument, it will use C{list} semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use C{dict} semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in C{dict.pop()}. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] """ if not args: args = [-1] for k,v in kwargs.items(): if k == 'default': args = (args[0], v) else: raise TypeError("pop() got an unexpected keyword argument '%s'" % k) if (isinstance(args[0], int) or len(args) == 1 or args[0] in self): index = args[0] ret = self[index] del self[index] return ret else: defaultvalue = args[1] return defaultvalue def get(self, key, defaultValue=None): """ Returns named result matching the given key, or if there is no such name, then returns the given C{defaultValue} or C{None} if no C{defaultValue} is specified. Similar to C{dict.get()}. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString("1999/12/31") print(result.get("year")) # -> '1999' print(result.get("hour", "not specified")) # -> 'not specified' print(result.get("hour")) # -> None """ if key in self: return self[key] else: return defaultValue def insert( self, index, insStr ): """ Inserts new element at location index in the list of parsed tokens. Similar to C{list.insert()}. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results def insert_locn(locn, tokens): tokens.insert(0, locn) print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] """ self.__toklist.insert(index, insStr) # fixup indices in token dictionary for name,occurrences in self.__tokdict.items(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) def append( self, item ): """ Add single element to end of ParseResults list of elements. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] """ self.__toklist.append(item) def extend( self, itemseq ): """ Add sequence of elements to end of ParseResults list of elements. Example:: patt = OneOrMore(Word(alphas)) # use a parse action to append the reverse of the matched strings, to make a palindrome def make_palindrome(tokens): tokens.extend(reversed([t[::-1] for t in tokens])) return ''.join(tokens) print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' """ if isinstance(itemseq, ParseResults): self += itemseq else: self.__toklist.extend(itemseq) def clear( self ): """ Clear all elements and results names. 
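
        A short illustrative sketch::
            result = OneOrMore(Word(nums)).parseString("0 123 321")
            print(result)   # -> ['0', '123', '321']
            result.clear()
            print(result)   # -> []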
""" del self.__toklist[:] self.__tokdict.clear() def __getattr__( self, name ): try: return self[name] except KeyError: return "" if name in self.__tokdict: if name not in self.__accumNames: return self.__tokdict[name][-1][0] else: return ParseResults([ v[0] for v in self.__tokdict[name] ]) else: return "" def __add__( self, other ): ret = self.copy() ret += other return ret def __iadd__( self, other ): if other.__tokdict: offset = len(self.__toklist) addoffset = lambda a: offset if a<0 else a+offset otheritems = other.__tokdict.items() otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) for (k,vlist) in otheritems for v in vlist] for k,v in otherdictitems: self[k] = v if isinstance(v[0],ParseResults): v[0].__parent = wkref(self) self.__toklist += other.__toklist self.__accumNames.update( other.__accumNames ) return self def __radd__(self, other): if isinstance(other,int) and other == 0: # useful for merging many ParseResults using sum() builtin return self.copy() else: # this may raise a TypeError - so be it return other + self def __repr__( self ): return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) def __str__( self ): return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' def _asStringList( self, sep='' ): out = [] for item in self.__toklist: if out and sep: out.append(sep) if isinstance( item, ParseResults ): out += item._asStringList() else: out.append( _ustr(item) ) return out def asList( self ): """ Returns the parse results as a nested list of matching tokens, all converted to strings. Example:: patt = OneOrMore(Word(alphas)) result = patt.parseString("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] # Use asList() to create an actual list result_list = result.asList() print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] """ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] def asDict( self ): """ Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} """ if PY_3: item_fn = self.items else: item_fn = self.iteritems def toItem(obj): if isinstance(obj, ParseResults): if obj.haskeys(): return obj.asDict() else: return [toItem(v) for v in obj] else: return obj return dict((k,toItem(v)) for k,v in item_fn()) def copy( self ): """ Returns a new copy of a C{ParseResults} object. 
""" ret = ParseResults( self.__toklist ) ret.__tokdict = self.__tokdict.copy() ret.__parent = self.__parent ret.__accumNames.update( self.__accumNames ) ret.__name = self.__name return ret def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): """ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. """ nl = "\n" out = [] namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() for v in vlist) nextLevelIndent = indent + " " # collapse out indents if formatting is not desired if not formatted: indent = "" nextLevelIndent = "" nl = "" selfTag = None if doctag is not None: selfTag = doctag else: if self.__name: selfTag = self.__name if not selfTag: if namedItemsOnly: return "" else: selfTag = "ITEM" out += [ nl, indent, "<", selfTag, ">" ] for i,res in enumerate(self.__toklist): if isinstance(res,ParseResults): if i in namedItems: out += [ res.asXML(namedItems[i], namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: out += [ res.asXML(None, namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: # individual token, see if there is a name for it resTag = None if i in namedItems: resTag = namedItems[i] if not resTag: if namedItemsOnly: continue else: resTag = "ITEM" xmlBodyText = _xml_escape(_ustr(res)) out += [ nl, nextLevelIndent, "<", resTag, ">", xmlBodyText, "</", resTag, ">" ] out += [ nl, indent, "</", selfTag, ">" ] return "".join(out) def __lookup(self,sub): for k,vlist in self.__tokdict.items(): for v,loc in vlist: if sub is v: return k return None def getName(self): r""" Returns the results name for this token expression. Useful when several different expressions might match at a particular location. Example:: integer = Word(nums) ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") house_number_expr = Suppress('#') + Word(nums, alphanums) user_data = (Group(house_number_expr)("house_number") | Group(ssn_expr)("ssn") | Group(integer)("age")) user_info = OneOrMore(user_data) result = user_info.parseString("22 111-22-3333 #221B") for item in result: print(item.getName(), ':', item[0]) prints:: age : 22 ssn : 111-22-3333 house_number : 221B """ if self.__name: return self.__name elif self.__parent: par = self.__parent() if par: return par.__lookup(self) else: return None elif (len(self) == 1 and len(self.__tokdict) == 1 and next(iter(self.__tokdict.values()))[0][1] in (0,-1)): return next(iter(self.__tokdict.keys())) else: return None def dump(self, indent='', depth=0, full=True): """ Diagnostic method for listing out the contents of a C{ParseResults}. Accepts an optional C{indent} argument so that this string can be embedded in a nested display of other data. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(result.dump()) prints:: ['12', '/', '31', '/', '1999'] - day: 1999 - month: 31 - year: 12 """ out = [] NL = '\n' out.append( indent+_ustr(self.asList()) ) if full: if self.haskeys(): items = sorted((str(k), v) for k,v in self.items()) for k,v in items: if out: out.append(NL) out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) if isinstance(v,ParseResults): if v: out.append( v.dump(indent,depth+1) ) else: out.append(_ustr(v)) else: out.append(repr(v)) elif any(isinstance(vv,ParseResults) for vv in self): v = self for i,vv in enumerate(v): if isinstance(vv,ParseResults): out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) else: out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) return "".join(out) def pprint(self, *args, **kwargs): """ Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']] """ pprint.pprint(self.asList(), *args, **kwargs) # add support for pickle protocol def __getstate__(self): return ( self.__toklist, ( self.__tokdict.copy(), self.__parent is not None and self.__parent() or None, self.__accumNames, self.__name ) ) def __setstate__(self,state): self.__toklist = state[0] (self.__tokdict, par, inAccumNames, self.__name) = state[1] self.__accumNames = {} self.__accumNames.update(inAccumNames) if par is not None: self.__parent = wkref(par) else: self.__parent = None def __getnewargs__(self): return self.__toklist, self.__name, self.__asList, self.__modal def __dir__(self): return (dir(type(self)) + list(self.keys())) collections.MutableMapping.register(ParseResults) def col (loc,strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ s = strg return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) def lineno(loc,strg): """Returns current line number within a string, counting newlines as line separators. The first line is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ return strg.count("\n",0,loc) + 1 def line( loc, strg ): """Returns the line of text containing loc within a string, counting newlines as line separators. 
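
    A short illustrative sketch::
        s = "abc\ndef\nghi"
        line(5, s)   # -> 'def' (loc 5 falls within the second line)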
""" lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR >= 0: return strg[lastCR+1:nextCR] else: return strg[lastCR+1:] def _defaultStartDebugAction( instring, loc, expr ): print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) def _defaultExceptionDebugAction( instring, loc, expr, exc ): print ("Exception raised:" + _ustr(exc)) def nullDebugAction(*args): """'Do-nothing' debug action, to suppress debugging output during parsing.""" pass # Only works on Python 3.x - nonlocal is toxic to Python 2 installs #~ 'decorator to trim function calls to match the arity of the target' #~ def _trim_arity(func, maxargs=3): #~ if func in singleArgBuiltins: #~ return lambda s,l,t: func(t) #~ limit = 0 #~ foundArity = False #~ def wrapper(*args): #~ nonlocal limit,foundArity #~ while 1: #~ try: #~ ret = func(*args[limit:]) #~ foundArity = True #~ return ret #~ except TypeError: #~ if limit == maxargs or foundArity: #~ raise #~ limit += 1 #~ continue #~ return wrapper # this version is Python 2.x-3.x cross-compatible 'decorator to trim function calls to match the arity of the target' def _trim_arity(func, maxargs=2): if func in singleArgBuiltins: return lambda s,l,t: func(t) limit = [0] foundArity = [False] # traceback return data structure changed in Py3.5 - normalize back to plain tuples if system_version[:2] >= (3,5): def extract_stack(limit=0): # special handling for Python 3.5.0 - extra deep call stack by 1 offset = -3 if system_version == (3,5,0) else -2 frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] return [(frame_summary.filename, frame_summary.lineno)] def extract_tb(tb, limit=0): frames = traceback.extract_tb(tb, limit=limit) frame_summary = frames[-1] return [(frame_summary.filename, frame_summary.lineno)] else: extract_stack = traceback.extract_stack extract_tb = traceback.extract_tb # synthesize what would be returned by traceback.extract_stack at the call to # user's parse action 'func', so that we don't incur call penalty at parse time LINE_DIFF = 6 # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
this_line = extract_stack(limit=2)[-1] pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) def wrapper(*args): while 1: try: ret = func(*args[limit[0]:]) foundArity[0] = True return ret except TypeError: # re-raise TypeErrors if they did not come from our arity testing if foundArity[0]: raise else: try: tb = sys.exc_info()[-1] if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: raise finally: del tb if limit[0] <= maxargs: limit[0] += 1 continue raise # copy func name to wrapper for sensible debug output func_name = "<parse action>" try: func_name = getattr(func, '__name__', getattr(func, '__class__').__name__) except Exception: func_name = str(func) wrapper.__name__ = func_name return wrapper class ParserElement(object): """Abstract base level parser element class.""" DEFAULT_WHITE_CHARS = " \n\t\r" verbose_stacktrace = False @staticmethod def setDefaultWhitespaceChars( chars ): r""" Overrides the default whitespace chars Example:: # default whitespace chars are space, <TAB> and newline OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] # change to just treat newline as significant ParserElement.setDefaultWhitespaceChars(" \t") OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] """ ParserElement.DEFAULT_WHITE_CHARS = chars @staticmethod def inlineLiteralsUsing(cls): """ Set class to be used for inclusion of string literals into a parser. Example:: # default literal class used is Literal integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # change to Suppress ParserElement.inlineLiteralsUsing(Suppress) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] """ ParserElement._literalStringClass = cls def __init__( self, savelist=False ): self.parseAction = list() self.failAction = None #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall self.strRepr = None self.resultsName = None self.saveAsList = savelist self.skipWhitespace = True self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS self.copyDefaultWhiteChars = True self.mayReturnEmpty = False # used when checking for left-recursion self.keepTabs = False self.ignoreExprs = list() self.debug = False self.streamlined = False self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index self.errmsg = "" self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) self.debugActions = ( None, None, None ) #custom debug actions self.re = None self.callPreparse = True # used to avoid redundant calls to preParse self.callDuringTry = False def copy( self ): """ Make a copy of this C{ParserElement}. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element. 
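        The copy gets its own parse-action and ignore-expression lists, so
        actions added to the copy do not affect the original.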
Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) prints:: [5120, 100, 655360, 268435456] Equivalent form of C{expr.copy()} is just C{expr()}:: integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") """ cpy = copy.copy( self ) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] if self.copyDefaultWhiteChars: cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS return cpy def setName( self, name ): """ Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) """ self.name = name self.errmsg = "Expected " + self.name if hasattr(self,"exception"): self.exception.msg = self.errmsg return self def setResultsName( self, name, listAllMatches=False ): """ Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, C{expr("name")} in place of C{expr.setResultsName("name")} - see L{I{__call__}<__call__>}. Example:: date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") """ newself = self.copy() if name.endswith("*"): name = name[:-1] listAllMatches=True newself.resultsName = name newself.modalResults = not listAllMatches return newself def setBreak(self,breakFlag = True): """Method to invoke the Python pdb debugger when this element is about to be parsed. Set C{breakFlag} to True to enable, False to disable. """ if breakFlag: _parseMethod = self._parse def breaker(instring, loc, doActions=True, callPreParse=True): import pdb pdb.set_trace() return _parseMethod( instring, loc, doActions, callPreParse ) breaker._originalParseMethod = _parseMethod self._parse = breaker else: if hasattr(self._parse,"_originalParseMethod"): self._parse = self._parse._originalParseMethod return self def setParseAction( self, *fns, **kwargs ): """ Define one or more actions to perform when successfully matching parse element definition. Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - s = the original string being parsed (see note below) - loc = the location of the matching substring - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object If the functions in fns modify the tokens, they can return them as the return value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. Optional keyword arguments: - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. 
See L{I{parseString}<parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. Example:: integer = Word(nums) date_str = integer + '/' + integer + '/' + integer date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # use parse action to convert to ints at parse time integer = Word(nums).setParseAction(lambda toks: int(toks[0])) date_str = integer + '/' + integer + '/' + integer # note that integer fields are now ints, not strings date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] """ self.parseAction = list(map(_trim_arity, list(fns))) self.callDuringTry = kwargs.get("callDuringTry", False) return self def addParseAction( self, *fns, **kwargs ): """ Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}. See examples in L{I{copy}<copy>}. """ self.parseAction += list(map(_trim_arity, list(fns))) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self def addCondition(self, *fns, **kwargs): """Add a boolean predicate function to expression's list of parse actions. See L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, functions passed to C{addCondition} need to return boolean success/fail of the condition. Optional keyword arguments: - message = define a custom message to be used in the raised exception - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) year_int = integer.copy() year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") date_str = year_int + '/' + integer + '/' + integer result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) """ msg = kwargs.get("message", "failed user-defined condition") exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException for fn in fns: def pa(s,l,t): if not bool(_trim_arity(fn)(s,l,t)): raise exc_type(s,l,msg) self.parseAction.append(pa) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self def setFailAction( self, fn ): """Define action to perform if parsing fails at this expression. Fail acton fn is a callable function that takes the arguments C{fn(s,loc,expr,err)} where: - s = string being parsed - loc = location where expression match was attempted and failed - expr = the parse expression that failed - err = the exception thrown The function returns no value. 
It may throw C{L{ParseFatalException}} if it is desired to stop parsing immediately.""" self.failAction = fn return self def _skipIgnorables( self, instring, loc ): exprsFound = True while exprsFound: exprsFound = False for e in self.ignoreExprs: try: while 1: loc,dummy = e._parse( instring, loc ) exprsFound = True except ParseException: pass return loc def preParse( self, instring, loc ): if self.ignoreExprs: loc = self._skipIgnorables( instring, loc ) if self.skipWhitespace: wt = self.whiteChars instrlen = len(instring) while loc < instrlen and instring[loc] in wt: loc += 1 return loc def parseImpl( self, instring, loc, doActions=True ): return loc, [] def postParse( self, instring, loc, tokenlist ): return tokenlist #~ @profile def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): debugging = ( self.debug ) #and doActions ) if debugging or self.failAction: #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) if (self.debugActions[0] ): self.debugActions[0]( instring, loc, self ) if callPreParse and self.callPreparse: preloc = self.preParse( instring, loc ) else: preloc = loc tokensStart = preloc try: try: loc,tokens = self.parseImpl( instring, preloc, doActions ) except IndexError: raise ParseException( instring, len(instring), self.errmsg, self ) except ParseBaseException as err: #~ print ("Exception raised:", err) if self.debugActions[2]: self.debugActions[2]( instring, tokensStart, self, err ) if self.failAction: self.failAction( instring, tokensStart, self, err ) raise else: if callPreParse and self.callPreparse: preloc = self.preParse( instring, loc ) else: preloc = loc tokensStart = preloc if self.mayIndexError or loc >= len(instring): try: loc,tokens = self.parseImpl( instring, preloc, doActions ) except IndexError: raise ParseException( instring, len(instring), self.errmsg, self ) else: loc,tokens = self.parseImpl( instring, preloc, doActions ) tokens = self.postParse( instring, loc, tokens ) retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) if self.parseAction and (doActions or self.callDuringTry): if debugging: try: for fn in self.parseAction: tokens = fn( instring, tokensStart, retTokens ) if tokens is not None: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), modal=self.modalResults ) except ParseBaseException as err: #~ print "Exception raised in user parse action:", err if (self.debugActions[2] ): self.debugActions[2]( instring, tokensStart, self, err ) raise else: for fn in self.parseAction: tokens = fn( instring, tokensStart, retTokens ) if tokens is not None: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), modal=self.modalResults ) if debugging: #~ print ("Matched",self,"->",retTokens.asList()) if (self.debugActions[1] ): self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) return loc, retTokens def tryParse( self, instring, loc ): try: return self._parse( instring, loc, doActions=False )[0] except ParseFatalException: raise ParseException( instring, loc, self.errmsg, self) def canParseNext(self, instring, loc): try: self.tryParse(instring, loc) except (ParseException, IndexError): return False else: return True class _UnboundedCache(object): def __init__(self): cache = {} self.not_in_cache = not_in_cache = object() def get(self, key): return cache.get(key, not_in_cache) def set(self, key, value): 
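                # note: 'cache' is a closure variable created in __init__, so each
                # _UnboundedCache instance owns a private dict; get/set/clear are
                # then bound per-instance below via types.MethodType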
cache[key] = value def clear(self): cache.clear() def cache_len(self): return len(cache) self.get = types.MethodType(get, self) self.set = types.MethodType(set, self) self.clear = types.MethodType(clear, self) self.__len__ = types.MethodType(cache_len, self) if _OrderedDict is not None: class _FifoCache(object): def __init__(self, size): self.not_in_cache = not_in_cache = object() cache = _OrderedDict() def get(self, key): return cache.get(key, not_in_cache) def set(self, key, value): cache[key] = value while len(cache) > size: try: cache.popitem(False) except KeyError: pass def clear(self): cache.clear() def cache_len(self): return len(cache) self.get = types.MethodType(get, self) self.set = types.MethodType(set, self) self.clear = types.MethodType(clear, self) self.__len__ = types.MethodType(cache_len, self) else: class _FifoCache(object): def __init__(self, size): self.not_in_cache = not_in_cache = object() cache = {} key_fifo = collections.deque([], size) def get(self, key): return cache.get(key, not_in_cache) def set(self, key, value): cache[key] = value while len(key_fifo) > size: cache.pop(key_fifo.popleft(), None) key_fifo.append(key) def clear(self): cache.clear() key_fifo.clear() def cache_len(self): return len(cache) self.get = types.MethodType(get, self) self.set = types.MethodType(set, self) self.clear = types.MethodType(clear, self) self.__len__ = types.MethodType(cache_len, self) # argument cache for optimizing repeated calls when backtracking through recursive expressions packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail packrat_cache_lock = RLock() packrat_cache_stats = [0, 0] # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): HIT, MISS = 0, 1 lookup = (self, instring, loc, callPreParse, doActions) with ParserElement.packrat_cache_lock: cache = ParserElement.packrat_cache value = cache.get(lookup) if value is cache.not_in_cache: ParserElement.packrat_cache_stats[MISS] += 1 try: value = self._parseNoCache(instring, loc, doActions, callPreParse) except ParseBaseException as pe: # cache a copy of the exception, without the traceback cache.set(lookup, pe.__class__(*pe.args)) raise else: cache.set(lookup, (value[0], value[1].copy())) return value else: ParserElement.packrat_cache_stats[HIT] += 1 if isinstance(value, Exception): raise value return (value[0], value[1].copy()) _parse = _parseNoCache @staticmethod def resetCache(): ParserElement.packrat_cache.clear() ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) _packratEnabled = False @staticmethod def enablePackrat(cache_size_limit=128): """Enables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. Parameters: - cache_size_limit - (default=C{128}) - if an integer value is provided will limit the size of the packrat cache; if None is passed, then the cache size will be unbounded; if 0 is passed, the cache will be effectively disabled. This speedup may break existing programs that use parse actions that have side-effects. 
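        (When a result is served from the cache, the parse actions that produced
        it are not re-executed, so actions that mutate external state may run
        fewer times than they would without memoizing.)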
For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your program must call the class method C{ParserElement.enablePackrat()}. If your program uses C{psyco} to "compile as you go", you must call C{enablePackrat} before calling C{psyco.full()}. If you do not do this, Python will crash. For best results, call C{enablePackrat()} immediately after importing pyparsing. Example:: from pip._vendor import pyparsing pyparsing.ParserElement.enablePackrat() """ if not ParserElement._packratEnabled: ParserElement._packratEnabled = True if cache_size_limit is None: ParserElement.packrat_cache = ParserElement._UnboundedCache() else: ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) ParserElement._parse = ParserElement._parseCache def parseString( self, instring, parseAll=False ): """ Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built. If you want the grammar to require that the entire input string be successfully parsed, then set C{parseAll} to True (equivalent to ending the grammar with C{L{StringEnd()}}). Note: C{parseString} implicitly calls C{expandtabs()} on the input string, in order to report proper column numbers in parse actions. If the input string contains tabs and the grammar uses parse actions that use the C{loc} argument to index into the string being parsed, you can ensure you have a consistent view of the input string by: - calling C{parseWithTabs} on your grammar before calling C{parseString} (see L{I{parseWithTabs}<parseWithTabs>}) - define your parse action using the full C{(s,loc,toks)} signature, and reference the input string using the parse action's C{s} argument - explictly expand the tabs in your input string before calling C{parseString} Example:: Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text """ ParserElement.resetCache() if not self.streamlined: self.streamline() #~ self.saveAsList = True for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = instring.expandtabs() try: loc, tokens = self._parse( instring, 0 ) if parseAll: loc = self.preParse( instring, loc ) se = Empty() + StringEnd() se._parse( instring, loc ) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc else: return tokens def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): """ Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional C{maxMatches} argument, to clip scanning after 'n' matches are found. If C{overlap} is specified, then overlapping matches will be reported. Note that the start and end locations are reported relative to the string being parsed. See L{I{parseString}<parseString>} for more information on parsing strings with embedded tabs. 
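        Note that C{scanString} is a generator; wrap the call in C{list()} if all
        matches are needed at once.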
Example:: source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens,start,end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd """ if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 try: while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn( instring, loc ) nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) except ParseException: loc = preloc+1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: nextloc = preparseFn( instring, loc ) if nextloc > loc: loc = nextLoc else: loc += 1 else: loc = nextLoc else: loc = preloc+1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def transformString( self, instring ): """ Extension to C{L{scanString}}, to modify matching text with modified tokens that may be returned from a parse action. To use C{transformString}, define a grammar and attach a parse action to it that modifies the returned token list. Invoking C{transformString()} on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse action. C{transformString()} returns the resulting transformed string. Example:: wd = Word(alphas) wd.setParseAction(lambda toks: toks[0].title()) print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) Prints:: Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. """ out = [] lastE = 0 # force preservation of <TAB>s, to minimize unwanted transformation of string, and to # keep string locs straight between transformString and scanString self.keepTabs = True try: for t,s,e in self.scanString( instring ): out.append( instring[lastE:s] ) if t: if isinstance(t,ParseResults): out += t.asList() elif isinstance(t,list): out += t else: out.append(t) lastE = e out.append(instring[lastE:]) out = [o for o in out if o] return "".join(map(_ustr,_flatten(out))) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def searchString( self, instring, maxMatches=_MAX_INT ): """ Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. 
Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) # the sum() builtin can be used to merge results into a single ParseResults object print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) prints:: [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] """ try: return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): """ Generator method to split a string using the given expression as a separator. May be called with optional C{maxsplit} argument, to limit the number of splits; and the optional C{includeSeparators} argument (default=C{False}), if the separating matching text should be included in the split results. Example:: punc = oneOf(list(".,;:/-!?")) print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) prints:: ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] """ splits = 0 last = 0 for t,s,e in self.scanString(instring, maxMatches=maxsplit): yield instring[last:s] if includeSeparators: yield t[0] last = e yield instring[last:] def __add__(self, other ): """ Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement converts them to L{Literal}s by default. Example:: greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) Prints:: Hello, World! -> ['Hello', ',', 'World', '!'] """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return And( [ self, other ] ) def __radd__(self, other ): """ Implementation of + operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other + self def __sub__(self, other): """ Implementation of - operator, returns C{L{And}} with error stop """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return self + And._ErrorStop() + other def __rsub__(self, other ): """ Implementation of - operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other - self def __mul__(self,other): """ Implementation of * operator, allows use of C{expr * 3} in place of C{expr + expr + expr}. 
Expressions may also me multiplied by a 2-integer tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples may also include C{None} as in: - C{expr*(n,None)} or C{expr*(n,)} is equivalent to C{expr*n + L{ZeroOrMore}(expr)} (read as "at least n instances of C{expr}") - C{expr*(None,n)} is equivalent to C{expr*(0,n)} (read as "0 to n instances of C{expr}") - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} Note that C{expr*(None,n)} does not raise an exception if more than n exprs exist in the input stream; that is, C{expr*(None,n)} does not enforce a maximum number of expr occurrences. If this behavior is desired, then write C{expr*(None,n) + ~expr} """ if isinstance(other,int): minElements, optElements = other,0 elif isinstance(other,tuple): other = (other + (None, None))[:2] if other[0] is None: other = (0, other[1]) if isinstance(other[0],int) and other[1] is None: if other[0] == 0: return ZeroOrMore(self) if other[0] == 1: return OneOrMore(self) else: return self*other[0] + ZeroOrMore(self) elif isinstance(other[0],int) and isinstance(other[1],int): minElements, optElements = other optElements -= minElements else: raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) else: raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) if minElements < 0: raise ValueError("cannot multiply ParserElement by negative value") if optElements < 0: raise ValueError("second tuple value must be greater or equal to first tuple value") if minElements == optElements == 0: raise ValueError("cannot multiply ParserElement by 0 or (0,0)") if (optElements): def makeOptionalList(n): if n>1: return Optional(self + makeOptionalList(n-1)) else: return Optional(self) if minElements: if minElements == 1: ret = self + makeOptionalList(optElements) else: ret = And([self]*minElements) + makeOptionalList(optElements) else: ret = makeOptionalList(optElements) else: if minElements == 1: ret = self else: ret = And([self]*minElements) return ret def __rmul__(self, other): return self.__mul__(other) def __or__(self, other ): """ Implementation of | operator - returns C{L{MatchFirst}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return MatchFirst( [ self, other ] ) def __ror__(self, other ): """ Implementation of | operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other | self def __xor__(self, other ): """ Implementation of ^ operator - returns C{L{Or}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return Or( [ self, other ] ) def __rxor__(self, other ): """ Implementation of ^ operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): 
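            # operand cannot be converted to a ParserElement - warn and return
            # None, as the other binary operator methods do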
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other ^ self def __and__(self, other ): """ Implementation of & operator - returns C{L{Each}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return Each( [ self, other ] ) def __rand__(self, other ): """ Implementation of & operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other & self def __invert__( self ): """ Implementation of ~ operator - returns C{L{NotAny}} """ return NotAny( self ) def __call__(self, name=None): """ Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be passed as C{True}. If C{name} is omitted, same as calling C{L{copy}}. Example:: # these are equivalent userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") """ if name is not None: return self.setResultsName(name) else: return self.copy() def suppress( self ): """ Suppresses the output of this C{ParserElement}; useful to keep punctuation from cluttering up returned output. """ return Suppress( self ) def leaveWhitespace( self ): """ Disables the skipping of whitespace before matching the characters in the C{ParserElement}'s defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars. """ self.skipWhitespace = False return self def setWhitespaceChars( self, chars ): """ Overrides the default whitespace chars """ self.skipWhitespace = True self.whiteChars = chars self.copyDefaultWhiteChars = False return self def parseWithTabs( self ): """ Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string. Must be called before C{parseString} when the input grammar contains elements that match C{<TAB>} characters. """ self.keepTabs = True return self def ignore( self, other ): """ Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] """ if isinstance(other, basestring): other = Suppress(other) if isinstance( other, Suppress ): if other not in self.ignoreExprs: self.ignoreExprs.append(other) else: self.ignoreExprs.append( Suppress( other.copy() ) ) return self def setDebugActions( self, startAction, successAction, exceptionAction ): """ Enable display of debugging messages while doing pattern matching. """ self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) self.debug = True return self def setDebug( self, flag=True ): """ Enable display of debugging messages while doing pattern matching. 
Set C{flag} to True to enable, False to disable. Example:: wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") prints:: Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) Exception raised:Expected alphaword (at char 4), (line:1, col:5) Match alphaword at loc 7(1,8) Matched alphaword -> ['xyz'] Match alphaword at loc 11(1,12) Exception raised:Expected alphaword (at char 12), (line:1, col:13) Match alphaword at loc 15(1,16) Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be specified using L{setDebugActions}. Prior to attempting to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. """ if flag: self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) else: self.debug = False return self def __str__( self ): return self.name def __repr__( self ): return _ustr(self) def streamline( self ): self.streamlined = True self.strRepr = None return self def checkRecursion( self, parseElementList ): pass def validate( self, validateTrace=[] ): """ Check defined expressions for valid structure, check for infinite recursive definitions. """ self.checkRecursion( [] ) def parseFile( self, file_or_filename, parseAll=False ): """ Execute the parse expression on the given file or filename. If a filename is specified (instead of a file object), the entire file is opened, read, and closed before parsing. """ try: file_contents = file_or_filename.read() except AttributeError: with open(file_or_filename, "r") as f: file_contents = f.read() try: return self.parseString(file_contents, parseAll) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def __eq__(self,other): if isinstance(other, ParserElement): return self is other or vars(self) == vars(other) elif isinstance(other, basestring): return self.matches(other) else: return super(ParserElement,self)==other def __ne__(self,other): return not (self == other) def __hash__(self): return hash(id(self)) def __req__(self,other): return self == other def __rne__(self,other): return not (self == other) def matches(self, testString, parseAll=True): """ Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up larger parser. 
Parameters: - testString - to test against this expression for a match - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests Example:: expr = Word(nums) assert expr.matches("100") """ try: self.parseString(_ustr(testString), parseAll=parseAll) return True except ParseBaseException: return False def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False): """ Execute the parse expression on a series of test strings, showing each test, the parsed results or where the parse failed. Quick and easy way to run a parse expression against a list of sample strings. Parameters: - tests - a list of separate test strings, or a multiline string of test strings - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - comment - (default=C{'#'}) - expression for indicating embedded comments in the test string; pass None to disable comment filtering - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; if False, only dump nested list - printResults - (default=C{True}) prints test output to stdout - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing Returns: a (success, results) tuple, where success indicates that all tests succeeded (or failed if C{failureTests} is True), and the results contain a list of lines of each test's output Example:: number_expr = pyparsing_common.number.copy() result = number_expr.runTests(''' # unsigned integer 100 # negative integer -100 # float with scientific notation 6.02e23 # integer with scientific notation 1e-12 ''') print("Success" if result[0] else "Failed!") result = number_expr.runTests(''' # stray character 100Z # missing leading digit before '.' -.100 # too many '.' 3.14.159 ''', failureTests=True) print("Success" if result[0] else "Failed!") prints:: # unsigned integer 100 [100] # negative integer -100 [-100] # float with scientific notation 6.02e23 [6.02e+23] # integer with scientific notation 1e-12 [1e-12] Success # stray character 100Z ^ FAIL: Expected end of text (at char 3), (line:1, col:4) # missing leading digit before '.' -.100 ^ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) # too many '.' 3.14.159 ^ FAIL: Expected end of text (at char 4), (line:1, col:5) Success Each test string must be on a single line. If you want to test a string that spans multiple lines, create a test like this:: expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") (Note that this is a raw string literal, you must include the leading 'r'.) 
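
        The return value can be unpacked and inspected directly; a short
        illustrative sketch::
            success, results = Word(nums).runTests("100\n200", printResults=False)
            # success -> True; results -> a list of (test string, ParseResults) pairs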
""" if isinstance(tests, basestring): tests = list(map(str.strip, tests.rstrip().splitlines())) if isinstance(comment, basestring): comment = Literal(comment) allResults = [] comments = [] success = True for t in tests: if comment is not None and comment.matches(t, False) or comments and not t: comments.append(t) continue if not t: continue out = ['\n'.join(comments), t] comments = [] try: t = t.replace(r'\n','\n') result = self.parseString(t, parseAll=parseAll) out.append(result.dump(full=fullDump)) success = success and not failureTests except ParseBaseException as pe: fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" if '\n' in t: out.append(line(pe.loc, t)) out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) else: out.append(' '*pe.loc + '^' + fatal) out.append("FAIL: " + str(pe)) success = success and failureTests result = pe except Exception as exc: out.append("FAIL-EXCEPTION: " + str(exc)) success = success and failureTests result = exc if printResults: if fullDump: out.append('') print('\n'.join(out)) allResults.append((t, result)) return success, allResults class Token(ParserElement): """ Abstract C{ParserElement} subclass, for defining atomic matching patterns. """ def __init__( self ): super(Token,self).__init__( savelist=False ) class Empty(Token): """ An empty token, will always match. """ def __init__( self ): super(Empty,self).__init__() self.name = "Empty" self.mayReturnEmpty = True self.mayIndexError = False class NoMatch(Token): """ A token that will never match. """ def __init__( self ): super(NoMatch,self).__init__() self.name = "NoMatch" self.mayReturnEmpty = True self.mayIndexError = False self.errmsg = "Unmatchable token" def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, self.errmsg, self) class Literal(Token): """ Token to exactly match a specified string. Example:: Literal('blah').parseString('blah') # -> ['blah'] Literal('blah').parseString('blahfooblah') # -> ['blah'] Literal('blah').parseString('bla') # -> Exception: Expected "blah" For case-insensitive matching, use L{CaselessLiteral}. For keyword matching (force word break before and after the matched string), use L{Keyword} or L{CaselessKeyword}. """ def __init__( self, matchString ): super(Literal,self).__init__() self.match = matchString self.matchLen = len(matchString) try: self.firstMatchChar = matchString[0] except IndexError: warnings.warn("null string passed to Literal; use Empty() instead", SyntaxWarning, stacklevel=2) self.__class__ = Empty self.name = '"%s"' % _ustr(self.match) self.errmsg = "Expected " + self.name self.mayReturnEmpty = False self.mayIndexError = False # Performance tuning: this routine gets called a *lot* # if this is a single character match string and the first character matches, # short-circuit as quickly as possible, and avoid calling startswith #~ @profile def parseImpl( self, instring, loc, doActions=True ): if (instring[loc] == self.firstMatchChar and (self.matchLen==1 or instring.startswith(self.match,loc)) ): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) _L = Literal ParserElement._literalStringClass = Literal class Keyword(Token): """ Token to exactly match a specified string as a keyword, that is, it must be immediately followed by a non-keyword character. Compare with C{L{Literal}}: - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. 
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, defaulting to all alphanumerics + "_" and "$" - C{caseless} allows case-insensitive matching, default is C{False}. Example:: Keyword("start").parseString("start") # -> ['start'] Keyword("start").parseString("starting") # -> Exception For case-insensitive matching, use L{CaselessKeyword}. """ DEFAULT_KEYWORD_CHARS = alphanums+"_$" def __init__( self, matchString, identChars=None, caseless=False ): super(Keyword,self).__init__() if identChars is None: identChars = Keyword.DEFAULT_KEYWORD_CHARS self.match = matchString self.matchLen = len(matchString) try: self.firstMatchChar = matchString[0] except IndexError: warnings.warn("null string passed to Keyword; use Empty() instead", SyntaxWarning, stacklevel=2) self.name = '"%s"' % self.match self.errmsg = "Expected " + self.name self.mayReturnEmpty = False self.mayIndexError = False self.caseless = caseless if caseless: self.caselessmatch = matchString.upper() identChars = identChars.upper() self.identChars = set(identChars) def parseImpl( self, instring, loc, doActions=True ): if self.caseless: if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and (loc == 0 or instring[loc-1].upper() not in self.identChars) ): return loc+self.matchLen, self.match else: if (instring[loc] == self.firstMatchChar and (self.matchLen==1 or instring.startswith(self.match,loc)) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and (loc == 0 or instring[loc-1] not in self.identChars) ): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) def copy(self): c = super(Keyword,self).copy() c.identChars = Keyword.DEFAULT_KEYWORD_CHARS return c @staticmethod def setDefaultKeywordChars( chars ): """Overrides the default Keyword chars """ Keyword.DEFAULT_KEYWORD_CHARS = chars class CaselessLiteral(Literal): """ Token to match a specified string, ignoring case of letters. Note: the matched results will always be in the case of the given match string, NOT the case of the input text. Example:: OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] (Contrast with example for L{CaselessKeyword}.) """ def __init__( self, matchString ): super(CaselessLiteral,self).__init__( matchString.upper() ) # Preserve the defining literal. self.returnString = matchString self.name = "'%s'" % self.returnString self.errmsg = "Expected " + self.name def parseImpl( self, instring, loc, doActions=True ): if instring[ loc:loc+self.matchLen ].upper() == self.match: return loc+self.matchLen, self.returnString raise ParseException(instring, loc, self.errmsg, self) class CaselessKeyword(Keyword): """ Caseless version of L{Keyword}. Example:: OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] (Contrast with example for L{CaselessLiteral}.) 
""" def __init__( self, matchString, identChars=None ): super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) def parseImpl( self, instring, loc, doActions=True ): if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) class CloseMatch(Token): """ A variation on L{Literal} which matches "close" matches, that is, strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters: - C{match_string} - string to be matched - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match The results from a successful parse will contain the matched text from the input string and the following named results: - C{mismatches} - a list of the positions within the match_string where mismatches were found - C{original} - the original match_string used to compare against the input string If C{mismatches} is an empty list, then the match was an exact match. Example:: patt = CloseMatch("ATCATCGAATGGA") patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) # exact match patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) # close match allowing up to 2 mismatches patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) """ def __init__(self, match_string, maxMismatches=1): super(CloseMatch,self).__init__() self.name = match_string self.match_string = match_string self.maxMismatches = maxMismatches self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) self.mayIndexError = False self.mayReturnEmpty = False def parseImpl( self, instring, loc, doActions=True ): start = loc instrlen = len(instring) maxloc = start + len(self.match_string) if maxloc <= instrlen: match_string = self.match_string match_stringloc = 0 mismatches = [] maxMismatches = self.maxMismatches for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): src,mat = s_m if src != mat: mismatches.append(match_stringloc) if len(mismatches) > maxMismatches: break else: loc = match_stringloc + 1 results = ParseResults([instring[start:loc]]) results['original'] = self.match_string results['mismatches'] = mismatches return loc, results raise ParseException(instring, loc, self.errmsg, self) class Word(Token): """ Token for matching words composed of allowed character sets. Defined with string containing all allowed initial characters, an optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. An optional C{excludeChars} parameter can list characters that might be found in the input C{bodyChars} string; useful to define a word of all printables except for one or two characters, for instance. 
L{srange} is useful for defining custom character set strings for defining C{Word} expressions, using range notation from regular expression character sets. A common mistake is to use C{Word} to match a specific literal string, as in C{Word("Address")}. Remember that C{Word} uses the string argument to define I{sets} of matchable characters. This expression would match "Add", "AAA", "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an exact literal string, use L{Literal} or L{Keyword}. pyparsing includes helper strings for building Words: - L{alphas} - L{nums} - L{alphanums} - L{hexnums} - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) - L{printables} (any non-whitespace character) Example:: # a word composed of digits integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) # a word with a leading capital, and zero or more lowercase capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' hostname = Word(alphas, alphanums+'-') # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") """ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): super(Word,self).__init__() if excludeChars: initChars = ''.join(c for c in initChars if c not in excludeChars) if bodyChars: bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) self.initCharsOrig = initChars self.initChars = set(initChars) if bodyChars : self.bodyCharsOrig = bodyChars self.bodyChars = set(bodyChars) else: self.bodyCharsOrig = initChars self.bodyChars = set(initChars) self.maxSpecified = max > 0 if min < 1: raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.asKeyword = asKeyword if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): if self.bodyCharsOrig == self.initCharsOrig: self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) elif len(self.initCharsOrig) == 1: self.reString = "%s[%s]*" % \ (re.escape(self.initCharsOrig), _escapeRegexRangeChars(self.bodyCharsOrig),) else: self.reString = "[%s][%s]*" % \ (_escapeRegexRangeChars(self.initCharsOrig), _escapeRegexRangeChars(self.bodyCharsOrig),) if self.asKeyword: self.reString = r"\b"+self.reString+r"\b" try: self.re = re.compile( self.reString ) except Exception: self.re = None def parseImpl( self, instring, loc, doActions=True ): if self.re: result = self.re.match(instring,loc) if not result: raise ParseException(instring, loc, self.errmsg, self) loc = result.end() return loc, result.group() if not(instring[ loc ] in self.initChars): raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 instrlen = len(instring) bodychars = self.bodyChars maxloc = start + self.maxLen maxloc = min( maxloc, instrlen ) while loc < maxloc and instring[loc] in bodychars: loc += 1 throwException = False if loc - start < self.minLen: throwException = True if 
self.maxSpecified and loc < instrlen and instring[loc] in bodychars: throwException = True if self.asKeyword: if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): throwException = True if throwException: raise ParseException(instring, loc, self.errmsg, self) return loc, instring[start:loc] def __str__( self ): try: return super(Word,self).__str__() except Exception: pass if self.strRepr is None: def charsAsStr(s): if len(s)>4: return s[:4]+"..." else: return s if ( self.initCharsOrig != self.bodyCharsOrig ): self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) else: self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) return self.strRepr class Regex(Token): r""" Token for matching strings that match a given regular expression. Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as named parse results. Example:: realnum = Regex(r"[+-]?\d+\.\d*") date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") """ compiledREtype = type(re.compile("[A-Z]")) def __init__( self, pattern, flags=0): """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" super(Regex,self).__init__() if isinstance(pattern, basestring): if not pattern: warnings.warn("null string passed to Regex; use Empty() instead", SyntaxWarning, stacklevel=2) self.pattern = pattern self.flags = flags try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % pattern, SyntaxWarning, stacklevel=2) raise elif isinstance(pattern, Regex.compiledREtype): self.re = pattern self.pattern = \ self.reString = str(pattern) self.flags = flags else: raise ValueError("Regex may only be constructed with a string or a compiled RE object") self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): result = self.re.match(instring,loc) if not result: raise ParseException(instring, loc, self.errmsg, self) loc = result.end() d = result.groupdict() ret = ParseResults(result.group()) if d: for k in d: ret[k] = d[k] return loc,ret def __str__( self ): try: return super(Regex,self).__str__() except Exception: pass if self.strRepr is None: self.strRepr = "Re:(%s)" % repr(self.pattern) return self.strRepr class QuotedString(Token): r""" Token for matching strings that are delimited by quoting characters. 
Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=C{None}) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True}) Example:: qs = QuotedString('"') print(qs.searchString('lsjdf "This is the quote" sldjf')) complex_qs = QuotedString('{{', endQuoteChar='}}') print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) sql_qs = QuotedString('"', escQuote='""') print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) prints:: [['This is the quote']] [['This is the "quote"']] [['This is the quote with "embedded" quotes']] """ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): super(QuotedString,self).__init__() # remove white space from quote chars - won't work anyway quoteChar = quoteChar.strip() if not quoteChar: warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise SyntaxError() if endQuoteChar is None: endQuoteChar = quoteChar else: endQuoteChar = endQuoteChar.strip() if not endQuoteChar: warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise SyntaxError() self.quoteChar = quoteChar self.quoteCharLen = len(quoteChar) self.firstQuoteChar = quoteChar[0] self.endQuoteChar = endQuoteChar self.endQuoteCharLen = len(endQuoteChar) self.escChar = escChar self.escQuote = escQuote self.unquoteResults = unquoteResults self.convertWhitespaceEscapes = convertWhitespaceEscapes if multiline: self.flags = re.MULTILINE | re.DOTALL self.pattern = r'%s(?:[^%s%s]' % \ ( re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) else: self.flags = 0 self.pattern = r'%s(?:[^%s\n\r%s]' % \ ( re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) if len(self.endQuoteChar) > 1: self.pattern += ( '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), _escapeRegexRangeChars(self.endQuoteChar[i])) for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' ) if escQuote: self.pattern += (r'|(?:%s)' % re.escape(escQuote)) if escChar: self.pattern += (r'|(?:%s.)' % re.escape(escChar)) self.escCharReplacePattern = re.escape(self.escChar)+"(.)" self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, SyntaxWarning, stacklevel=2) raise self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None if not result: raise
ParseException(instring, loc, self.errmsg, self) loc = result.end() ret = result.group() if self.unquoteResults: # strip off quotes ret = ret[self.quoteCharLen:-self.endQuoteCharLen] if isinstance(ret,basestring): # replace escaped whitespace if '\\' in ret and self.convertWhitespaceEscapes: ws_map = { r'\t' : '\t', r'\n' : '\n', r'\f' : '\f', r'\r' : '\r', } for wslit,wschar in ws_map.items(): ret = ret.replace(wslit, wschar) # replace escaped characters if self.escChar: ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) # replace escaped quotes if self.escQuote: ret = ret.replace(self.escQuote, self.endQuoteChar) return loc, ret def __str__( self ): try: return super(QuotedString,self).__str__() except Exception: pass if self.strRepr is None: self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) return self.strRepr class CharsNotIn(Token): """ Token for matching words composed of characters I{not} in a given set (will include whitespace in matched characters if not listed in the provided exclusion set - see example). Defined with string containing all disallowed characters, and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. Example:: # define a comma-separated-value as anything that is not a ',' csv_value = CharsNotIn(',') print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) prints:: ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] """ def __init__( self, notChars, min=1, max=0, exact=0 ): super(CharsNotIn,self).__init__() self.skipWhitespace = False self.notChars = notChars if min < 1: raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayReturnEmpty = ( self.minLen == 0 ) self.mayIndexError = False def parseImpl( self, instring, loc, doActions=True ): if instring[loc] in self.notChars: raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 notchars = self.notChars maxlen = min( start+self.maxLen, len(instring) ) while loc < maxlen and \ (instring[loc] not in notchars): loc += 1 if loc - start < self.minLen: raise ParseException(instring, loc, self.errmsg, self) return loc, instring[start:loc] def __str__( self ): try: return super(CharsNotIn, self).__str__() except Exception: pass if self.strRepr is None: if len(self.notChars) > 4: self.strRepr = "!W:(%s...)" % self.notChars[:4] else: self.strRepr = "!W:(%s)" % self.notChars return self.strRepr class White(Token): """ Special matching class for matching whitespace. Normally, whitespace is ignored by pyparsing grammars. This class is included when some whitespace structures are significant. Define with a string containing the whitespace characters to be matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, as defined for the C{L{Word}} class. 
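Example (a minimal sketch; the expressions shown are illustrative, not from the library docs) - making the tab between a label and a value significant::

            tab = White("\\t")
            row = Word(alphas) + tab + Word(nums)
            print(row.parseString("age\\t42"))  # -> ['age', '\\t', '42']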
""" whiteStrs = { " " : "<SPC>", "\t": "<TAB>", "\n": "<LF>", "\r": "<CR>", "\f": "<FF>", } def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): super(White,self).__init__() self.matchWhite = ws self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) #~ self.leaveWhitespace() self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) self.mayReturnEmpty = True self.errmsg = "Expected " + self.name self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact def parseImpl( self, instring, loc, doActions=True ): if not(instring[ loc ] in self.matchWhite): raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 maxloc = start + self.maxLen maxloc = min( maxloc, len(instring) ) while loc < maxloc and instring[loc] in self.matchWhite: loc += 1 if loc - start < self.minLen: raise ParseException(instring, loc, self.errmsg, self) return loc, instring[start:loc] class _PositionToken(Token): def __init__( self ): super(_PositionToken,self).__init__() self.name=self.__class__.__name__ self.mayReturnEmpty = True self.mayIndexError = False class GoToColumn(_PositionToken): """ Token to advance to a specific column of input text; useful for tabular report scraping. """ def __init__( self, colno ): super(GoToColumn,self).__init__() self.col = colno def preParse( self, instring, loc ): if col(loc,instring) != self.col: instrlen = len(instring) if self.ignoreExprs: loc = self._skipIgnorables( instring, loc ) while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : loc += 1 return loc def parseImpl( self, instring, loc, doActions=True ): thiscol = col( loc, instring ) if thiscol > self.col: raise ParseException( instring, loc, "Text not in expected column", self ) newloc = loc + self.col - thiscol ret = instring[ loc: newloc ] return newloc, ret class LineStart(_PositionToken): """ Matches if current position is at the beginning of a line within the parse string Example:: test = '''\ AAA this line AAA and this line AAA but not this one B AAA and definitely not this one ''' for t in (LineStart() + 'AAA' + restOfLine).searchString(test): print(t) Prints:: ['AAA', ' this line'] ['AAA', ' and this line'] """ def __init__( self ): super(LineStart,self).__init__() self.errmsg = "Expected start of line" def parseImpl( self, instring, loc, doActions=True ): if col(loc, instring) == 1: return loc, [] raise ParseException(instring, loc, self.errmsg, self) class LineEnd(_PositionToken): """ Matches if current position is at the end of a line within the parse string """ def __init__( self ): super(LineEnd,self).__init__() self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) self.errmsg = "Expected end of line" def parseImpl( self, instring, loc, doActions=True ): if loc<len(instring): if instring[loc] == "\n": return loc+1, "\n" else: raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): return loc+1, [] else: raise ParseException(instring, loc, self.errmsg, self) class StringStart(_PositionToken): """ Matches if current position is at the beginning of the parse string """ def __init__( self ): super(StringStart,self).__init__() self.errmsg = "Expected start of text" def parseImpl( self, instring, loc, doActions=True ): if loc != 0: # see if entire string up to here is just whitespace and ignoreables if loc != self.preParse( instring, 0 ): raise ParseException(instring, loc, self.errmsg, self) return 
loc, [] class StringEnd(_PositionToken): """ Matches if current position is at the end of the parse string """ def __init__( self ): super(StringEnd,self).__init__() self.errmsg = "Expected end of text" def parseImpl( self, instring, loc, doActions=True ): if loc < len(instring): raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): return loc+1, [] elif loc > len(instring): return loc, [] else: raise ParseException(instring, loc, self.errmsg, self) class WordStart(_PositionToken): """ Matches if the current position is at the beginning of a Word, and is not preceded by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{\b} behavior of regular expressions, use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of the string being parsed, or at the beginning of a line. """ def __init__(self, wordChars = printables): super(WordStart,self).__init__() self.wordChars = set(wordChars) self.errmsg = "Not at the start of a word" def parseImpl(self, instring, loc, doActions=True ): if loc != 0: if (instring[loc-1] in self.wordChars or instring[loc] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] class WordEnd(_PositionToken): """ Matches if the current position is at the end of a Word, and is not followed by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{\b} behavior of regular expressions, use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of the string being parsed, or at the end of a line. """ def __init__(self, wordChars = printables): super(WordEnd,self).__init__() self.wordChars = set(wordChars) self.skipWhitespace = False self.errmsg = "Not at the end of a word" def parseImpl(self, instring, loc, doActions=True ): instrlen = len(instring) if instrlen>0 and loc<instrlen: if (instring[loc] in self.wordChars or instring[loc-1] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] class ParseExpression(ParserElement): """ Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
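A quick orientation sketch (using this module's own classes; the sample expressions are illustrative) - the concrete subclasses are normally built with operators rather than constructed directly::

        expr_and   = Word(nums) + Word(alphas)   # And - all expressions, in order
        expr_or    = Word(nums) ^ Word(alphas)   # Or - alternative matching the longest string wins
        expr_first = Word(nums) | Word(alphas)   # MatchFirst - first listed alternative wins
        expr_each  = Word(nums) & Word(alphas)   # Each - all expressions, in any order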
""" def __init__( self, exprs, savelist = False ): super(ParseExpression,self).__init__(savelist) if isinstance( exprs, _generatorType ): exprs = list(exprs) if isinstance( exprs, basestring ): self.exprs = [ ParserElement._literalStringClass( exprs ) ] elif isinstance( exprs, collections.Iterable ): exprs = list(exprs) # if sequence of strings provided, wrap with Literal if all(isinstance(expr, basestring) for expr in exprs): exprs = map(ParserElement._literalStringClass, exprs) self.exprs = list(exprs) else: try: self.exprs = list( exprs ) except TypeError: self.exprs = [ exprs ] self.callPreparse = False def __getitem__( self, i ): return self.exprs[i] def append( self, other ): self.exprs.append( other ) self.strRepr = None return self def leaveWhitespace( self ): """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on all contained expressions.""" self.skipWhitespace = False self.exprs = [ e.copy() for e in self.exprs ] for e in self.exprs: e.leaveWhitespace() return self def ignore( self, other ): if isinstance( other, Suppress ): if other not in self.ignoreExprs: super( ParseExpression, self).ignore( other ) for e in self.exprs: e.ignore( self.ignoreExprs[-1] ) else: super( ParseExpression, self).ignore( other ) for e in self.exprs: e.ignore( self.ignoreExprs[-1] ) return self def __str__( self ): try: return super(ParseExpression,self).__str__() except Exception: pass if self.strRepr is None: self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) return self.strRepr def streamline( self ): super(ParseExpression,self).streamline() for e in self.exprs: e.streamline() # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) # but only if there are no parse actions or resultsNames on the nested And's # (likewise for Or's and MatchFirst's) if ( len(self.exprs) == 2 ): other = self.exprs[0] if ( isinstance( other, self.__class__ ) and not(other.parseAction) and other.resultsName is None and not other.debug ): self.exprs = other.exprs[:] + [ self.exprs[1] ] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError other = self.exprs[-1] if ( isinstance( other, self.__class__ ) and not(other.parseAction) and other.resultsName is None and not other.debug ): self.exprs = self.exprs[:-1] + other.exprs[:] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError self.errmsg = "Expected " + _ustr(self) return self def setResultsName( self, name, listAllMatches=False ): ret = super(ParseExpression,self).setResultsName(name,listAllMatches) return ret def validate( self, validateTrace=[] ): tmp = validateTrace[:]+[self] for e in self.exprs: e.validate(tmp) self.checkRecursion( [] ) def copy(self): ret = super(ParseExpression,self).copy() ret.exprs = [e.copy() for e in self.exprs] return ret class And(ParseExpression): """ Requires all given C{ParseExpression}s to be found in the given order. Expressions may be separated by whitespace. May be constructed using the C{'+'} operator. May also be constructed using the C{'-'} operator, which will suppress backtracking. 
Example:: integer = Word(nums) name_expr = OneOrMore(Word(alphas)) expr = And([integer("id"),name_expr("name"),integer("age")]) # more easily written as: expr = integer("id") + name_expr("name") + integer("age") """ class _ErrorStop(Empty): def __init__(self, *args, **kwargs): super(And._ErrorStop,self).__init__(*args, **kwargs) self.name = '-' self.leaveWhitespace() def __init__( self, exprs, savelist = True ): super(And,self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) self.setWhitespaceChars( self.exprs[0].whiteChars ) self.skipWhitespace = self.exprs[0].skipWhitespace self.callPreparse = True def parseImpl( self, instring, loc, doActions=True ): # pass False as last arg to _parse for first element, since we already # pre-parsed the string as part of our And pre-parsing loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) errorStop = False for e in self.exprs[1:]: if isinstance(e, And._ErrorStop): errorStop = True continue if errorStop: try: loc, exprtokens = e._parse( instring, loc, doActions ) except ParseSyntaxException: raise except ParseBaseException as pe: pe.__traceback__ = None raise ParseSyntaxException._from_exception(pe) except IndexError: raise ParseSyntaxException(instring, len(instring), self.errmsg, self) else: loc, exprtokens = e._parse( instring, loc, doActions ) if exprtokens or exprtokens.haskeys(): resultlist += exprtokens return loc, resultlist def __iadd__(self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) return self.append( other ) #And( [ self, other ] ) def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) if not e.mayReturnEmpty: break def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr class Or(ParseExpression): """ Requires that at least one C{ParseExpression} is found. If two expressions match, the expression that matches the longest string will be used. May be constructed using the C{'^'} operator. Example:: # construct Or using '^' operator number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) print(number.searchString("123 3.1416 789")) prints:: [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(Or,self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) else: self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = -1 maxException = None matches = [] for e in self.exprs: try: loc2 = e.tryParse( instring, loc ) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: maxException = ParseException(instring,len(instring),e.errmsg,self) maxExcLoc = len(instring) else: # save match among all matches, to retry longest to shortest matches.append((loc2, e)) if matches: matches.sort(key=lambda x: -x[0]) for _,e in matches: try: return e._parse( instring, loc, doActions ) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc if maxException is not None: maxException.msg = self.errmsg raise maxException else: raise ParseException(instring, loc, "no defined alternatives to match", self) def __ixor__(self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) return self.append( other ) #Or( [ self, other ] ) def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) class MatchFirst(ParseExpression): """ Requires that at least one C{ParseExpression} is found. If two expressions match, the first one listed is the one that will match. May be constructed using the C{'|'} operator. Example:: # construct MatchFirst using '|' operator # watch the order of expressions to match number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] # put more selective expression first number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(MatchFirst,self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) else: self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = -1 maxException = None for e in self.exprs: try: ret = e._parse( instring, loc, doActions ) return ret except ParseException as err: if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: maxException = ParseException(instring,len(instring),e.errmsg,self) maxExcLoc = len(instring) # only got here if no expression matched, raise exception for match that made it the furthest else: if maxException is not None: maxException.msg = self.errmsg raise maxException else: raise ParseException(instring, loc, "no defined alternatives to match", self) def __ior__(self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) return self.append( other ) #MatchFirst( [ self, other ] ) def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) class Each(ParseExpression): """ Requires all given C{ParseExpression}s to be found, but in any order. Expressions may be separated by whitespace. May be constructed using the C{'&'} operator. Example:: color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") integer = Word(nums) shape_attr = "shape:" + shape_type("shape") posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") color_attr = "color:" + color("color") size_attr = "size:" + integer("size") # use Each (using operator '&') to accept attributes in any order # (shape and posn are required, color and size are optional) shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) shape_spec.runTests(''' shape: SQUARE color: BLACK posn: 100, 120 shape: CIRCLE size: 50 color: BLUE posn: 50,80 color:GREEN size:20 shape:TRIANGLE posn:20,40 ''' ) prints:: shape: SQUARE color: BLACK posn: 100, 120 ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - color: BLACK - posn: ['100', ',', '120'] - x: 100 - y: 120 - shape: SQUARE shape: CIRCLE size: 50 color: BLUE posn: 50,80 ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - color: BLUE - posn: ['50', ',', '80'] - x: 50 - y: 80 - shape: CIRCLE - size: 50 color: GREEN size: 20 shape: TRIANGLE posn: 20,40 ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - color: GREEN - posn: ['20', ',', '40'] - x: 20 - y: 40 - shape: TRIANGLE - size: 20 """ def __init__( self, exprs, savelist = True ): super(Each,self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) self.skipWhitespace = True self.initExprGroups = True def parseImpl( self, instring, loc, doActions=True ): if self.initExprGroups: self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not 
isinstance(e,Optional)] self.optionals = opt1 + opt2 self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] self.required += self.multirequired self.initExprGroups = False tmpLoc = loc tmpReqd = self.required[:] tmpOpt = self.optionals[:] matchOrder = [] keepMatching = True while keepMatching: tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired failed = [] for e in tmpExprs: try: tmpLoc = e.tryParse( instring, tmpLoc ) except ParseException: failed.append(e) else: matchOrder.append(self.opt1map.get(id(e),e)) if e in tmpReqd: tmpReqd.remove(e) elif e in tmpOpt: tmpOpt.remove(e) if len(failed) == len(tmpExprs): keepMatching = False if tmpReqd: missing = ", ".join(_ustr(e) for e in tmpReqd) raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) # add any unmatched Optionals, in case they have default values defined matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] resultlist = [] for e in matchOrder: loc,results = e._parse(instring,loc,doActions) resultlist.append(results) finalResults = sum(resultlist, ParseResults([])) return loc, finalResults def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) class ParseElementEnhance(ParserElement): """ Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. 
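A quick orientation sketch (using this module's own classes; the sample expressions are illustrative) - typical concrete subclasses wrap a single contained expression::

        opt   = Optional(Word(nums))      # zero or one
        rep   = OneOrMore(Word(alphas))   # one or more
        ahead = FollowedBy(Literal(':'))  # lookahead, consumes no input
        neg   = ~Literal('END')           # NotAny, via the '~' operator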
""" def __init__( self, expr, savelist=False ): super(ParseElementEnhance,self).__init__(savelist) if isinstance( expr, basestring ): if issubclass(ParserElement._literalStringClass, Token): expr = ParserElement._literalStringClass(expr) else: expr = ParserElement._literalStringClass(Literal(expr)) self.expr = expr self.strRepr = None if expr is not None: self.mayIndexError = expr.mayIndexError self.mayReturnEmpty = expr.mayReturnEmpty self.setWhitespaceChars( expr.whiteChars ) self.skipWhitespace = expr.skipWhitespace self.saveAsList = expr.saveAsList self.callPreparse = expr.callPreparse self.ignoreExprs.extend(expr.ignoreExprs) def parseImpl( self, instring, loc, doActions=True ): if self.expr is not None: return self.expr._parse( instring, loc, doActions, callPreParse=False ) else: raise ParseException("",loc,self.errmsg,self) def leaveWhitespace( self ): self.skipWhitespace = False self.expr = self.expr.copy() if self.expr is not None: self.expr.leaveWhitespace() return self def ignore( self, other ): if isinstance( other, Suppress ): if other not in self.ignoreExprs: super( ParseElementEnhance, self).ignore( other ) if self.expr is not None: self.expr.ignore( self.ignoreExprs[-1] ) else: super( ParseElementEnhance, self).ignore( other ) if self.expr is not None: self.expr.ignore( self.ignoreExprs[-1] ) return self def streamline( self ): super(ParseElementEnhance,self).streamline() if self.expr is not None: self.expr.streamline() return self def checkRecursion( self, parseElementList ): if self in parseElementList: raise RecursiveGrammarException( parseElementList+[self] ) subRecCheckList = parseElementList[:] + [ self ] if self.expr is not None: self.expr.checkRecursion( subRecCheckList ) def validate( self, validateTrace=[] ): tmp = validateTrace[:]+[self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion( [] ) def __str__( self ): try: return super(ParseElementEnhance,self).__str__() except Exception: pass if self.strRepr is None and self.expr is not None: self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) return self.strRepr class FollowedBy(ParseElementEnhance): """ Lookahead matching of the given parse expression. C{FollowedBy} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current position. C{FollowedBy} always returns a null token list. Example:: # use FollowedBy to match a label only if it is followed by a ':' data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() prints:: [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] """ def __init__( self, expr ): super(FollowedBy,self).__init__(expr) self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): self.expr.tryParse( instring, loc ) return loc, [] class NotAny(ParseElementEnhance): """ Lookahead to disallow matching with the given parse expression. C{NotAny} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression does I{not} match at the current position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} always returns a null token list. May be constructed using the '~' operator. 
Example (keyword/identifier disambiguation, a standard use of C{NotAny})::

        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
        # take care not to mistake a keyword for an identifier
        ident = ~(AND | OR | NOT) + Word(alphas)

""" def __init__( self, expr ): super(NotAny,self).__init__(expr) #~ self.leaveWhitespace() self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs self.mayReturnEmpty = True self.errmsg = "Found unwanted token, "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): if self.expr.canParseNext(instring, loc): raise ParseException(instring, loc, self.errmsg, self) return loc, [] def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "~{" + _ustr(self.expr) + "}" return self.strRepr class _MultipleMatch(ParseElementEnhance): def __init__( self, expr, stopOn=None): super(_MultipleMatch, self).__init__(expr) self.saveAsList = True ender = stopOn if isinstance(ender, basestring): ender = ParserElement._literalStringClass(ender) self.not_ender = ~ender if ender is not None else None def parseImpl( self, instring, loc, doActions=True ): self_expr_parse = self.expr._parse self_skip_ignorables = self._skipIgnorables check_ender = self.not_ender is not None if check_ender: try_not_ender = self.not_ender.tryParse # must be at least one (but first see if we are the stopOn sentinel; # if so, fail) if check_ender: try_not_ender(instring, loc) loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) try: hasIgnoreExprs = (not not self.ignoreExprs) while 1: if check_ender: try_not_ender(instring, loc) if hasIgnoreExprs: preloc = self_skip_ignorables( instring, loc ) else: preloc = loc loc, tmptokens = self_expr_parse( instring, preloc, doActions ) if tmptokens or tmptokens.haskeys(): tokens += tmptokens except (ParseException,IndexError): pass return loc, tokens class OneOrMore(_MultipleMatch): """ Repetition of one or more of the given expression. Parameters: - expr - expression that must match one or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: BLACK" OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] # use stopOn attribute for OneOrMore to avoid reading label string as part of the data attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] # could also be written as (attr_expr * (1,)).parseString(text).pprint() """ def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + _ustr(self.expr) + "}..." return self.strRepr class ZeroOrMore(_MultipleMatch): """ Optional repetition of zero or more of the given expression.
Parameters: - expr - expression that must match zero or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example: similar to L{OneOrMore} """ def __init__( self, expr, stopOn=None): super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): try: return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) except (ParseException,IndexError): return loc, [] def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "[" + _ustr(self.expr) + "]..." return self.strRepr class _NullToken(object): def __bool__(self): return False __nonzero__ = __bool__ def __str__(self): return "" _optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): """ Optional matching of the given expression. Parameters: - expr - expression that may be matched zero or one time - default (optional) - value to be returned if the optional expression is not found. Example:: # US postal code can be a 5-digit zip, plus optional 4-digit qualifier zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) zip.runTests(''' # traditional ZIP code 12345 # ZIP+4 form 12101-0001 # invalid ZIP 98765- ''') prints:: # traditional ZIP code 12345 ['12345'] # ZIP+4 form 12101-0001 ['12101-0001'] # invalid ZIP 98765- ^ FAIL: Expected end of text (at char 5), (line:1, col:6) """ def __init__( self, expr, default=_optionalNotMatched ): super(Optional,self).__init__( expr, savelist=False ) self.saveAsList = self.expr.saveAsList self.defaultValue = default self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): try: loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) except (ParseException,IndexError): if self.defaultValue is not _optionalNotMatched: if self.expr.resultsName: tokens = ParseResults([ self.defaultValue ]) tokens[self.expr.resultsName] = self.defaultValue else: tokens = [ self.defaultValue ] else: tokens = [] return loc, tokens def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "[" + _ustr(self.expr) + "]" return self.strRepr class SkipTo(ParseElementEnhance): """ Token for skipping over all undefined text until the matched expression is found. Parameters: - expr - target expression marking the end of the data to be skipped - include - (default=C{False}) if True, the target expression is also parsed (the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and comments) that might contain false matches to the target expression - failOn - (default=C{None}) define expressions that are not allowed to be included in the skipped text; if found before the target expression is found, the SkipTo is not a match Example:: report = ''' Outstanding Issues Report - 1 Jan 2000 # | Severity | Description | Days Open -----+----------+-------------------------------------------+----------- 101 | Critical | Intermittent system crash | 6 94 | Cosmetic | Spelling error on Login ('log|n') | 14 79 | Minor | System slow when running too many reports | 47 ''' integer = Word(nums) SEP = Suppress('|') # use SkipTo to simply match everything up until the next SEP # - ignore quoted strings, so that a '|' character inside a quoted string does not match # - parse action will call token.strip() for each matched token, i.e., the description body string_data = SkipTo(SEP, ignore=quotedString) string_data.setParseAction(tokenMap(str.strip)) ticket_expr = (integer("issue_num") + SEP + string_data("sev") + SEP + string_data("desc") + SEP + integer("days_open")) for tkt in ticket_expr.searchString(report): print(tkt.dump()) prints:: ['101', 'Critical', 'Intermittent system crash', '6'] - days_open: 6 - desc: Intermittent system crash - issue_num: 101 - sev: Critical ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - days_open: 14 - desc: Spelling error on Login ('log|n') - issue_num: 94 - sev: Cosmetic ['79', 'Minor', 'System slow when running too many reports', '47'] - days_open: 47 - desc: System slow when running too many reports - issue_num: 79 - sev: Minor """ def __init__( self, other, include=False, ignore=None, failOn=None ): super( SkipTo, self ).__init__( other ) self.ignoreExpr = ignore self.mayReturnEmpty = True self.mayIndexError = False self.includeMatch = include self.asList = False if isinstance(failOn, basestring): self.failOn = ParserElement._literalStringClass(failOn) else: self.failOn = failOn self.errmsg = "No match found for "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): startloc = loc instrlen = len(instring) expr = self.expr expr_parse = self.expr._parse self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None tmploc = loc while tmploc <= instrlen: if self_failOn_canParseNext is not None: # break if failOn expression matches if self_failOn_canParseNext(instring, tmploc): break if self_ignoreExpr_tryParse is not None: # advance past ignore expressions while 1: try: tmploc = self_ignoreExpr_tryParse(instring, tmploc) except ParseBaseException: break try: expr_parse(instring, tmploc, doActions=False, callPreParse=False) except (ParseException, IndexError): # no match, advance loc in string tmploc += 1 else: # matched skipto expr, done break else: # ran off the end of the input string without matching skipto expr, fail raise ParseException(instring, loc, self.errmsg, self) # build up return values loc = tmploc skiptext = instring[startloc:loc] skipresult = ParseResults(skiptext) if self.includeMatch: loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) skipresult += mat return loc, skipresult class Forward(ParseElementEnhance): """ Forward declaration of an expression to be defined later - used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. Note: take care when assigning to C{Forward} not to overlook precedence of operators. Specifically, '|' has a lower precedence than '<<', so that:: fwdExpr << a | b | c will actually be evaluated as:: (fwdExpr << a) | b | c thereby leaving b and c out as parseable alternatives. It is recommended that you explicitly group the values inserted into the C{Forward}:: fwdExpr << (a | b | c) Converting to use the '<<=' operator instead will avoid this problem. See L{ParseResults.pprint} for an example of a recursive parser created using C{Forward}. """ def __init__( self, other=None ): super(Forward,self).__init__( other, savelist=False ) def __lshift__( self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass(other) self.expr = other self.strRepr = None self.mayIndexError = self.expr.mayIndexError self.mayReturnEmpty = self.expr.mayReturnEmpty self.setWhitespaceChars( self.expr.whiteChars ) self.skipWhitespace = self.expr.skipWhitespace self.saveAsList = self.expr.saveAsList self.ignoreExprs.extend(self.expr.ignoreExprs) return self def __ilshift__(self, other): return self << other def leaveWhitespace( self ): self.skipWhitespace = False return self def streamline( self ): if not self.streamlined: self.streamlined = True if self.expr is not None: self.expr.streamline() return self def validate( self, validateTrace=[] ): if self not in validateTrace: tmp = validateTrace[:]+[self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion([]) def __str__( self ): if hasattr(self,"name"): return self.name return self.__class__.__name__ + ": ..." # stubbed out for now - creates awful memory and perf issues self._revertClass = self.__class__ self.__class__ = _ForwardNoRecurse try: if self.expr is not None: retString = _ustr(self.expr) else: retString = "None" finally: self.__class__ = self._revertClass return self.__class__.__name__ + ": " + retString def copy(self): if self.expr is not None: return super(Forward,self).copy() else: ret = Forward() ret <<= self return ret class _ForwardNoRecurse(Forward): def __str__( self ): return "..." class TokenConverter(ParseElementEnhance): """ Abstract subclass of C{ParseElementEnhance}, for converting parsed results. """ def __init__( self, expr, savelist=False ): super(TokenConverter,self).__init__( expr )#, savelist ) self.saveAsList = False class Combine(TokenConverter): """ Converter to concatenate all matching tokens to a single string. By default, the matching patterns must also be contiguous in the input string; this can be disabled by specifying C{'adjacent=False'} in the constructor. Example:: real = Word(nums) + '.' + Word(nums) print(real.parseString('3.1416')) # -> ['3', '.', '1416'] # will also erroneously match the following print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] real = Combine(Word(nums) + '.' + Word(nums)) print(real.parseString('3.1416')) # -> ['3.1416'] # no match when there are internal spaces print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
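A further sketch of the C{joinString} and C{adjacent} parameters (the input text here is illustrative)::

            # allow whitespace between the pieces, and join them with '_'
            two_words = Combine(Word(alphas) + Word(alphas), joinString='_', adjacent=False)
            print(two_words.parseString('hello world'))  # -> ['hello_world']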
""" def __init__( self, expr, joinString="", adjacent=True ): super(Combine,self).__init__( expr ) # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself if adjacent: self.leaveWhitespace() self.adjacent = adjacent self.skipWhitespace = True self.joinString = joinString self.callPreparse = True def ignore( self, other ): if self.adjacent: ParserElement.ignore(self, other) else: super( Combine, self).ignore( other ) return self def postParse( self, instring, loc, tokenlist ): retToks = tokenlist.copy() del retToks[:] retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) if self.resultsName and retToks.haskeys(): return [ retToks ] else: return retToks class Group(TokenConverter): """ Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. Example:: ident = Word(alphas) num = Word(nums) term = ident | num func = ident + Optional(delimitedList(term)) print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] func = ident + Group(Optional(delimitedList(term))) print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] """ def __init__( self, expr ): super(Group,self).__init__( expr ) self.saveAsList = True def postParse( self, instring, loc, tokenlist ): return [ tokenlist ] class Dict(TokenConverter): """ Converter to return a repetitive expression as a list, but also as a dictionary. Each element can also be referenced using the first token in the expression as its key. Useful for tabular report scraping when the first column can be used as a item key. Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) # print attributes as plain groups print(OneOrMore(attr_expr).parseString(text).dump()) # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names result = Dict(OneOrMore(Group(attr_expr))).parseString(text) print(result.dump()) # access named fields as dict entries, or output as dict print(result['shape']) print(result.asDict()) prints:: ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} See more examples at L{ParseResults} of accessing fields by results name. 
""" def __init__( self, expr ): super(Dict,self).__init__( expr ) self.saveAsList = True def postParse( self, instring, loc, tokenlist ): for i,tok in enumerate(tokenlist): if len(tok) == 0: continue ikey = tok[0] if isinstance(ikey,int): ikey = _ustr(tok[0]).strip() if len(tok)==1: tokenlist[ikey] = _ParseResultsWithOffset("",i) elif len(tok)==2 and not isinstance(tok[1],ParseResults): tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) else: dictvalue = tok.copy() #ParseResults(i) del dictvalue[0] if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) else: tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) if self.resultsName: return [ tokenlist ] else: return tokenlist class Suppress(TokenConverter): """ Converter for ignoring the results of a parsed expression. Example:: source = "a, b, c,d" wd = Word(alphas) wd_list1 = wd + ZeroOrMore(',' + wd) print(wd_list1.parseString(source)) # often, delimiters that are useful during parsing are just in the # way afterward - use Suppress to keep them out of the parsed output wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) print(wd_list2.parseString(source)) prints:: ['a', ',', 'b', ',', 'c', ',', 'd'] ['a', 'b', 'c', 'd'] (See also L{delimitedList}.) """ def postParse( self, instring, loc, tokenlist ): return [] def suppress( self ): return self class OnlyOnce(object): """ Wrapper for parse actions, to ensure they are only called once. """ def __init__(self, methodCall): self.callable = _trim_arity(methodCall) self.called = False def __call__(self,s,l,t): if not self.called: results = self.callable(s,l,t) self.called = True return results raise ParseException(s,l,"") def reset(self): self.called = False def traceParseAction(f): """ Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <<leaving remove_duplicate_chars (ret: 'dfjkls') ['dfjkls'] """ f = _trim_arity(f) def z(*paArgs): thisFunc = f.__name__ s,l,t = paArgs[-3:] if len(paArgs)>3: thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) try: ret = f(*paArgs) except Exception as exc: sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) raise sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) return ret try: z.__name__ = f.__name__ except AttributeError: pass return z # # global helpers # def delimitedList( expr, delim=",", combine=False ): """ Helper to define a delimited list of expressions - the delimiter defaults to ','. By default, the list elements and delimiters can have intervening whitespace, and comments, but this can be overridden by passing C{combine=True} in the constructor. 
If C{combine} is set to C{True}, the matching tokens are returned as a single token string, with the delimiters included; otherwise, the matching tokens are returned as a list of tokens, with the delimiters suppressed. Example:: delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." if combine: return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) else: return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) def countedArray( expr, intExpr=None ): """ Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. Example:: countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, # '10' indicating that 2 values are in the array binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] """ arrayExpr = Forward() def countFieldParseAction(s,l,t): n = t[0] arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) return [] if intExpr is None: intExpr = Word(nums).setParseAction(lambda t:int(t[0])) else: intExpr = intExpr.copy() intExpr.setName("arrayLen") intExpr.addParseAction(countFieldParseAction, callDuringTry=True) return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') def _flatten(L): ret = [] for i in L: if isinstance(i,list): ret.extend(_flatten(i)) else: ret.append(i) return ret def matchPreviousLiteral(expr): """ Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled. """ rep = Forward() def copyTokenToRepeater(s,l,t): if t: if len(t) == 1: rep << t[0] else: # flatten t tokens tflat = _flatten(t.asList()) rep << And(Literal(tt) for tt in tflat) else: rep << Empty() expr.addParseAction(copyTokenToRepeater, callDuringTry=True) rep.setName('(prev) ' + _ustr(expr)) return rep def matchPreviousExpr(expr): """ Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches by expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; the expressions are evaluated first, and then compared, so C{"1"} is compared with C{"10"}. Do I{not} use with packrat parsing enabled. 
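        A brief illustrative sketch (reusing the names from the description above)::

            first = Word(nums)
            matchExpr = first + ":" + matchPreviousExpr(first)
            print(matchExpr.parseString("12:12")) # -> ['12', ':', '12']
            matchExpr.parseString("12:129") # -> raises ParseException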
""" rep = Forward() e2 = expr.copy() rep <<= e2 def copyTokenToRepeater(s,l,t): matchTokens = _flatten(t.asList()) def mustMatchTheseTokens(s,l,t): theseTokens = _flatten(t.asList()) if theseTokens != matchTokens: raise ParseException("",0,"") rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) expr.addParseAction(copyTokenToRepeater, callDuringTry=True) rep.setName('(prev) ' + _ustr(expr)) return rep def _escapeRegexRangeChars(s): #~ escape these chars: ^-] for c in r"\^-]": s = s.replace(c,_bslash+c) s = s.replace("\n",r"\n") s = s.replace("\t",r"\t") return _ustr(s) def oneOf( strs, caseless=False, useRegex=True ): """ Helper to quickly define a set of alternative Literals, and makes sure to do longest-first testing when there is a conflict, regardless of the input order, but returns a C{L{MatchFirst}} for best performance. Parameters: - strs - a string of space-delimited literals, or a collection of string literals - caseless - (default=C{False}) - treat all literals as caseless - useRegex - (default=C{True}) - as an optimization, will generate a Regex object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or if creating a C{Regex} raises an exception) Example:: comp_oper = oneOf("< = > <= >= !=") var = Word(alphas) number = Word(nums) term = var | number comparison_expr = term + comp_oper + term print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) prints:: [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] """ if caseless: isequal = ( lambda a,b: a.upper() == b.upper() ) masks = ( lambda a,b: b.upper().startswith(a.upper()) ) parseElementClass = CaselessLiteral else: isequal = ( lambda a,b: a == b ) masks = ( lambda a,b: b.startswith(a) ) parseElementClass = Literal symbols = [] if isinstance(strs,basestring): symbols = strs.split() elif isinstance(strs, collections.Iterable): symbols = list(strs) else: warnings.warn("Invalid argument to oneOf, expected string or iterable", SyntaxWarning, stacklevel=2) if not symbols: return NoMatch() i = 0 while i < len(symbols)-1: cur = symbols[i] for j,other in enumerate(symbols[i+1:]): if ( isequal(other, cur) ): del symbols[i+j+1] break elif ( masks(cur, other) ): del symbols[i+j+1] symbols.insert(i,other) cur = other break else: i += 1 if not caseless and useRegex: #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) try: if len(symbols)==len("".join(symbols)): return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) else: return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) except Exception: warnings.warn("Exception creating Regex for oneOf, building MatchFirst", SyntaxWarning, stacklevel=2) # last resort, just use MatchFirst return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) def dictOf( key, value ): """ Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens in the proper order. The key pattern can include delimiting markers or punctuation, as long as they are suppressed, thereby leaving the significant key text. The value pattern can include named results, so that the C{Dict} results can include named token fields. 
Example:: text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) print(OneOrMore(attr_expr).parseString(text).dump()) attr_label = label attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) # similar to Dict, but simpler call format result = dictOf(attr_label, attr_value).parseString(text) print(result.dump()) print(result['shape']) print(result.shape) # object attribute access works too print(result.asDict()) prints:: [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE SQUARE {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} """ return Dict( ZeroOrMore( Group ( key + value ) ) ) def originalTextFor(expr, asString=True): """ Helper to return the original, untokenized text for a given expression. Useful to restore the parsed fields of an HTML start tag into the raw tag text itself, or to revert separate tokens with intervening whitespace back to the original matching input text. By default, returns astring containing the original parsed text. If the optional C{asString} argument is passed as C{False}, then the return value is a C{L{ParseResults}} containing any results names that were originally matched, and a single token containing the original matched text from the input string. So if the expression passed to C{L{originalTextFor}} contains expressions with defined results names, you must set C{asString} to C{False} if you want to preserve those results name values. Example:: src = "this is test <b> bold <i>text</i> </b> normal text " for tag in ("b","i"): opener,closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) prints:: ['<b> bold <i>text</i> </b>'] ['<i>text</i>'] """ locMarker = Empty().setParseAction(lambda s,loc,t: loc) endlocMarker = locMarker.copy() endlocMarker.callPreparse = False matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") if asString: extractText = lambda s,l,t: s[t._original_start:t._original_end] else: def extractText(s,l,t): t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] matchExpr.setParseAction(extractText) matchExpr.ignoreExprs = expr.ignoreExprs return matchExpr def ungroup(expr): """ Helper to undo pyparsing's default grouping of And expressions, even if all but one are non-empty. """ return TokenConverter(expr).setParseAction(lambda t:t[0]) def locatedExpr(expr): """ Helper to decorate a returned token with its starting and ending locations in the input string. 
This helper adds the following results names: - locn_start = location where matched expression begins - locn_end = location where matched expression ends - value = the actual parsed results Be careful if the input text contains C{<TAB>} characters, you may want to call C{L{ParserElement.parseWithTabs}} Example:: wd = Word(alphas) for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): print(match) prints:: [[0, 'ljsdf', 5]] [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] """ locator = Empty().setParseAction(lambda s,l,t: l) return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) # convenience constants for positional expressions empty = Empty().setName("empty") lineStart = LineStart().setName("lineStart") lineEnd = LineEnd().setName("lineEnd") stringStart = StringStart().setName("stringStart") stringEnd = StringEnd().setName("stringEnd") _escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) _escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) _escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) _singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE) _charRange = Group(_singleChar + Suppress("-") + _singleChar) _reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" def srange(s): r""" Helper to easily define string ranges for use in Word construction. Borrows syntax from regexp '[]' string range definitions:: srange("[0-9]") -> "0123456789" srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" The input string must be enclosed in []'s, and the returned string is the expanded character set joined into a single string. The values enclosed in the []'s may be: - a single character - an escaped character with a leading backslash (such as C{\-} or C{\]}) - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) (C{\0x##} is also supported for backwards compatibility) - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) """ _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) try: return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) except Exception: return "" def matchOnlyAtCol(n): """ Helper method for defining parse actions that require matching at a specific column in the input text. """ def verifyCol(strg,locn,toks): if col(locn,strg) != n: raise ParseException(strg,locn,"matched token not at column %d" % n) return verifyCol def replaceWith(replStr): """ Helper method for common parse actions that simply return a literal value. Especially useful when used with C{L{transformString<ParserElement.transformString>}()}. Example:: num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] """ return lambda s,l,t: [replStr] def removeQuotes(s,l,t): """ Helper parse action for removing quotation marks from parsed quoted strings. 
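        Note: this parse action simply strips the first and last characters of the matched token, so it assumes single-character quote delimiters and does not unescape any embedded quote characters.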
Example:: # by default, quotation marks are included in parsed results quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] # use removeQuotes to strip quotation marks from parsed results quotedString.setParseAction(removeQuotes) quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] """ return t[0][1:-1] def tokenMap(func, *args): """ Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional args are passed, they are forwarded to the given function as additional arguments after the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the parsed data to an integer using base 16. Example (compare the last example to the example in L{ParserElement.transformString}):: hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) hex_ints.runTests(''' 00 11 22 aa FF 0a 0d 1a ''') upperword = Word(alphas).setParseAction(tokenMap(str.upper)) OneOrMore(upperword).runTests(''' my kingdom for a horse ''') wd = Word(alphas).setParseAction(tokenMap(str.title)) OneOrMore(wd).setParseAction(' '.join).runTests(''' now is the winter of our discontent made glorious summer by this sun of york ''') prints:: 00 11 22 aa FF 0a 0d 1a [0, 17, 34, 170, 255, 10, 13, 26] my kingdom for a horse ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] now is the winter of our discontent made glorious summer by this sun of york ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] """ def pa(s,l,t): return [func(tokn, *args) for tokn in t] try: func_name = getattr(func, '__name__', getattr(func, '__class__').__name__) except Exception: func_name = str(func) pa.__name__ = func_name return pa upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) """(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}""" downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) """(Deprecated) Helper parse action to convert tokens to lower case. 
Deprecated in favor of L{pyparsing_common.downcaseTokens}""" def _makeTags(tagStr, xml): """Internal helper to construct opening and closing tag expressions, given a tag name""" if isinstance(tagStr,basestring): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas,alphanums+"_-:") if (xml): tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) openTag = Suppress("<") + tagStr("tag") + \ Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") else: printablesLessRAbrack = "".join(c for c in printables if c not in ">") tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) openTag = Suppress("<") + tagStr("tag") + \ Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ Optional( Suppress("=") + tagAttrValue ) ))) + \ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") closeTag = Combine(_L("</") + tagStr + ">") openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) openTag.tag = resname closeTag.tag = resname return openTag, closeTag def makeHTMLTags(tagStr): """ Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. Example:: text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple a,a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end for link in link_expr.searchString(text): # attributes in the <A> tag (like "href" shown here) are also accessible as named results print(link.link_text, '->', link.href) prints:: pyparsing -> http://pyparsing.wikispaces.com """ return _makeTags( tagStr, False ) def makeXMLTags(tagStr): """ Helper to construct opening and closing tag expressions for XML, given a tag name. Matches tags only in the given upper/lower case. Example: similar to L{makeHTMLTags} """ return _makeTags( tagStr, True ) def withAttribute(*args,**attrDict): """ Helper to create a validating parse action to be used with start tags created with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as C{<TD>} or C{<DIV>}. Call C{withAttribute} with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in C{(align="right")}, or - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for C{class} (with or without a namespace), use C{L{withClass}}. To verify that the attribute exists, but without specifying a value, pass C{withAttribute.ANY_VALUE} as the value. 
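    C{withAttribute.ANY_VALUE} is a unique sentinel object (it is defined below as a bare C{object()} instance), so it can never collide with a genuine attribute value.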
Example:: html = ''' <div> Some text <div type="grid">1 4 0 1 0</div> <div type="graph">1,3 2,3 1,1</div> <div>this has no type</div> </div> ''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ if args: attrs = args[:] else: attrs = attrDict.items() attrs = [(k,v) for k,v in attrs] def pa(s,l,tokens): for attrName,attrValue in attrs: if attrName not in tokens: raise ParseException(s,l,"no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % (attrName, tokens[attrName], attrValue)) return pa withAttribute.ANY_VALUE = object() def withClass(classname, namespace=''): """ Simplified version of C{L{withAttribute}} when matching on a div class - made difficult because C{class} is a reserved word in Python. Example:: html = ''' <div> Some text <div class="grid">1 4 0 1 0</div> <div class="graph">1,3 2,3 1,1</div> <div>this &lt;div&gt; has no class</div> </div> ''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ classattr = "%s:class" % namespace if namespace else "class" return withAttribute(**{classattr : classname}) opAssoc = _Constants() opAssoc.LEFT = object() opAssoc.RIGHT = object() def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): """ Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. The generated parser will also recognize the use of parentheses to override operator precedences (see example below). Note: if you define a deep operator list, you may see performance issues when using infixNotation. See L{ParserElement.enablePackrat} for a mechanism to potentially improve your parser performance. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
- parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted); if the parse action is passed a tuple or list of functions, this is equivalent to calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) Example:: # simple example of four-function arithmetic with ints and variable names integer = pyparsing_common.signed_integer varname = pyparsing_common.identifier arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) prints:: 5+3*6 [[5, '+', [3, '*', 6]]] (5+3)*6 [[[5, '+', 3], '*', 6]] -2--11 [[['-', 2], '-', ['-', 11]]] """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) else: matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: if arity == 1: # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) else: matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") else: raise ValueError("operator must indicate right or left associativity") if pa: if isinstance(pa, (tuple, list)): matchExpr.setParseAction(*pa) else: matchExpr.setParseAction(pa) thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret operatorPrecedence = infixNotation """(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using 
single or double quotes") unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): """ Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - content - expression for items within the nested lists (default=C{None}) - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the C{ignoreExpr} argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. The default is L{quotedString}, but if no expressions are to be ignored, then pass C{None} for this argument. Example:: data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) LPAR,RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) c_function = (decl_data_type("type") + ident("name") + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) source_code = ''' int is_odd(int x) { return (x%2); } int dec_to_hex(char hchar) { if (hchar >= '0' && hchar <= '9') { return (ord(hchar)-ord('0')); } else { return (10+ord(hchar)-ord('A')); } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) prints:: is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] """ if opener == closer: raise ValueError("opening and closing strings cannot be the same") if content is None: if isinstance(opener,basestring) and isinstance(closer,basestring): if len(opener) == 1 and len(closer)==1: if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS ).setParseAction(lambda t:t[0].strip())) else: if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + ~Literal(opener) + ~Literal(closer) + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: raise ValueError("opening and closing arguments must be strings if no content expression is given") ret = Forward() if ignoreExpr is not None: ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) else: ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) ret.setName('nested %s%s expression' % (opener,closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): """ Helper 
method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ def checkPeerIndent(s,l,t): if l >= len(s): return curCol = col(l,s) if curCol != indentStack[-1]: if curCol > indentStack[-1]: raise ParseFatalException(s,l,"illegal nesting") raise ParseException(s,l,"not a peer entry") def checkSubIndent(s,l,t): curCol = col(l,s) if curCol > indentStack[-1]: indentStack.append( curCol ) else: raise ParseException(s,l,"not a subentry") def checkUnindent(s,l,t): if l >= len(s): return curCol = col(l,s) if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): raise ParseException(s,l,"not an unindent") indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') PEER = Empty().setParseAction(checkPeerIndent).setName('') UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: smExpr = Group( Optional(NL) + #~ FollowedBy(blockStatementExpr) + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) else: smExpr = Group( Optional(NL) + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) blockStatementExpr.ignore(_bslash + LineEnd()) return smExpr.setName('indented block') alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) _htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") def replaceHTMLEntity(t): """Helper parser action to replace common HTML entities with their special characters""" return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available 
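# illustrative sketch of typical usage of the comment expressions defined just below:
#   print(cStyleComment.searchString("int x; /* loop counter */")) # -> [['/* loop counter */']]
#   print(dblSlashComment.searchString("x += 1; // bump")) # -> [['// bump']]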
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") "Comment of the form C{/* ... */}" htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") "Comment of the form C{<!-- ... -->}" restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") "Comment of the form C{// ... (to end of line)}" cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") "Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" javaStyleComment = cppStyleComment "Same as C{L{cppStyleComment}}" pythonStyleComment = Regex(r"#.*").setName("Python style comment") "Comment of the form C{# ... (to end of line)}" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional( Word(" \t") + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") """(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" # some other useful expressions - using lower-case class name since we are really using this as a namespace class pyparsing_common: """ Here are some common low-level expressions that may be useful in jump-starting parser development: - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>}) - common L{programming identifiers<identifier>} - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>}) - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>} - L{UUID<uuid>} - L{comma-separated list<comma_separated_list>} Parse actions: - C{L{convertToInteger}} - C{L{convertToFloat}} - C{L{convertToDate}} - C{L{convertToDatetime}} - C{L{stripHTMLTags}} - C{L{upcaseTokens}} - C{L{downcaseTokens}} Example:: pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.fnumber.runTests(''' # any int or real number, returned as float 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.hex_integer.runTests(''' # hex numbers 100 FF ''') pyparsing_common.fraction.runTests(''' # fractions 1/2 -3/4 ''') pyparsing_common.mixed_integer.runTests(''' # mixed fractions 1 1/2 -3/4 1-3/4 ''') import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(''' # uuid 12345678-1234-5678-1234-567812345678 ''') prints:: # any int or real number, returned as the appropriate type 100 [100] -100 [-100] +100 [100] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # any int or real number, returned as float 100 [100.0] -100 [-100.0] +100 [100.0] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # hex numbers 100 [256] FF [255] # fractions 1/2 [0.5] -3/4 [-0.75] # mixed fractions 1 [1] 1/2 [0.5] -3/4 [-0.75] 1-3/4 [1.75] # uuid 12345678-1234-5678-1234-567812345678 [UUID('12345678-1234-5678-1234-567812345678')] """ convertToInteger = tokenMap(int) """ Parse action for converting parsed integers to Python int """ convertToFloat = tokenMap(float) """ Parse action for converting parsed numbers to Python float """ integer = Word(nums).setName("integer").setParseAction(convertToInteger) """expression that parses an unsigned integer, returns an int""" hex_integer = Word(hexnums).setName("hex 
integer").setParseAction(tokenMap(int,16)) """expression that parses a hexadecimal integer, returns an int""" signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) """expression that parses an integer with optional leading sign, returns an int""" fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") """fractional expression of an integer divided by an integer, returns a float""" fraction.addParseAction(lambda t: t[0]/t[-1]) mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" mixed_integer.addParseAction(sum) real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) """expression that parses a floating point number and returns a float""" sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) """expression that parses a floating point number with optional scientific notation and returns a float""" # streamlining this expression makes the docs nicer-looking number = (sci_real | real | signed_integer).streamline() """any numeric expression, returns the corresponding Python type""" fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) """any int or real number, returned as float""" identifier = Word(alphas+'_', alphanums+'_').setName("identifier") """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") "IPv4 address (C{0.0.0.0 - 255.255.255.255})" _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") "IPv6 address (long, short, or mixed form)" mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" @staticmethod def convertToDate(fmt="%Y-%m-%d"): """ Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt).date() except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn @staticmethod def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): """ Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt) except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") "ISO8601 date (C{yyyy-mm-dd})" iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() @staticmethod def stripHTMLTags(s, l, tokens): """ Parse action to remove HTML tags from web page HTML source Example:: # strip HTML links from normal text text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' """ return pyparsing_common._html_stripper.transformString(tokens[0]) _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + Optional( White(" \t") ) ) ).streamline().setName("commaItem") comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) """Parse action to convert tokens to upper case.""" downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) """Parse action to convert tokens to lower case.""" if __name__ == "__main__": selectToken = CaselessLiteral("select") fromToken = CaselessLiteral("from") ident = Word(alphas, alphanums + "_$") columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) columnNameList = Group(delimitedList(columnName)).setName("columns") columnSpec = ('*' | columnNameList) tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) tableNameList = Group(delimitedList(tableName)).setName("tables") simpleSQL = 
selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") # demo runTests method, including embedded comments in test string simpleSQL.runTests(""" # '*' as column list and dotted table name select * from SYS.XYZZY # caseless match on "SELECT", and casts back to "select" SELECT * from XYZZY, ABC # list of column names, and mixed case SELECT keyword Select AA,BB,CC from Sys.dual # multiple tables Select A, B, C from Sys.dual, Table2 # invalid SELECT keyword - should fail Xelect A, B, C from Sys.dual # incomplete command - should fail Select # invalid column name - should fail Select ^^^ frox Sys.dual """) pyparsing_common.number.runTests(""" 100 -100 +100 3.14159 6.02e23 1e-12 """) # any int or real number, returned as float pyparsing_common.fnumber.runTests(""" 100 -100 +100 3.14159 6.02e23 1e-12 """) pyparsing_common.hex_integer.runTests(""" 100 FF """) import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(""" 12345678-1234-5678-1234-567812345678 """)
225348
38.389792
195
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/ipaddress.py
# Copyright 2007 Google Inc. # Licensed to PSF under a Contributor Agreement. """A fast, lightweight IPv4/IPv6 manipulation library in Python. This library is used to create/poke/manipulate IPv4 and IPv6 addresses and networks. """ from __future__ import unicode_literals import itertools import struct __version__ = '1.0.19' # Compatibility functions _compat_int_types = (int,) try: _compat_int_types = (int, long) except NameError: pass try: _compat_str = unicode except NameError: _compat_str = str assert bytes != str if b'\0'[0] == 0: # Python 3 semantics def _compat_bytes_to_byte_vals(byt): return byt else: def _compat_bytes_to_byte_vals(byt): return [struct.unpack(b'!B', b)[0] for b in byt] try: _compat_int_from_byte_vals = int.from_bytes except AttributeError: def _compat_int_from_byte_vals(bytvals, endianess): assert endianess == 'big' res = 0 for bv in bytvals: assert isinstance(bv, _compat_int_types) res = (res << 8) + bv return res def _compat_to_bytes(intval, length, endianess): assert isinstance(intval, _compat_int_types) assert endianess == 'big' if length == 4: if intval < 0 or intval >= 2 ** 32: raise struct.error("integer out of range for 'I' format code") return struct.pack(b'!I', intval) elif length == 16: if intval < 0 or intval >= 2 ** 128: raise struct.error("integer out of range for 'QQ' format code") return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) else: raise NotImplementedError() if hasattr(int, 'bit_length'): # Not int.bit_length , since that won't work in 2.7 where long exists def _compat_bit_length(i): return i.bit_length() else: def _compat_bit_length(i): for res in itertools.count(): if i >> res == 0: return res def _compat_range(start, end, step=1): assert step > 0 i = start while i < end: yield i i += step class _TotalOrderingMixin(object): __slots__ = () # Helper that derives the other comparison operations from # __lt__ and __eq__ # We avoid functools.total_ordering because it doesn't handle # NotImplemented correctly yet (http://bugs.python.org/issue10042) def __eq__(self, other): raise NotImplementedError def __ne__(self, other): equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not equal def __lt__(self, other): raise NotImplementedError def __le__(self, other): less = self.__lt__(other) if less is NotImplemented or not less: return self.__eq__(other) return less def __gt__(self, other): less = self.__lt__(other) if less is NotImplemented: return NotImplemented equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not (less or equal) def __ge__(self, other): less = self.__lt__(other) if less is NotImplemented: return NotImplemented return not less IPV4LENGTH = 32 IPV6LENGTH = 128 class AddressValueError(ValueError): """A Value Error related to the address.""" class NetmaskValueError(ValueError): """A Value Error related to the netmask.""" def ip_address(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Address or IPv6Address object. 
Raises: ValueError: if the *address* passed isn't either a v4 or a v6 address """ try: return IPv4Address(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Address(address) except (AddressValueError, NetmaskValueError): pass if isinstance(address, bytes): raise AddressValueError( '%r does not appear to be an IPv4 or IPv6 address. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' % address) raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % address) def ip_network(address, strict=True): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP network. Either IPv4 or IPv6 networks may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Network or IPv6Network object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Or if the network has host bits set. """ try: return IPv4Network(address, strict) except (AddressValueError, NetmaskValueError): pass try: return IPv6Network(address, strict) except (AddressValueError, NetmaskValueError): pass if isinstance(address, bytes): raise AddressValueError( '%r does not appear to be an IPv4 or IPv6 network. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' % address) raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % address) def ip_interface(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Interface or IPv6Interface object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Notes: The IPv?Interface classes describe an Address on a particular Network, so they're basically a combination of both the Address and Network classes. """ try: return IPv4Interface(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Interface(address) except (AddressValueError, NetmaskValueError): pass raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % address) def v4_int_to_packed(address): """Represent an address as 4 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv4 IP address. Returns: The integer address packed as 4 bytes in network (big-endian) order. Raises: ValueError: If the integer is negative or too large to be an IPv4 IP address. """ try: return _compat_to_bytes(address, 4, 'big') except (struct.error, OverflowError): raise ValueError("Address negative or too large for IPv4") def v6_int_to_packed(address): """Represent an address as 16 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv6 IP address. Returns: The integer address packed as 16 bytes in network (big-endian) order. """ try: return _compat_to_bytes(address, 16, 'big') except (struct.error, OverflowError): raise ValueError("Address negative or too large for IPv6") def _split_optional_netmask(address): """Helper to split the netmask and raise AddressValueError if needed""" addr = _compat_str(address).split('/') if len(addr) > 2: raise AddressValueError("Only one '/' permitted in %r" % address) return addr def _find_address_range(addresses): """Find a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. 
Yields: A tuple containing the first and last IP addresses in the sequence. """ it = iter(addresses) first = last = next(it) for ip in it: if ip._ip != last._ip + 1: yield first, last first = ip last = ip yield first, last def _count_righthand_zero_bits(number, bits): """Count the number of zero bits on the right hand side. Args: number: an integer. bits: maximum number of bits to count. Returns: The number of zero bits on the right hand side of the number. """ if number == 0: return bits return min(bits, _compat_bit_length(~number & (number - 1))) def summarize_address_range(first, last): """Summarize a network range given the first and last IP addresses. Example: >>> list(summarize_address_range(IPv4Address('192.0.2.0'), ... IPv4Address('192.0.2.130'))) ... #doctest: +NORMALIZE_WHITESPACE [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), IPv4Network('192.0.2.130/32')] Args: first: the first IPv4Address or IPv6Address in the range. last: the last IPv4Address or IPv6Address in the range. Returns: An iterator of the summarized IPv(4|6) network objects. Raise: TypeError: If the first and last objects are not IP addresses. If the first and last objects are not the same version. ValueError: If the last object is not greater than the first. If the version of the first address is not 4 or 6. """ if (not (isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress))): raise TypeError('first and last must be IP addresses, not networks') if first.version != last.version: raise TypeError("%s and %s are not of the same version" % ( first, last)) if first > last: raise ValueError('last IP address must be greater than first') if first.version == 4: ip = IPv4Network elif first.version == 6: ip = IPv6Network else: raise ValueError('unknown IP version') ip_bits = first._max_prefixlen first_int = first._ip last_int = last._ip while first_int <= last_int: nbits = min(_count_righthand_zero_bits(first_int, ip_bits), _compat_bit_length(last_int - first_int + 1) - 1) net = ip((first_int, ip_bits - nbits)) yield net first_int += 1 << nbits if first_int - 1 == ip._ALL_ONES: break def _collapse_addresses_internal(addresses): """Loops through the addresses, collapsing concurrent netblocks. Example: ip1 = IPv4Network('192.0.2.0/26') ip2 = IPv4Network('192.0.2.64/26') ip3 = IPv4Network('192.0.2.128/26') ip4 = IPv4Network('192.0.2.192/26') _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed. """ # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, # last.network_address <= net.network_address is a given. if last.broadcast_address >= net.broadcast_address: continue yield net last = net def collapse_addresses(addresses): """Collapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. 
Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects. """ addrs = [] ips = [] nets = [] # split IP addresses and networks for ip in addresses: if isinstance(ip, _BaseAddress): if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) ips.append(ip) elif ip._prefixlen == ip._max_prefixlen: if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) try: ips.append(ip.ip) except AttributeError: ips.append(ip.network_address) else: if nets and nets[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, nets[-1])) nets.append(ip) # sort and dedup ips = sorted(set(ips)) # find consecutive address ranges in the sorted sequence and summarize them if ips: for first, last in _find_address_range(ips): addrs.extend(summarize_address_range(first, last)) return _collapse_addresses_internal(addrs + nets) def get_mixed_type_key(obj): """Return a key suitable for sorting between networks and addresses. Address and Network objects are not sortable by default; they're fundamentally different so the expression IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') doesn't make any sense. There are some times however, where you may wish to have ipaddress sort these for you anyway. If you need to do this, you can use this function as the key= argument to sorted(). Args: obj: either a Network or Address object. Returns: appropriate key. """ if isinstance(obj, _BaseNetwork): return obj._get_networks_key() elif isinstance(obj, _BaseAddress): return obj._get_address_key() return NotImplemented class _IPAddressBase(_TotalOrderingMixin): """The mother class.""" __slots__ = () @property def exploded(self): """Return the longhand version of the IP address as a string.""" return self._explode_shorthand_ip_string() @property def compressed(self): """Return the shorthand version of the IP address as a string.""" return _compat_str(self) @property def reverse_pointer(self): """The name of the reverse DNS pointer for the IP address, e.g.: >>> ipaddress.ip_address("127.0.0.1").reverse_pointer '1.0.0.127.in-addr.arpa' >>> ipaddress.ip_address("2001:db8::1").reverse_pointer '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' """ return self._reverse_pointer() @property def version(self): msg = '%200s has no version specified' % (type(self),) raise NotImplementedError(msg) def _check_int_address(self, address): if address < 0: msg = "%d (< 0) is not permitted as an IPv%d address" raise AddressValueError(msg % (address, self._version)) if address > self._ALL_ONES: msg = "%d (>= 2**%d) is not permitted as an IPv%d address" raise AddressValueError(msg % (address, self._max_prefixlen, self._version)) def _check_packed_address(self, address, expected_len): address_len = len(address) if address_len != expected_len: msg = ( '%r (len %d != %d) is not permitted as an IPv%d address. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?') raise AddressValueError(msg % (address, address_len, expected_len, self._version)) @classmethod def _ip_int_from_prefix(cls, prefixlen): """Turn the prefix length into a bitwise netmask Args: prefixlen: An integer, the prefix length. Returns: An integer. """ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) @classmethod def _prefix_from_ip_int(cls, ip_int): """Return prefix length from the bitwise netmask. 
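
        For example (illustrative): for IPv4, the netmask integer
        0xffffff00 (i.e. 255.255.255.0) corresponds to a prefix length of 24.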
Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones """ trailing_zeroes = _count_righthand_zero_bits(ip_int, cls._max_prefixlen) prefixlen = cls._max_prefixlen - trailing_zeroes leading_ones = ip_int >> trailing_zeroes all_ones = (1 << prefixlen) - 1 if leading_ones != all_ones: byteslen = cls._max_prefixlen // 8 details = _compat_to_bytes(ip_int, byteslen, 'big') msg = 'Netmask pattern %r mixes zeroes & ones' raise ValueError(msg % details) return prefixlen @classmethod def _report_invalid_netmask(cls, netmask_str): msg = '%r is not a valid netmask' % netmask_str raise NetmaskValueError(msg) @classmethod def _prefix_from_prefix_string(cls, prefixlen_str): """Return prefix length from a numeric string Args: prefixlen_str: The string to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask """ # int allows a leading +/- as well as surrounding whitespace, # so we ensure that isn't the case if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): cls._report_invalid_netmask(prefixlen_str) try: prefixlen = int(prefixlen_str) except ValueError: cls._report_invalid_netmask(prefixlen_str) if not (0 <= prefixlen <= cls._max_prefixlen): cls._report_invalid_netmask(prefixlen_str) return prefixlen @classmethod def _prefix_from_ip_string(cls, ip_str): """Turn a netmask/hostmask string into a prefix length Args: ip_str: The netmask/hostmask to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask/hostmask """ # Parse the netmask/hostmask like an IP address. try: ip_int = cls._ip_int_from_string(ip_str) except AddressValueError: cls._report_invalid_netmask(ip_str) # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). # Note that the two ambiguous cases (all-ones and all-zeroes) are # treated as netmasks. try: return cls._prefix_from_ip_int(ip_int) except ValueError: pass # Invert the bits, and try matching a /0+1+/ hostmask instead. ip_int ^= cls._ALL_ONES try: return cls._prefix_from_ip_int(ip_int) except ValueError: cls._report_invalid_netmask(ip_str) def __reduce__(self): return self.__class__, (_compat_str(self),) class _BaseAddress(_IPAddressBase): """A generic IP object. This IP class contains the version independent methods which are used by single IP addresses. """ __slots__ = () def __int__(self): return self._ip def __eq__(self, other): try: return (self._ip == other._ip and self._version == other._version) except AttributeError: return NotImplemented def __lt__(self, other): if not isinstance(other, _IPAddressBase): return NotImplemented if not isinstance(other, _BaseAddress): raise TypeError('%s and %s are not of the same type' % ( self, other)) if self._version != other._version: raise TypeError('%s and %s are not of the same version' % ( self, other)) if self._ip != other._ip: return self._ip < other._ip return False # Shorthand for Integer addition and subtraction. This is not # meant to ever support addition/subtraction of addresses. 
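    # Illustrative examples (values follow from the integer conversion
    # performed by __add__/__sub__ below):
    #   IPv4Address('192.0.2.1') + 1 == IPv4Address('192.0.2.2')
    #   IPv6Address('2001:db8::2') - 1 == IPv6Address('2001:db8::1')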
def __add__(self, other): if not isinstance(other, _compat_int_types): return NotImplemented return self.__class__(int(self) + other) def __sub__(self, other): if not isinstance(other, _compat_int_types): return NotImplemented return self.__class__(int(self) - other) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) def __str__(self): return _compat_str(self._string_from_ip_int(self._ip)) def __hash__(self): return hash(hex(int(self._ip))) def _get_address_key(self): return (self._version, self) def __reduce__(self): return self.__class__, (self._ip,) class _BaseNetwork(_IPAddressBase): """A generic IP network object. This IP class contains the version independent methods which are used by networks. """ def __init__(self, address): self._cache = {} def __repr__(self): return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) def __str__(self): return '%s/%d' % (self.network_address, self.prefixlen) def hosts(self): """Generate Iterator over usable hosts in a network. This is like __iter__ except it doesn't return the network or broadcast addresses. """ network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network + 1, broadcast): yield self._address_class(x) def __iter__(self): network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network, broadcast + 1): yield self._address_class(x) def __getitem__(self, n): network = int(self.network_address) broadcast = int(self.broadcast_address) if n >= 0: if network + n > broadcast: raise IndexError('address out of range') return self._address_class(network + n) else: n += 1 if broadcast + n < network: raise IndexError('address out of range') return self._address_class(broadcast + n) def __lt__(self, other): if not isinstance(other, _IPAddressBase): return NotImplemented if not isinstance(other, _BaseNetwork): raise TypeError('%s and %s are not of the same type' % ( self, other)) if self._version != other._version: raise TypeError('%s and %s are not of the same version' % ( self, other)) if self.network_address != other.network_address: return self.network_address < other.network_address if self.netmask != other.netmask: return self.netmask < other.netmask return False def __eq__(self, other): try: return (self._version == other._version and self.network_address == other.network_address and int(self.netmask) == int(other.netmask)) except AttributeError: return NotImplemented def __hash__(self): return hash(int(self.network_address) ^ int(self.netmask)) def __contains__(self, other): # always false if one is v4 and the other is v6. if self._version != other._version: return False # dealing with another network. 
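        # Note: one network is never reported as being "in" another by this
        # operator; use subnet_of()/supernet_of() to test subnet containment.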
if isinstance(other, _BaseNetwork): return False # dealing with another address else: # address return (int(self.network_address) <= int(other._ip) <= int(self.broadcast_address)) def overlaps(self, other): """Tell if self is partly contained in other.""" return self.network_address in other or ( self.broadcast_address in other or ( other.network_address in self or ( other.broadcast_address in self))) @property def broadcast_address(self): x = self._cache.get('broadcast_address') if x is None: x = self._address_class(int(self.network_address) | int(self.hostmask)) self._cache['broadcast_address'] = x return x @property def hostmask(self): x = self._cache.get('hostmask') if x is None: x = self._address_class(int(self.netmask) ^ self._ALL_ONES) self._cache['hostmask'] = x return x @property def with_prefixlen(self): return '%s/%d' % (self.network_address, self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self.network_address, self.netmask) @property def with_hostmask(self): return '%s/%s' % (self.network_address, self.hostmask) @property def num_addresses(self): """Number of hosts in the current subnet.""" return int(self.broadcast_address) - int(self.network_address) + 1 @property def _address_class(self): # Returning bare address objects (rather than interfaces) allows for # more consistent behaviour across the network address, broadcast # address and individual host addresses. msg = '%200s has no associated address class' % (type(self),) raise NotImplementedError(msg) @property def prefixlen(self): return self._prefixlen def address_exclude(self, other): """Remove an address from a larger block. For example: addr1 = ip_network('192.0.2.0/28') addr2 = ip_network('192.0.2.1/32') list(addr1.address_exclude(addr2)) = [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] or IPv6: addr1 = ip_network('2001:db8::1/32') addr2 = ip_network('2001:db8::1/128') list(addr1.address_exclude(addr2)) = [ip_network('2001:db8::1/128'), ip_network('2001:db8::2/127'), ip_network('2001:db8::4/126'), ip_network('2001:db8::8/125'), ... ip_network('2001:db8:8000::/33')] Args: other: An IPv4Network or IPv6Network object of the same type. Returns: An iterator of the IPv(4|6)Network objects which is self minus other. Raises: TypeError: If self and other are of differing address versions, or if other is not a network object. ValueError: If other is not completely contained by self. """ if not self._version == other._version: raise TypeError("%s and %s are not of the same version" % ( self, other)) if not isinstance(other, _BaseNetwork): raise TypeError("%s is not a network object" % other) if not other.subnet_of(self): raise ValueError('%s not contained in %s' % (other, self)) if other == self: return # Make sure we're comparing the network of other. other = other.__class__('%s/%s' % (other.network_address, other.prefixlen)) s1, s2 = self.subnets() while s1 != other and s2 != other: if other.subnet_of(s1): yield s2 s1, s2 = s1.subnets() elif other.subnet_of(s2): yield s1 s1, s2 = s2.subnets() else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) if s1 == other: yield s2 elif s2 == other: yield s1 else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) def compare_networks(self, other): """Compare two IP objects. 
This is only concerned about the comparison of the integer representation of the network addresses. This means that the host bits aren't considered at all in this method. If you want to compare host bits, you can easily enough do a 'HostA._ip < HostB._ip' Args: other: An IP object. Returns: If the IP versions of self and other are the same, returns: -1 if self < other: eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') IPv6Network('2001:db8::1000/124') < IPv6Network('2001:db8::2000/124') 0 if self == other eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') IPv6Network('2001:db8::1000/124') == IPv6Network('2001:db8::1000/124') 1 if self > other eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') IPv6Network('2001:db8::2000/124') > IPv6Network('2001:db8::1000/124') Raises: TypeError if the IP versions are different. """ # does this need to raise a ValueError? if self._version != other._version: raise TypeError('%s and %s are not of the same type' % ( self, other)) # self._version == other._version below here: if self.network_address < other.network_address: return -1 if self.network_address > other.network_address: return 1 # self.network_address == other.network_address below here: if self.netmask < other.netmask: return -1 if self.netmask > other.netmask: return 1 return 0 def _get_networks_key(self): """Network-only key function. Returns an object that identifies this address' network and netmask. This function is a suitable "key" argument for sorted() and list.sort(). """ return (self._version, self.network_address, self.netmask) def subnets(self, prefixlen_diff=1, new_prefix=None): """The subnets which join to make the current subnet. In the case that self contains only one IP (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 for IPv6), yield an iterator with just ourself. Args: prefixlen_diff: An integer, the amount the prefix length should be increased by. This should not be set if new_prefix is also set. new_prefix: The desired new prefix length. This must be a larger number (smaller prefix) than the existing prefix. This should not be set if prefixlen_diff is also set. Returns: An iterator of IPv(4|6) objects. Raises: ValueError: The prefixlen_diff is too small or too large. OR prefixlen_diff and new_prefix are both set or new_prefix is a smaller number than the current prefix (smaller number means a larger network) """ if self._prefixlen == self._max_prefixlen: yield self return if new_prefix is not None: if new_prefix < self._prefixlen: raise ValueError('new prefix must be longer') if prefixlen_diff != 1: raise ValueError('cannot set prefixlen_diff and new_prefix') prefixlen_diff = new_prefix - self._prefixlen if prefixlen_diff < 0: raise ValueError('prefix length diff must be > 0') new_prefixlen = self._prefixlen + prefixlen_diff if new_prefixlen > self._max_prefixlen: raise ValueError( 'prefix length diff %d is invalid for netblock %s' % ( new_prefixlen, self)) start = int(self.network_address) end = int(self.broadcast_address) + 1 step = (int(self.hostmask) + 1) >> prefixlen_diff for new_addr in _compat_range(start, end, step): current = self.__class__((new_addr, new_prefixlen)) yield current def supernet(self, prefixlen_diff=1, new_prefix=None): """The supernet containing the current network. Args: prefixlen_diff: An integer, the amount the prefix length of the network should be decreased by. For example, given a /24 network and a prefixlen_diff of 3, a supernet with a /21 netmask is returned. Returns: An IPv4 network object. 
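
        Example (illustrative):

            >>> IPv4Network('192.0.2.0/24').supernet(prefixlen_diff=3)
            IPv4Network('192.0.0.0/21')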
        Raises:
            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
              a negative prefix length.
                OR
              If prefixlen_diff and new_prefix are both set or new_prefix is a
              larger number than the current prefix (larger number means a
              smaller network)

        """
        if self._prefixlen == 0:
            return self

        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix

        new_prefixlen = self.prefixlen - prefixlen_diff
        if new_prefixlen < 0:
            raise ValueError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        return self.__class__((
            int(self.network_address) & (int(self.netmask) << prefixlen_diff),
            new_prefixlen))

    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is a multicast address.
            See RFC 2373 2.7 for details.

        """
        return (self.network_address.is_multicast and
                self.broadcast_address.is_multicast)

    @staticmethod
    def _is_subnet_of(a, b):
        try:
            # Always false if one is v4 and the other is v6.
            if a._version != b._version:
                raise TypeError("%s and %s are not of the same version" %
                                (a, b))
            return (b.network_address <= a.network_address and
                    b.broadcast_address >= a.broadcast_address)
        except AttributeError:
            raise TypeError("Unable to test subnet containment "
                            "between %s and %s" % (a, b))

    def subnet_of(self, other):
        """Return True if this network is a subnet of other."""
        return self._is_subnet_of(self, other)

    def supernet_of(self, other):
        """Return True if this network is a supernet of other."""
        return self._is_subnet_of(other, self)

    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.

        """
        return (self.network_address.is_reserved and
                self.broadcast_address.is_reserved)

    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is reserved per RFC 4291.

        """
        return (self.network_address.is_link_local and
                self.broadcast_address.is_link_local)

    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per
            iana-ipv4-special-registry or iana-ipv6-special-registry.

        """
        return (self.network_address.is_private and
                self.broadcast_address.is_private)

    @property
    def is_global(self):
        """Test if this address is allocated for public networks.

        Returns:
            A boolean, True if the address is not reserved per
            iana-ipv4-special-registry or iana-ipv6-special-registry.

        """
        return not self.is_private

    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.

        """
        return (self.network_address.is_unspecified and
                self.broadcast_address.is_unspecified)

    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.

        """
        return (self.network_address.is_loopback and
                self.broadcast_address.is_loopback)


class _BaseV4(object):
    """Base IPv4 object.

    The following methods are used by IPv4 objects in both single IP
    addresses and networks.

    """

    __slots__ = ()
    _version = 4
    # Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1 _DECIMAL_DIGITS = frozenset('0123456789') # the valid octets for host and netmasks. only useful for IPv4. _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) _max_prefixlen = IPV4LENGTH # There are only a handful of valid v4 netmasks, so we cache them all # when constructed (see _make_netmask()). _netmask_cache = {} def _explode_shorthand_ip_string(self): return _compat_str(self) @classmethod def _make_netmask(cls, arg): """Make a (netmask, prefix_len) tuple from the given argument. Argument can be: - an integer (the prefix length) - a string representing the prefix length (e.g. "24") - a string representing the prefix netmask (e.g. "255.255.255.0") """ if arg not in cls._netmask_cache: if isinstance(arg, _compat_int_types): prefixlen = arg else: try: # Check for a netmask in prefix length form prefixlen = cls._prefix_from_prefix_string(arg) except NetmaskValueError: # Check for a netmask or hostmask in dotted-quad form. # This may raise NetmaskValueError. prefixlen = cls._prefix_from_ip_string(arg) netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) cls._netmask_cache[arg] = netmask, prefixlen return cls._netmask_cache[arg] @classmethod def _ip_int_from_string(cls, ip_str): """Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address. """ if not ip_str: raise AddressValueError('Address cannot be empty') octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError("Expected 4 octets in %r" % ip_str) try: return _compat_int_from_byte_vals( map(cls._parse_octet, octets), 'big') except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) @classmethod def _parse_octet(cls, octet_str): """Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255]. """ if not octet_str: raise ValueError("Empty octet not permitted") # Whitelist the characters, since int() allows a lot of bizarre stuff. if not cls._DECIMAL_DIGITS.issuperset(octet_str): msg = "Only decimal digits permitted in %r" raise ValueError(msg % octet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(octet_str) > 3: msg = "At most 3 characters permitted in %r" raise ValueError(msg % octet_str) # Convert to integer (we know digits are legal) octet_int = int(octet_str, 10) # Any octets that look like they *might* be written in octal, # and which don't look exactly the same in both octal and # decimal are rejected as ambiguous if octet_int > 7 and octet_str[0] == '0': msg = "Ambiguous (octal/decimal) value in %r not permitted" raise ValueError(msg % octet_str) if octet_int > 255: raise ValueError("Octet %d (> 255) not permitted" % octet_int) return octet_int @classmethod def _string_from_ip_int(cls, ip_int): """Turns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation. """ return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] if isinstance(b, bytes) else b) for b in _compat_to_bytes(ip_int, 4, 'big')) def _is_hostmask(self, ip_str): """Test if the IP string is a hostmask (rather than a netmask). Args: ip_str: A string, the potential hostmask. Returns: A boolean, True if the IP string is a hostmask. 
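
        For example (illustrative): '0.0.0.255' is a hostmask, while
        '255.255.255.0' is not.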
""" bits = ip_str.split('.') try: parts = [x for x in map(int, bits) if x in self._valid_mask_octets] except ValueError: return False if len(parts) != len(bits): return False if parts[0] < parts[-1]: return True return False def _reverse_pointer(self): """Return the reverse DNS pointer name for the IPv4 address. This implements the method described in RFC1035 3.5. """ reverse_octets = _compat_str(self).split('.')[::-1] return '.'.join(reverse_octets) + '.in-addr.arpa' @property def max_prefixlen(self): return self._max_prefixlen @property def version(self): return self._version class IPv4Address(_BaseV4, _BaseAddress): """Represent and manipulate single IPv4 Addresses.""" __slots__ = ('_ip', '__weakref__') def __init__(self, address): """ Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv4Address('192.0.2.1') == IPv4Address(3221225985). or, more generally IPv4Address(int(IPv4Address('192.0.2.1'))) == IPv4Address('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. """ # Efficient constructor from integer. if isinstance(address, _compat_int_types): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 4) bvs = _compat_bytes_to_byte_vals(address) self._ip = _compat_int_from_byte_vals(bvs, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = _compat_str(address) if '/' in addr_str: raise AddressValueError("Unexpected '/' in %r" % address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v4_int_to_packed(self._ip) @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within the reserved IPv4 Network range. """ return self in self._constants._reserved_network @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry. """ return any(self in net for net in self._constants._private_networks) @property def is_global(self): return ( self not in self._constants._public_network and not self.is_private) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is multicast. See RFC 3171 for details. """ return self in self._constants._multicast_network @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 5735 3. """ return self == self._constants._unspecified_address @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback per RFC 3330. """ return self in self._constants._loopback_network @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is link-local per RFC 3927. 
""" return self in self._constants._linklocal_network class IPv4Interface(IPv4Address): def __init__(self, address): if isinstance(address, (bytes, _compat_int_types)): IPv4Address.__init__(self, address) self.network = IPv4Network(self._ip) self._prefixlen = self._max_prefixlen return if isinstance(address, tuple): IPv4Address.__init__(self, address[0]) if len(address) > 1: self._prefixlen = int(address[1]) else: self._prefixlen = self._max_prefixlen self.network = IPv4Network(address, strict=False) self.netmask = self.network.netmask self.hostmask = self.network.hostmask return addr = _split_optional_netmask(address) IPv4Address.__init__(self, addr[0]) self.network = IPv4Network(address, strict=False) self._prefixlen = self.network._prefixlen self.netmask = self.network.netmask self.hostmask = self.network.hostmask def __str__(self): return '%s/%d' % (self._string_from_ip_int(self._ip), self.network.prefixlen) def __eq__(self, other): address_equal = IPv4Address.__eq__(self, other) if not address_equal or address_equal is NotImplemented: return address_equal try: return self.network == other.network except AttributeError: # An interface with an associated network is NOT the # same as an unassociated address. That's why the hash # takes the extra info into account. return False def __lt__(self, other): address_less = IPv4Address.__lt__(self, other) if address_less is NotImplemented: return NotImplemented try: return (self.network < other.network or self.network == other.network and address_less) except AttributeError: # We *do* allow addresses and interfaces to be sorted. The # unassociated address is considered less than all interfaces. return False def __hash__(self): return self._ip ^ self._prefixlen ^ int(self.network.network_address) __reduce__ = _IPAddressBase.__reduce__ @property def ip(self): return IPv4Address(self._ip) @property def with_prefixlen(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.netmask) @property def with_hostmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.hostmask) class IPv4Network(_BaseV4, _BaseNetwork): """This class represents and manipulates 32-bit IPv4 network + addresses.. Attributes: [examples for IPv4Network('192.0.2.0/27')] .network_address: IPv4Address('192.0.2.0') .hostmask: IPv4Address('0.0.0.31') .broadcast_address: IPv4Address('192.0.2.32') .netmask: IPv4Address('255.255.255.224') .prefixlen: 27 """ # Class to use when creating address objects _address_class = IPv4Address def __init__(self, address, strict=True): """Instantiate a new IPv4 network object. Args: address: A string or integer representing the IP [& network]. '192.0.2.0/24' '192.0.2.0/255.255.255.0' '192.0.0.2/0.0.0.255' are all functionally the same in IPv4. Similarly, '192.0.2.1' '192.0.2.1/255.255.255.255' '192.0.2.1/32' are also functionally equivalent. That is to say, failing to provide a subnetmask will create an object with a mask of /32. If the mask (portion after the / in the argument) is given in dotted quad form, it is treated as a netmask if it starts with a non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it starts with a zero field (e.g. 0.255.255.255 == /8), with the single exception of an all-zero mask which is treated as a netmask == /0. If no mask is given, a default of /32 is used. 
Additionally, an integer can be passed, so IPv4Network('192.0.2.1') == IPv4Network(3221225985) or, more generally IPv4Interface(int(IPv4Interface('192.0.2.1'))) == IPv4Interface('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. NetmaskValueError: If the netmask isn't valid for an IPv4 address. ValueError: If strict is True and a network address is not supplied. """ _BaseNetwork.__init__(self, address) # Constructing from a packed address or integer if isinstance(address, (_compat_int_types, bytes)): self.network_address = IPv4Address(address) self.netmask, self._prefixlen = self._make_netmask( self._max_prefixlen) # fixme: address/network test here. return if isinstance(address, tuple): if len(address) > 1: arg = address[1] else: # We weren't given an address[1] arg = self._max_prefixlen self.network_address = IPv4Address(address[0]) self.netmask, self._prefixlen = self._make_netmask(arg) packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: raise ValueError('%s has host bits set' % self) else: self.network_address = IPv4Address(packed & int(self.netmask)) return # Assume input argument to be string or any object representation # which converts into a formatted IP prefix string. addr = _split_optional_netmask(address) self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) if len(addr) == 2: arg = addr[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) if strict: if (IPv4Address(int(self.network_address) & int(self.netmask)) != self.network_address): raise ValueError('%s has host bits set' % self) self.network_address = IPv4Address(int(self.network_address) & int(self.netmask)) if self._prefixlen == (self._max_prefixlen - 1): self.hosts = self.__iter__ @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry. """ return (not (self.network_address in IPv4Network('100.64.0.0/10') and self.broadcast_address in IPv4Network('100.64.0.0/10')) and not self.is_private) class _IPv4Constants(object): _linklocal_network = IPv4Network('169.254.0.0/16') _loopback_network = IPv4Network('127.0.0.0/8') _multicast_network = IPv4Network('224.0.0.0/4') _public_network = IPv4Network('100.64.0.0/10') _private_networks = [ IPv4Network('0.0.0.0/8'), IPv4Network('10.0.0.0/8'), IPv4Network('127.0.0.0/8'), IPv4Network('169.254.0.0/16'), IPv4Network('172.16.0.0/12'), IPv4Network('192.0.0.0/29'), IPv4Network('192.0.0.170/31'), IPv4Network('192.0.2.0/24'), IPv4Network('192.168.0.0/16'), IPv4Network('198.18.0.0/15'), IPv4Network('198.51.100.0/24'), IPv4Network('203.0.113.0/24'), IPv4Network('240.0.0.0/4'), IPv4Network('255.255.255.255/32'), ] _reserved_network = IPv4Network('240.0.0.0/4') _unspecified_address = IPv4Address('0.0.0.0') IPv4Address._constants = _IPv4Constants class _BaseV6(object): """Base IPv6 object. The following methods are used by IPv6 objects in both single IP addresses and networks. """ __slots__ = () _version = 6 _ALL_ONES = (2 ** IPV6LENGTH) - 1 _HEXTET_COUNT = 8 _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') _max_prefixlen = IPV6LENGTH # There are only a bunch of valid v6 netmasks, so we cache them all # when constructed (see _make_netmask()). _netmask_cache = {} @classmethod def _make_netmask(cls, arg): """Make a (netmask, prefix_len) tuple from the given argument. 
Argument can be: - an integer (the prefix length) - a string representing the prefix length (e.g. "24") - a string representing the prefix netmask (e.g. "255.255.255.0") """ if arg not in cls._netmask_cache: if isinstance(arg, _compat_int_types): prefixlen = arg else: prefixlen = cls._prefix_from_prefix_string(arg) netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) cls._netmask_cache[arg] = netmask, prefixlen return cls._netmask_cache[arg] @classmethod def _ip_int_from_string(cls, ip_str): """Turn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: An int, the IPv6 address Raises: AddressValueError: if ip_str isn't a valid IPv6 Address. """ if not ip_str: raise AddressValueError('Address cannot be empty') parts = ip_str.split(':') # An IPv6 address needs at least 2 colons (3 parts). _min_parts = 3 if len(parts) < _min_parts: msg = "At least %d parts expected in %r" % (_min_parts, ip_str) raise AddressValueError(msg) # If the address has an IPv4-style suffix, convert it to hexadecimal. if '.' in parts[-1]: try: ipv4_int = IPv4Address(parts.pop())._ip except AddressValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) parts.append('%x' % (ipv4_int & 0xFFFF)) # An IPv6 address can't have more than 8 colons (9 parts). # The extra colon comes from using the "::" notation for a single # leading or trailing zero part. _max_parts = cls._HEXTET_COUNT + 1 if len(parts) > _max_parts: msg = "At most %d colons permitted in %r" % ( _max_parts - 1, ip_str) raise AddressValueError(msg) # Disregarding the endpoints, find '::' with nothing in between. # This indicates that a run of zeroes has been skipped. skip_index = None for i in _compat_range(1, len(parts) - 1): if not parts[i]: if skip_index is not None: # Can't have more than one '::' msg = "At most one '::' permitted in %r" % ip_str raise AddressValueError(msg) skip_index = i # parts_hi is the number of parts to copy from above/before the '::' # parts_lo is the number of parts to copy from below/after the '::' if skip_index is not None: # If we found a '::', then check if it also covers the endpoints. parts_hi = skip_index parts_lo = len(parts) - skip_index - 1 if not parts[0]: parts_hi -= 1 if parts_hi: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: parts_lo -= 1 if parts_lo: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) if parts_skipped < 1: msg = "Expected at most %d other parts with '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) else: # Otherwise, allocate the entire address to parts_hi. The # endpoints could still be empty, but _parse_hextet() will check # for that. if len(parts) != cls._HEXTET_COUNT: msg = "Exactly %d parts expected without '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) if not parts[0]: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_hi = len(parts) parts_lo = 0 parts_skipped = 0 try: # Now, parse the hextets into a 128-bit integer. 
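            # Worked illustration (for clarity): parsing '2001:db8::1' gives
            # parts_hi=2 ('2001', 'db8'), parts_skipped=5 zero hextets from
            # the '::', and parts_lo=1 ('1').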
ip_int = 0 for i in range(parts_hi): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) ip_int <<= 16 * parts_skipped for i in range(-parts_lo, 0): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) return ip_int except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) @classmethod def _parse_hextet(cls, hextet_str): """Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF]. """ # Whitelist the characters, since int() allows a lot of bizarre stuff. if not cls._HEX_DIGITS.issuperset(hextet_str): raise ValueError("Only hex digits permitted in %r" % hextet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(hextet_str) > 4: msg = "At most 4 characters permitted in %r" raise ValueError(msg % hextet_str) # Length check means we can skip checking the integer value return int(hextet_str, 16) @classmethod def _compress_hextets(cls, hextets): """Compresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings. """ best_doublecolon_start = -1 best_doublecolon_len = 0 doublecolon_start = -1 doublecolon_len = 0 for index, hextet in enumerate(hextets): if hextet == '0': doublecolon_len += 1 if doublecolon_start == -1: # Start of a sequence of zeros. doublecolon_start = index if doublecolon_len > best_doublecolon_len: # This is the longest sequence of zeros so far. best_doublecolon_len = doublecolon_len best_doublecolon_start = doublecolon_start else: doublecolon_len = 0 doublecolon_start = -1 if best_doublecolon_len > 1: best_doublecolon_end = (best_doublecolon_start + best_doublecolon_len) # For zeros at the end of the address. if best_doublecolon_end == len(hextets): hextets += [''] hextets[best_doublecolon_start:best_doublecolon_end] = [''] # For zeros at the beginning of the address. if best_doublecolon_start == 0: hextets = [''] + hextets return hextets @classmethod def _string_from_ip_int(cls, ip_int=None): """Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones. """ if ip_int is None: ip_int = int(cls._ip) if ip_int > cls._ALL_ONES: raise ValueError('IPv6 address is too large') hex_str = '%032x' % ip_int hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] hextets = cls._compress_hextets(hextets) return ':'.join(hextets) def _explode_shorthand_ip_string(self): """Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address. 
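
        For example (illustrative): IPv6Address('2001:db8::1').exploded
        returns '2001:0db8:0000:0000:0000:0000:0000:0001'.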
""" if isinstance(self, IPv6Network): ip_str = _compat_str(self.network_address) elif isinstance(self, IPv6Interface): ip_str = _compat_str(self.ip) else: ip_str = _compat_str(self) ip_int = self._ip_int_from_string(ip_str) hex_str = '%032x' % ip_int parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] if isinstance(self, (_BaseNetwork, IPv6Interface)): return '%s/%d' % (':'.join(parts), self._prefixlen) return ':'.join(parts) def _reverse_pointer(self): """Return the reverse DNS pointer name for the IPv6 address. This implements the method described in RFC3596 2.5. """ reverse_chars = self.exploded[::-1].replace(':', '') return '.'.join(reverse_chars) + '.ip6.arpa' @property def max_prefixlen(self): return self._max_prefixlen @property def version(self): return self._version class IPv6Address(_BaseV6, _BaseAddress): """Represent and manipulate single IPv6 Addresses.""" __slots__ = ('_ip', '__weakref__') def __init__(self, address): """Instantiate a new IPv6 address object. Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560) or, more generally IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::') Raises: AddressValueError: If address isn't a valid IPv6 address. """ # Efficient constructor from integer. if isinstance(address, _compat_int_types): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 16) bvs = _compat_bytes_to_byte_vals(address) self._ip = _compat_int_from_byte_vals(bvs, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = _compat_str(address) if '/' in addr_str: raise AddressValueError("Unexpected '/' in %r" % address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v6_int_to_packed(self._ip) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. """ return self in self._constants._multicast_network @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. """ return any(self in x for x in self._constants._reserved_networks) @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. """ return self in self._constants._linklocal_network @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ return self in self._constants._sitelocal_network @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv6-special-registry. """ return any(self in net for net in self._constants._private_networks) @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, true if the address is not reserved per iana-ipv6-special-registry. 
""" return not self.is_private @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. """ return self._ip == 0 @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. """ return self._ip == 1 @property def ipv4_mapped(self): """Return the IPv4 mapped address. Returns: If the IPv6 address is a v4 mapped address, return the IPv4 mapped address. Return None otherwise. """ if (self._ip >> 32) != 0xFFFF: return None return IPv4Address(self._ip & 0xFFFFFFFF) @property def teredo(self): """Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) """ if (self._ip >> 96) != 0x20010000: return None return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), IPv4Address(~self._ip & 0xFFFFFFFF)) @property def sixtofour(self): """Return the IPv4 6to4 embedded address. Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. """ if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) class IPv6Interface(IPv6Address): def __init__(self, address): if isinstance(address, (bytes, _compat_int_types)): IPv6Address.__init__(self, address) self.network = IPv6Network(self._ip) self._prefixlen = self._max_prefixlen return if isinstance(address, tuple): IPv6Address.__init__(self, address[0]) if len(address) > 1: self._prefixlen = int(address[1]) else: self._prefixlen = self._max_prefixlen self.network = IPv6Network(address, strict=False) self.netmask = self.network.netmask self.hostmask = self.network.hostmask return addr = _split_optional_netmask(address) IPv6Address.__init__(self, addr[0]) self.network = IPv6Network(address, strict=False) self.netmask = self.network.netmask self._prefixlen = self.network._prefixlen self.hostmask = self.network.hostmask def __str__(self): return '%s/%d' % (self._string_from_ip_int(self._ip), self.network.prefixlen) def __eq__(self, other): address_equal = IPv6Address.__eq__(self, other) if not address_equal or address_equal is NotImplemented: return address_equal try: return self.network == other.network except AttributeError: # An interface with an associated network is NOT the # same as an unassociated address. That's why the hash # takes the extra info into account. return False def __lt__(self, other): address_less = IPv6Address.__lt__(self, other) if address_less is NotImplemented: return NotImplemented try: return (self.network < other.network or self.network == other.network and address_less) except AttributeError: # We *do* allow addresses and interfaces to be sorted. The # unassociated address is considered less than all interfaces. 
return False def __hash__(self): return self._ip ^ self._prefixlen ^ int(self.network.network_address) __reduce__ = _IPAddressBase.__reduce__ @property def ip(self): return IPv6Address(self._ip) @property def with_prefixlen(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.netmask) @property def with_hostmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.hostmask) @property def is_unspecified(self): return self._ip == 0 and self.network.is_unspecified @property def is_loopback(self): return self._ip == 1 and self.network.is_loopback class IPv6Network(_BaseV6, _BaseNetwork): """This class represents and manipulates 128-bit IPv6 networks. Attributes: [examples for IPv6('2001:db8::1000/124')] .network_address: IPv6Address('2001:db8::1000') .hostmask: IPv6Address('::f') .broadcast_address: IPv6Address('2001:db8::100f') .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') .prefixlen: 124 """ # Class to use when creating address objects _address_class = IPv6Address def __init__(self, address, strict=True): """Instantiate a new IPv6 Network object. Args: address: A string or integer representing the IPv6 network or the IP and prefix/netmask. '2001:db8::/128' '2001:db8:0000:0000:0000:0000:0000:0000/128' '2001:db8::' are all functionally the same in IPv6. That is to say, failing to provide a subnetmask will create an object with a mask of /128. Additionally, an integer can be passed, so IPv6Network('2001:db8::') == IPv6Network(42540766411282592856903984951653826560) or, more generally IPv6Network(int(IPv6Network('2001:db8::'))) == IPv6Network('2001:db8::') strict: A boolean. If true, ensure that we have been passed A true network address, eg, 2001:db8::1000/124 and not an IP address on a network, eg, 2001:db8::1/124. Raises: AddressValueError: If address isn't a valid IPv6 address. NetmaskValueError: If the netmask isn't valid for an IPv6 address. ValueError: If strict was True and a network address was not supplied. """ _BaseNetwork.__init__(self, address) # Efficient constructor from integer or packed address if isinstance(address, (bytes, _compat_int_types)): self.network_address = IPv6Address(address) self.netmask, self._prefixlen = self._make_netmask( self._max_prefixlen) return if isinstance(address, tuple): if len(address) > 1: arg = address[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) self.network_address = IPv6Address(address[0]) packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: raise ValueError('%s has host bits set' % self) else: self.network_address = IPv6Address(packed & int(self.netmask)) return # Assume input argument to be string or any object representation # which converts into a formatted IP prefix string. addr = _split_optional_netmask(address) self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) if len(addr) == 2: arg = addr[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) if strict: if (IPv6Address(int(self.network_address) & int(self.netmask)) != self.network_address): raise ValueError('%s has host bits set' % self) self.network_address = IPv6Address(int(self.network_address) & int(self.netmask)) if self._prefixlen == (self._max_prefixlen - 1): self.hosts = self.__iter__ def hosts(self): """Generate Iterator over usable hosts in a network. 
This is like __iter__ except it doesn't return the Subnet-Router anycast address. """ network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network + 1, broadcast + 1): yield self._address_class(x) @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ return (self.network_address.is_site_local and self.broadcast_address.is_site_local) class _IPv6Constants(object): _linklocal_network = IPv6Network('fe80::/10') _multicast_network = IPv6Network('ff00::/8') _private_networks = [ IPv6Network('::1/128'), IPv6Network('::/128'), IPv6Network('::ffff:0:0/96'), IPv6Network('100::/64'), IPv6Network('2001::/23'), IPv6Network('2001:2::/48'), IPv6Network('2001:db8::/32'), IPv6Network('2001:10::/28'), IPv6Network('fc00::/7'), IPv6Network('fe80::/10'), ] _reserved_networks = [ IPv6Network('::/8'), IPv6Network('100::/8'), IPv6Network('200::/7'), IPv6Network('400::/6'), IPv6Network('800::/5'), IPv6Network('1000::/4'), IPv6Network('4000::/3'), IPv6Network('6000::/3'), IPv6Network('8000::/3'), IPv6Network('A000::/3'), IPv6Network('C000::/3'), IPv6Network('E000::/4'), IPv6Network('F000::/5'), IPv6Network('F800::/6'), IPv6Network('FE00::/9'), ] _sitelocal_network = IPv6Network('fec0::/10') IPv6Address._constants = _IPv6Constants
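

# Example usage (illustrative; values follow from the definitions above):
#
#   >>> ip_network(u'2001:db8::/125').num_addresses
#   8
#   >>> list(ip_network(u'192.0.2.0/30').hosts())
#   [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2')]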
79852
31.997107
86
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/distro.py
# Copyright 2015,2016 Nir Cohen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The ``distro`` package (``distro`` stands for Linux Distribution) provides information about the Linux distribution it runs on, such as a reliable machine-readable distro ID, or version information. It is a renewed alternative implementation for Python's original :py:func:`platform.linux_distribution` function, but it provides much more functionality. An alternative implementation became necessary because Python 3.5 deprecated this function, and Python 3.7 is expected to remove it altogether. Its predecessor function :py:func:`platform.dist` was already deprecated since Python 2.6 and is also expected to be removed in Python 3.7. Still, there are many cases in which access to Linux distribution information is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for more information. """ import os import re import sys import json import shlex import logging import argparse import subprocess _UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc') _OS_RELEASE_BASENAME = 'os-release' #: Translation table for normalizing the "ID" attribute defined in os-release #: files, for use by the :func:`distro.id` method. #: #: * Key: Value as defined in the os-release file, translated to lower case, #: with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_OS_ID = {} #: Translation table for normalizing the "Distributor ID" attribute returned by #: the lsb_release command, for use by the :func:`distro.id` method. #: #: * Key: Value as returned by the lsb_release command, translated to lower #: case, with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_LSB_ID = { 'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server } #: Translation table for normalizing the distro ID derived from the file name #: of distro release files, for use by the :func:`distro.id` method. #: #: * Key: Value as derived from the file name of a distro release file, #: translated to lower case, with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_DISTRO_ID = { 'redhat': 'rhel', # RHEL 6.x, 7.x } # Pattern for content of distro release file (reversed) _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( r'(?:[^)]*\)(.*)\()? 
*(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)') # Pattern for base file name of distro release file _DISTRO_RELEASE_BASENAME_PATTERN = re.compile( r'(\w+)[-_](release|version)$') # Base file names to be ignored when searching for distro release file _DISTRO_RELEASE_IGNORE_BASENAMES = ( 'debian_version', 'lsb-release', 'oem-release', _OS_RELEASE_BASENAME, 'system-release' ) def linux_distribution(full_distribution_name=True): """ Return information about the current Linux distribution as a tuple ``(id_name, version, codename)`` with items as follows: * ``id_name``: If *full_distribution_name* is false, the result of :func:`distro.id`. Otherwise, the result of :func:`distro.name`. * ``version``: The result of :func:`distro.version`. * ``codename``: The result of :func:`distro.codename`. The interface of this function is compatible with the original :py:func:`platform.linux_distribution` function, supporting a subset of its parameters. The data it returns may not exactly be the same, because it uses more data sources than the original function, and that may lead to different data if the Linux distribution is not consistent across multiple data sources it provides (there are indeed such distributions ...). Another reason for differences is the fact that the :func:`distro.id` method normalizes the distro ID string to a reliable machine-readable value for a number of popular Linux distributions. """ return _distro.linux_distribution(full_distribution_name) def id(): """ Return the distro ID of the current Linux distribution, as a machine-readable string. For a number of Linux distributions, the returned distro ID value is *reliable*, in the sense that it is documented and that it does not change across releases of the distribution. This package maintains the following reliable distro ID values: ============== ========================================= Distro ID Distribution ============== ========================================= "ubuntu" Ubuntu "debian" Debian "rhel" RedHat Enterprise Linux "centos" CentOS "fedora" Fedora "sles" SUSE Linux Enterprise Server "opensuse" openSUSE "amazon" Amazon Linux "arch" Arch Linux "cloudlinux" CloudLinux OS "exherbo" Exherbo Linux "gentoo" GenToo Linux "ibm_powerkvm" IBM PowerKVM "kvmibm" KVM for IBM z Systems "linuxmint" Linux Mint "mageia" Mageia "mandriva" Mandriva Linux "parallels" Parallels "pidora" Pidora "raspbian" Raspbian "oracle" Oracle Linux (and Oracle Enterprise Linux) "scientific" Scientific Linux "slackware" Slackware "xenserver" XenServer ============== ========================================= If you have a need to get distros for reliable IDs added into this set, or if you find that the :func:`distro.id` function returns a different distro ID for one of the listed distros, please create an issue in the `distro issue tracker`_. **Lookup hierarchy and transformations:** First, the ID is obtained from the following sources, in the specified order. The first available and non-empty value is used: * the value of the "ID" attribute of the os-release file, * the value of the "Distributor ID" attribute returned by the lsb_release command, * the first part of the file name of the distro release file, The so determined ID value then passes the following transformations, before it is returned by this method: * it is translated to lower case, * blanks (which should not be there anyway) are translated to underscores, * a normalization of the ID is performed, based upon `normalization tables`_. 
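
    For example (illustrative, using the normalization tables defined at
    module level): an lsb_release "Distributor ID" of
    "RedHatEnterpriseServer" is lowercased to "redhatenterpriseserver" and
    then normalized to "rhel" via NORMALIZED_LSB_ID.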
    The purpose of this normalization is to ensure that the ID is as
    reliable as possible, even across incompatible changes in the Linux
    distributions. A common reason for an incompatible change is the
    addition of an os-release file, or the addition of the lsb_release
    command, with ID values that differ from what was previously determined
    from the distro release file name.
    """
    return _distro.id()


def name(pretty=False):
    """
    Return the name of the current Linux distribution, as a human-readable
    string.

    If *pretty* is false, the name is returned without version or codename.
    (e.g. "CentOS Linux")

    If *pretty* is true, the version and codename are appended.
    (e.g. "CentOS Linux 7.1.1503 (Core)")

    **Lookup hierarchy:**

    The name is obtained from the following sources, in the specified order.
    The first available and non-empty value is used:

    * If *pretty* is false:

      - the value of the "NAME" attribute of the os-release file,

      - the value of the "Distributor ID" attribute returned by the
        lsb_release command,

      - the value of the "<name>" field of the distro release file.

    * If *pretty* is true:

      - the value of the "PRETTY_NAME" attribute of the os-release file,

      - the value of the "Description" attribute returned by the lsb_release
        command,

      - the value of the "<name>" field of the distro release file, appended
        with the value of the pretty version ("<version_id>" and "<codename>"
        fields) of the distro release file, if available.
    """
    return _distro.name(pretty)


def version(pretty=False, best=False):
    """
    Return the version of the current Linux distribution, as a human-readable
    string.

    If *pretty* is false, the version is returned without codename (e.g.
    "7.0").

    If *pretty* is true, the codename in parentheses is appended, if the
    codename is non-empty (e.g. "7.0 (Maipo)").

    Some distributions provide version numbers with different precisions in
    the different sources of distribution information. Examining the different
    sources in a fixed priority order does not always yield the most precise
    version (e.g. for Debian 8.2, or CentOS 7.1).

    The *best* parameter can be used to control the approach for the returned
    version:

    If *best* is false, the first non-empty version number in priority order
    of the examined sources is returned.

    If *best* is true, the most precise version number out of all examined
    sources is returned.

    **Lookup hierarchy:**

    In all cases, the version number is obtained from the following sources.
    If *best* is false, this order represents the priority order:

    * the value of the "VERSION_ID" attribute of the os-release file,

    * the value of the "Release" attribute returned by the lsb_release
      command,

    * the version number parsed from the "<version_id>" field of the first
      line of the distro release file,

    * the version number parsed from the "PRETTY_NAME" attribute of the
      os-release file, if it follows the format of the distro release files.

    * the version number parsed from the "Description" attribute returned by
      the lsb_release command, if it follows the format of the distro release
      files.
    """
    return _distro.version(pretty, best)


def version_parts(best=False):
    """
    Return the version of the current Linux distribution as a tuple
    ``(major, minor, build_number)`` with items as follows:

    * ``major``: The result of :func:`distro.major_version`.

    * ``minor``: The result of :func:`distro.minor_version`.

    * ``build_number``: The result of :func:`distro.build_number`.

    For a description of the *best* parameter, see the :func:`distro.version`
    method.
""" return _distro.version_parts(best) def major_version(best=False): """ Return the major version of the current Linux distribution, as a string, if provided. Otherwise, the empty string is returned. The major version is the first part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.major_version(best) def minor_version(best=False): """ Return the minor version of the current Linux distribution, as a string, if provided. Otherwise, the empty string is returned. The minor version is the second part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.minor_version(best) def build_number(best=False): """ Return the build number of the current Linux distribution, as a string, if provided. Otherwise, the empty string is returned. The build number is the third part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.build_number(best) def like(): """ Return a space-separated list of distro IDs of distributions that are closely related to the current Linux distribution in regards to packaging and programming interfaces, for example distributions the current distribution is a derivative from. **Lookup hierarchy:** This information item is only provided by the os-release file. For details, see the description of the "ID_LIKE" attribute in the `os-release man page <http://www.freedesktop.org/software/systemd/man/os-release.html>`_. """ return _distro.like() def codename(): """ Return the codename for the release of the current Linux distribution, as a string. If the distribution does not have a codename, an empty string is returned. Note that the returned codename is not always really a codename. For example, openSUSE returns "x86_64". This function does not handle such cases in any special way and just returns the string it finds, if any. **Lookup hierarchy:** * the codename within the "VERSION" attribute of the os-release file, if provided, * the value of the "Codename" attribute returned by the lsb_release command, * the value of the "<codename>" field of the distro release file. """ return _distro.codename() def info(pretty=False, best=False): """ Return certain machine-readable information items about the current Linux distribution in a dictionary, as shown in the following example: .. sourcecode:: python { 'id': 'rhel', 'version': '7.0', 'version_parts': { 'major': '7', 'minor': '0', 'build_number': '' }, 'like': 'fedora', 'codename': 'Maipo' } The dictionary structure and keys are always the same, regardless of which information items are available in the underlying data sources. The values for the various keys are as follows: * ``id``: The result of :func:`distro.id`. * ``version``: The result of :func:`distro.version`. * ``version_parts -> major``: The result of :func:`distro.major_version`. * ``version_parts -> minor``: The result of :func:`distro.minor_version`. * ``version_parts -> build_number``: The result of :func:`distro.build_number`. * ``like``: The result of :func:`distro.like`. * ``codename``: The result of :func:`distro.codename`. For a description of the *pretty* and *best* parameters, see the :func:`distro.version` method. 
""" return _distro.info(pretty, best) def os_release_info(): """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the current Linux distribution. See `os-release file`_ for details about these information items. """ return _distro.os_release_info() def lsb_release_info(): """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the current Linux distribution. See `lsb_release command output`_ for details about these information items. """ return _distro.lsb_release_info() def distro_release_info(): """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current Linux distribution. See `distro release file`_ for details about these information items. """ return _distro.distro_release_info() def os_release_attr(attribute): """ Return a single named information item from the os-release file data source of the current Linux distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `os-release file`_ for details about these information items. """ return _distro.os_release_attr(attribute) def lsb_release_attr(attribute): """ Return a single named information item from the lsb_release command output data source of the current Linux distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `lsb_release command output`_ for details about these information items. """ return _distro.lsb_release_attr(attribute) def distro_release_attr(attribute): """ Return a single named information item from the distro release file data source of the current Linux distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `distro release file`_ for details about these information items. """ return _distro.distro_release_attr(attribute) class cached_property(object): """A version of @property which caches the value. On access, it calls the underlying function and sets the value in `__dict__` so future accesses will not re-call the property. """ def __init__(self, f): self._fname = f.__name__ self._f = f def __get__(self, obj, owner): assert obj is not None, 'call {} on an instance'.format(self._fname) ret = obj.__dict__[self._fname] = self._f(obj) return ret class LinuxDistribution(object): """ Provides information about a Linux distribution. This package creates a private module-global instance of this class with default initialization arguments, that is used by the `consolidated accessor functions`_ and `single source accessor functions`_. By using default initialization arguments, that module-global instance returns data about the current Linux distribution (i.e. the distro this package runs on). Normally, it is not necessary to create additional instances of this class. However, in situations where control is needed over the exact data sources that are used, instances of this class can be created with a specific distro release file, or a specific os-release file, or without invoking the lsb_release command. 
""" def __init__(self, include_lsb=True, os_release_file='', distro_release_file=''): """ The initialization method of this class gathers information from the available data sources, and stores that in private instance attributes. Subsequent access to the information items uses these private instance attributes, so that the data sources are read only once. Parameters: * ``include_lsb`` (bool): Controls whether the `lsb_release command output`_ is included as a data source. If the lsb_release command is not available in the program execution path, the data source for the lsb_release command will be empty. * ``os_release_file`` (string): The path name of the `os-release file`_ that is to be used as a data source. An empty string (the default) will cause the default path name to be used (see `os-release file`_ for details). If the specified or defaulted os-release file does not exist, the data source for the os-release file will be empty. * ``distro_release_file`` (string): The path name of the `distro release file`_ that is to be used as a data source. An empty string (the default) will cause a default search algorithm to be used (see `distro release file`_ for details). If the specified distro release file does not exist, or if no default distro release file can be found, the data source for the distro release file will be empty. Public instance attributes: * ``os_release_file`` (string): The path name of the `os-release file`_ that is actually used as a data source. The empty string if no distro release file is used as a data source. * ``distro_release_file`` (string): The path name of the `distro release file`_ that is actually used as a data source. The empty string if no distro release file is used as a data source. * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. This controls whether the lsb information will be loaded. Raises: * :py:exc:`IOError`: Some I/O issue with an os-release file or distro release file. * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had some issue (other than not being available in the program execution path). * :py:exc:`UnicodeError`: A data source has unexpected characters or uses an unexpected encoding. """ self.os_release_file = os_release_file or \ os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME) self.distro_release_file = distro_release_file or '' # updated later self.include_lsb = include_lsb def __repr__(self): """Return repr of all info """ return \ "LinuxDistribution(" \ "os_release_file={self.os_release_file!r}, " \ "distro_release_file={self.distro_release_file!r}, " \ "include_lsb={self.include_lsb!r}, " \ "_os_release_info={self._os_release_info!r}, " \ "_lsb_release_info={self._lsb_release_info!r}, " \ "_distro_release_info={self._distro_release_info!r})".format( self=self) def linux_distribution(self, full_distribution_name=True): """ Return information about the Linux distribution that is compatible with Python's :func:`platform.linux_distribution`, supporting a subset of its parameters. For details, see :func:`distro.linux_distribution`. """ return ( self.name() if full_distribution_name else self.id(), self.version(), self.codename() ) def id(self): """Return the distro ID of the Linux distribution, as a string. For details, see :func:`distro.id`. 
""" def normalize(distro_id, table): distro_id = distro_id.lower().replace(' ', '_') return table.get(distro_id, distro_id) distro_id = self.os_release_attr('id') if distro_id: return normalize(distro_id, NORMALIZED_OS_ID) distro_id = self.lsb_release_attr('distributor_id') if distro_id: return normalize(distro_id, NORMALIZED_LSB_ID) distro_id = self.distro_release_attr('id') if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) return '' def name(self, pretty=False): """ Return the name of the Linux distribution, as a string. For details, see :func:`distro.name`. """ name = self.os_release_attr('name') \ or self.lsb_release_attr('distributor_id') \ or self.distro_release_attr('name') if pretty: name = self.os_release_attr('pretty_name') \ or self.lsb_release_attr('description') if not name: name = self.distro_release_attr('name') version = self.version(pretty=True) if version: name = name + ' ' + version return name or '' def version(self, pretty=False, best=False): """ Return the version of the Linux distribution, as a string. For details, see :func:`distro.version`. """ versions = [ self.os_release_attr('version_id'), self.lsb_release_attr('release'), self.distro_release_attr('version_id'), self._parse_distro_release_content( self.os_release_attr('pretty_name')).get('version_id', ''), self._parse_distro_release_content( self.lsb_release_attr('description')).get('version_id', '') ] version = '' if best: # This algorithm uses the last version in priority order that has # the best precision. If the versions are not in conflict, that # does not matter; otherwise, using the last one instead of the # first one might be considered a surprise. for v in versions: if v.count(".") > version.count(".") or version == '': version = v else: for v in versions: if v != '': version = v break if pretty and version and self.codename(): version = u'{0} ({1})'.format(version, self.codename()) return version def version_parts(self, best=False): """ Return the version of the Linux distribution, as a tuple of version numbers. For details, see :func:`distro.version_parts`. """ version_str = self.version(best=best) if version_str: version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?') matches = version_regex.match(version_str) if matches: major, minor, build_number = matches.groups() return major, minor or '', build_number or '' return '', '', '' def major_version(self, best=False): """ Return the major version number of the current distribution. For details, see :func:`distro.major_version`. """ return self.version_parts(best)[0] def minor_version(self, best=False): """ Return the minor version number of the Linux distribution. For details, see :func:`distro.minor_version`. """ return self.version_parts(best)[1] def build_number(self, best=False): """ Return the build number of the Linux distribution. For details, see :func:`distro.build_number`. """ return self.version_parts(best)[2] def like(self): """ Return the IDs of distributions that are like the Linux distribution. For details, see :func:`distro.like`. """ return self.os_release_attr('id_like') or '' def codename(self): """ Return the codename of the Linux distribution. For details, see :func:`distro.codename`. """ return self.os_release_attr('codename') \ or self.lsb_release_attr('codename') \ or self.distro_release_attr('codename') \ or '' def info(self, pretty=False, best=False): """ Return certain machine-readable information about the Linux distribution. For details, see :func:`distro.info`. 
""" return dict( id=self.id(), version=self.version(pretty, best), version_parts=dict( major=self.major_version(best), minor=self.minor_version(best), build_number=self.build_number(best) ), like=self.like(), codename=self.codename(), ) def os_release_info(self): """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the Linux distribution. For details, see :func:`distro.os_release_info`. """ return self._os_release_info def lsb_release_info(self): """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the Linux distribution. For details, see :func:`distro.lsb_release_info`. """ return self._lsb_release_info def distro_release_info(self): """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the Linux distribution. For details, see :func:`distro.distro_release_info`. """ return self._distro_release_info def os_release_attr(self, attribute): """ Return a single named information item from the os-release file data source of the Linux distribution. For details, see :func:`distro.os_release_attr`. """ return self._os_release_info.get(attribute, '') def lsb_release_attr(self, attribute): """ Return a single named information item from the lsb_release command output data source of the Linux distribution. For details, see :func:`distro.lsb_release_attr`. """ return self._lsb_release_info.get(attribute, '') def distro_release_attr(self, attribute): """ Return a single named information item from the distro release file data source of the Linux distribution. For details, see :func:`distro.distro_release_attr`. """ return self._distro_release_info.get(attribute, '') @cached_property def _os_release_info(self): """ Get the information items from the specified os-release file. Returns: A dictionary containing all information items. """ if os.path.isfile(self.os_release_file): with open(self.os_release_file) as release_file: return self._parse_os_release_content(release_file) return {} @staticmethod def _parse_os_release_content(lines): """ Parse the lines of an os-release file. Parameters: * lines: Iterable through the lines in the os-release file. Each line must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. """ props = {} lexer = shlex.shlex(lines, posix=True) lexer.whitespace_split = True # The shlex module defines its `wordchars` variable using literals, # making it dependent on the encoding of the Python source file. # In Python 2.6 and 2.7, the shlex source file is encoded in # 'iso-8859-1', and the `wordchars` variable is defined as a byte # string. This causes a UnicodeDecodeError to be raised when the # parsed content is a unicode object. The following fix resolves that # (... but it should be fixed in shlex...): if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes): lexer.wordchars = lexer.wordchars.decode('iso-8859-1') tokens = list(lexer) for token in tokens: # At this point, all shell-like parsing has been done (i.e. 
            # comments processed, quotes and backslash escape sequences
            # processed, multi-line values assembled, trailing newlines
            # stripped, etc.), so the tokens are now either:
            # * variable assignments: var=value
            # * commands or their arguments (not allowed in os-release)
            if '=' in token:
                k, v = token.split('=', 1)
                if isinstance(v, bytes):
                    v = v.decode('utf-8')
                props[k.lower()] = v
                if k == 'VERSION':
                    # this handles cases in which the codename is in
                    # the `(CODENAME)` (rhel, centos, fedora) format
                    # or in the `, CODENAME` format (Ubuntu).
                    codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
                    if codename:
                        codename = codename.group()
                        codename = codename.strip('()')
                        codename = codename.strip(',')
                        codename = codename.strip()
                        # the codename is stored with its surrounding
                        # parentheses, commas and whitespace stripped.
                        props['codename'] = codename
                    else:
                        props['codename'] = ''
            else:
                # Ignore any tokens that are not variable assignments
                pass
        return props

    @cached_property
    def _lsb_release_info(self):
        """
        Get the information items from the lsb_release command output.

        Returns:
            A dictionary containing all information items.
        """
        if not self.include_lsb:
            return {}
        with open(os.devnull, 'w') as devnull:
            try:
                cmd = ('lsb_release', '-a')
                stdout = subprocess.check_output(cmd, stderr=devnull)
            except OSError:
                # Command not found
                return {}
        content = stdout.decode(sys.getfilesystemencoding()).splitlines()
        return self._parse_lsb_release_content(content)

    @staticmethod
    def _parse_lsb_release_content(lines):
        """
        Parse the output of the lsb_release command.

        Parameters:

        * lines: Iterable through the lines of the lsb_release output.
                 Each line must be a unicode string or a UTF-8 encoded byte
                 string.

        Returns:
            A dictionary containing all information items.
        """
        props = {}
        for line in lines:
            kv = line.strip('\n').split(':', 1)
            if len(kv) != 2:
                # Ignore lines without colon.
                continue
            k, v = kv
            props.update({k.replace(' ', '_').lower(): v.strip()})
        return props

    @cached_property
    def _distro_release_info(self):
        """
        Get the information items from the specified distro release file.

        Returns:
            A dictionary containing all information items.
        """
        if self.distro_release_file:
            # If it was specified, we use it and parse what we can, even if
            # its file name or content does not match the expected pattern.
            distro_info = self._parse_distro_release_file(
                self.distro_release_file)
            basename = os.path.basename(self.distro_release_file)
            # The file name pattern for user-specified distro release files
            # is somewhat more tolerant (compared to when searching for the
            # file), because we want to use what was specified as best as
            # possible.
            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
            if match:
                distro_info['id'] = match.group(1)
            return distro_info
        else:
            try:
                basenames = os.listdir(_UNIXCONFDIR)
                # We sort for repeatability in cases where there are multiple
                # distro specific files; e.g. CentOS, Oracle, Enterprise all
                # containing `redhat-release` on top of their own.
                basenames.sort()
            except OSError:
                # This may occur when /etc is not readable but we can't be
                # sure about the *-release files. Check common entries of
                # /etc for information. If they turn out to not be there the
                # error is handled in `_parse_distro_release_file()`.
basenames = ['SuSE-release', 'arch-release', 'base-release', 'centos-release', 'fedora-release', 'gentoo-release', 'mageia-release', 'mandrake-release', 'mandriva-release', 'mandrivalinux-release', 'manjaro-release', 'oracle-release', 'redhat-release', 'sl-release', 'slackware-version'] for basename in basenames: if basename in _DISTRO_RELEASE_IGNORE_BASENAMES: continue match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) if match: filepath = os.path.join(_UNIXCONFDIR, basename) distro_info = self._parse_distro_release_file(filepath) if 'name' in distro_info: # The name is always present if the pattern matches self.distro_release_file = filepath distro_info['id'] = match.group(1) return distro_info return {} def _parse_distro_release_file(self, filepath): """ Parse a distro release file. Parameters: * filepath: Path name of the distro release file. Returns: A dictionary containing all information items. """ try: with open(filepath) as fp: # Only parse the first line. For instance, on SLES there # are multiple lines. We don't want them... return self._parse_distro_release_content(fp.readline()) except (OSError, IOError): # Ignore not being able to read a specific, seemingly version # related file. # See https://github.com/nir0s/distro/issues/162 return {} @staticmethod def _parse_distro_release_content(line): """ Parse a line from a distro release file. Parameters: * line: Line from the distro release file. Must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. """ if isinstance(line, bytes): line = line.decode('utf-8') matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match( line.strip()[::-1]) distro_info = {} if matches: # regexp ensures non-None distro_info['name'] = matches.group(3)[::-1] if matches.group(2): distro_info['version_id'] = matches.group(2)[::-1] if matches.group(1): distro_info['codename'] = matches.group(1)[::-1] elif line: distro_info['name'] = line.strip() return distro_info _distro = LinuxDistribution() def main(): logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) parser = argparse.ArgumentParser(description="Linux distro info tool") parser.add_argument( '--json', '-j', help="Output in machine readable format", action="store_true") args = parser.parse_args() if args.json: logger.info(json.dumps(info(), indent=4, sort_keys=True)) else: logger.info('Name: %s', name(pretty=True)) distribution_version = version(pretty=True) logger.info('Version: %s', distribution_version) distribution_codename = codename() logger.info('Codename: %s', distribution_codename) if __name__ == '__main__': main()
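

# --- Illustrative usage sketch (editor's addition, not part of the original
# module). A minimal example of the consolidated accessor functions defined
# above; the helper name is hypothetical, it is never called at import time,
# and the printed values depend on the distro this code runs on.
def _example_usage():
    print(id())                   # e.g. 'ubuntu'
    print(version(best=True))     # most precise version found, e.g. '16.04'
    print(linux_distribution())   # e.g. ('Ubuntu', '16.04', 'xenial')
    print(info(pretty=True))      # the full machine-readable dict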
39,461
34.712217
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/__init__.py
""" pip._vendor is for vendoring dependencies of pip to prevent needing pip to depend on something external. Files inside of pip._vendor should be considered immutable and should only be updated to versions from upstream. """ from __future__ import absolute_import import glob import os.path import sys # Downstream redistributors which have debundled our dependencies should also # patch this value to be true. This will trigger the additional patching # to cause things like "six" to be available as pip. DEBUNDLED = False # By default, look in this directory for a bunch of .whl files which we will # add to the beginning of sys.path before attempting to import anything. This # is done to support downstream re-distributors like Debian and Fedora who # wish to create their own Wheels for our dependencies to aid in debundling. WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) # Define a small helper function to alias our vendored modules to the real ones # if the vendored ones do not exist. This idea of this was taken from # https://github.com/kennethreitz/requests/pull/2567. def vendored(modulename): vendored_name = "{0}.{1}".format(__name__, modulename) try: __import__(vendored_name, globals(), locals(), level=0) except ImportError: try: __import__(modulename, globals(), locals(), level=0) except ImportError: # We can just silently allow import failures to pass here. If we # got to this point it means that ``import pip._vendor.whatever`` # failed and so did ``import whatever``. Since we're importing this # upfront in an attempt to alias imports, not erroring here will # just mean we get a regular import error whenever pip *actually* # tries to import one of these modules to use it, which actually # gives us a better error message than we would have otherwise # gotten. pass else: sys.modules[vendored_name] = sys.modules[modulename] base, head = vendored_name.rsplit(".", 1) setattr(sys.modules[base], head, sys.modules[modulename]) # If we're operating in a debundled setup, then we want to go ahead and trigger # the aliasing of our vendored libraries as well as looking for wheels to add # to our sys.path. This will cause all of this code to be a no-op typically # however downstream redistributors can enable it in a consistent way across # all platforms. if DEBUNDLED: # Actually look inside of WHEEL_DIR to find .whl files and add them to the # front of our sys.path. sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path # Actually alias all of our vendored dependencies. 
vendored("cachecontrol") vendored("colorama") vendored("distlib") vendored("distro") vendored("html5lib") vendored("lockfile") vendored("six") vendored("six.moves") vendored("six.moves.urllib") vendored("six.moves.urllib.parse") vendored("packaging") vendored("packaging.version") vendored("packaging.specifiers") vendored("pkg_resources") vendored("progress") vendored("pytoml") vendored("retrying") vendored("requests") vendored("requests.packages") vendored("requests.packages.urllib3") vendored("requests.packages.urllib3._collections") vendored("requests.packages.urllib3.connection") vendored("requests.packages.urllib3.connectionpool") vendored("requests.packages.urllib3.contrib") vendored("requests.packages.urllib3.contrib.ntlmpool") vendored("requests.packages.urllib3.contrib.pyopenssl") vendored("requests.packages.urllib3.exceptions") vendored("requests.packages.urllib3.fields") vendored("requests.packages.urllib3.filepost") vendored("requests.packages.urllib3.packages") vendored("requests.packages.urllib3.packages.ordered_dict") vendored("requests.packages.urllib3.packages.six") vendored("requests.packages.urllib3.packages.ssl_match_hostname") vendored("requests.packages.urllib3.packages.ssl_match_hostname." "_implementation") vendored("requests.packages.urllib3.poolmanager") vendored("requests.packages.urllib3.request") vendored("requests.packages.urllib3.response") vendored("requests.packages.urllib3.util") vendored("requests.packages.urllib3.util.connection") vendored("requests.packages.urllib3.util.request") vendored("requests.packages.urllib3.util.response") vendored("requests.packages.urllib3.util.retry") vendored("requests.packages.urllib3.util.ssl_") vendored("requests.packages.urllib3.util.timeout") vendored("requests.packages.urllib3.util.url")
4,732
42.027273
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/retrying.py
## Copyright 2013-2014 Ray Holder ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. import random from pip._vendor import six import sys import time import traceback # sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... MAX_WAIT = 1073741823 def retry(*dargs, **dkw): """ Decorator function that instantiates the Retrying object @param *dargs: positional arguments passed to Retrying object @param **dkw: keyword arguments passed to the Retrying object """ # support both @retry and @retry() as valid syntax if len(dargs) == 1 and callable(dargs[0]): def wrap_simple(f): @six.wraps(f) def wrapped_f(*args, **kw): return Retrying().call(f, *args, **kw) return wrapped_f return wrap_simple(dargs[0]) else: def wrap(f): @six.wraps(f) def wrapped_f(*args, **kw): return Retrying(*dargs, **dkw).call(f, *args, **kw) return wrapped_f return wrap class Retrying(object): def __init__(self, stop=None, wait=None, stop_max_attempt_number=None, stop_max_delay=None, wait_fixed=None, wait_random_min=None, wait_random_max=None, wait_incrementing_start=None, wait_incrementing_increment=None, wait_exponential_multiplier=None, wait_exponential_max=None, retry_on_exception=None, retry_on_result=None, wrap_exception=False, stop_func=None, wait_func=None, wait_jitter_max=None): self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay self._wait_fixed = 1000 if wait_fixed is None else wait_fixed self._wait_random_min = 0 if wait_random_min is None else wait_random_min self._wait_random_max = 1000 if wait_random_max is None else wait_random_max self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max self._wait_jitter_max = 0 if wait_jitter_max is None else wait_jitter_max # TODO add chaining of stop behaviors # stop behavior stop_funcs = [] if stop_max_attempt_number is not None: stop_funcs.append(self.stop_after_attempt) if stop_max_delay is not None: stop_funcs.append(self.stop_after_delay) if stop_func is not None: self.stop = stop_func elif stop is None: self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs) else: self.stop = getattr(self, stop) # TODO add chaining of wait behaviors # wait behavior wait_funcs = [lambda *args, **kwargs: 0] if wait_fixed is not None: wait_funcs.append(self.fixed_sleep) if wait_random_min is not None or wait_random_max is not None: wait_funcs.append(self.random_sleep) if wait_incrementing_start is not None or wait_incrementing_increment is not None: wait_funcs.append(self.incrementing_sleep) if wait_exponential_multiplier is not None or wait_exponential_max is not None: 
wait_funcs.append(self.exponential_sleep) if wait_func is not None: self.wait = wait_func elif wait is None: self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs) else: self.wait = getattr(self, wait) # retry on exception filter if retry_on_exception is None: self._retry_on_exception = self.always_reject else: self._retry_on_exception = retry_on_exception # TODO simplify retrying by Exception types # retry on result filter if retry_on_result is None: self._retry_on_result = self.never_reject else: self._retry_on_result = retry_on_result self._wrap_exception = wrap_exception def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms): """Stop after the previous attempt >= stop_max_attempt_number.""" return previous_attempt_number >= self._stop_max_attempt_number def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms): """Stop after the time from the first attempt >= stop_max_delay.""" return delay_since_first_attempt_ms >= self._stop_max_delay def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): """Don't sleep at all before retrying.""" return 0 def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): """Sleep a fixed amount of time between each retry.""" return self._wait_fixed def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): """Sleep a random amount of time between wait_random_min and wait_random_max""" return random.randint(self._wait_random_min, self._wait_random_max) def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): """ Sleep an incremental amount of time after each attempt, starting at wait_incrementing_start and incrementing by wait_incrementing_increment """ result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1)) if result < 0: result = 0 return result def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): exp = 2 ** previous_attempt_number result = self._wait_exponential_multiplier * exp if result > self._wait_exponential_max: result = self._wait_exponential_max if result < 0: result = 0 return result def never_reject(self, result): return False def always_reject(self, result): return True def should_reject(self, attempt): reject = False if attempt.has_exception: reject |= self._retry_on_exception(attempt.value[1]) else: reject |= self._retry_on_result(attempt.value) return reject def call(self, fn, *args, **kwargs): start_time = int(round(time.time() * 1000)) attempt_number = 1 while True: try: attempt = Attempt(fn(*args, **kwargs), attempt_number, False) except: tb = sys.exc_info() attempt = Attempt(tb, attempt_number, True) if not self.should_reject(attempt): return attempt.get(self._wrap_exception) delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time if self.stop(attempt_number, delay_since_first_attempt_ms): if not self._wrap_exception and attempt.has_exception: # get() on an attempt with an exception should cause it to be raised, but raise just in case raise attempt.get() else: raise RetryError(attempt) else: sleep = self.wait(attempt_number, delay_since_first_attempt_ms) if self._wait_jitter_max: jitter = random.random() * self._wait_jitter_max sleep = sleep + max(0, jitter) time.sleep(sleep / 1000.0) attempt_number += 1 class Attempt(object): """ An Attempt encapsulates a call to a target function that may end as a normal return value from the function or an Exception depending on what 
occurred during the execution. """ def __init__(self, value, attempt_number, has_exception): self.value = value self.attempt_number = attempt_number self.has_exception = has_exception def get(self, wrap_exception=False): """ Return the return value of this Attempt instance or raise an Exception. If wrap_exception is true, this Attempt is wrapped inside of a RetryError before being raised. """ if self.has_exception: if wrap_exception: raise RetryError(self) else: six.reraise(self.value[0], self.value[1], self.value[2]) else: return self.value def __repr__(self): if self.has_exception: return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2]))) else: return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value) class RetryError(Exception): """ A RetryError encapsulates the last Attempt instance right before giving up. """ def __init__(self, last_attempt): self.last_attempt = last_attempt def __str__(self): return "RetryError[{0}]".format(self.last_attempt)
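

# --- Illustrative sketch (editor's addition, not part of the original
# module). A minimal use of the ``@retry`` decorator defined above; the
# function name and failure are hypothetical. The call is retried up to
# three times with a fixed 100 ms sleep, then the last exception propagates.
@retry(stop_max_attempt_number=3, wait_fixed=100)
def _flaky_call():
    raise IOError("transient failure")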
9,972
36.212687
120
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/colorama/winterm.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. from . import win32 # from wincon.h class WinColor(object): BLACK = 0 BLUE = 1 GREEN = 2 CYAN = 3 RED = 4 MAGENTA = 5 YELLOW = 6 GREY = 7 # from wincon.h class WinStyle(object): NORMAL = 0x00 # dim text, dim background BRIGHT = 0x08 # bright text, dim background BRIGHT_BACKGROUND = 0x80 # dim text, bright background class WinTerm(object): def __init__(self): self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes self.set_attrs(self._default) self._default_fore = self._fore self._default_back = self._back self._default_style = self._style # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. # So that LIGHT_EX colors and BRIGHT style do not clobber each other, # we track them separately, since LIGHT_EX is overwritten by Fore/Back # and BRIGHT is overwritten by Style codes. self._light = 0 def get_attrs(self): return self._fore + self._back * 16 + (self._style | self._light) def set_attrs(self, value): self._fore = value & 7 self._back = (value >> 4) & 7 self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) def reset_all(self, on_stderr=None): self.set_attrs(self._default) self.set_console(attrs=self._default) def fore(self, fore=None, light=False, on_stderr=False): if fore is None: fore = self._default_fore self._fore = fore # Emulate LIGHT_EX with BRIGHT Style if light: self._light |= WinStyle.BRIGHT else: self._light &= ~WinStyle.BRIGHT self.set_console(on_stderr=on_stderr) def back(self, back=None, light=False, on_stderr=False): if back is None: back = self._default_back self._back = back # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style if light: self._light |= WinStyle.BRIGHT_BACKGROUND else: self._light &= ~WinStyle.BRIGHT_BACKGROUND self.set_console(on_stderr=on_stderr) def style(self, style=None, on_stderr=False): if style is None: style = self._default_style self._style = style self.set_console(on_stderr=on_stderr) def set_console(self, attrs=None, on_stderr=False): if attrs is None: attrs = self.get_attrs() handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleTextAttribute(handle, attrs) def get_position(self, handle): position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition # Because Windows coordinates are 0-based, # and win32.SetConsoleCursorPosition expects 1-based. position.X += 1 position.Y += 1 return position def set_cursor_position(self, position=None, on_stderr=False): if position is None: # I'm not currently tracking the position, so there is no default. # position = self.get_position() return handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleCursorPosition(handle, position) def cursor_adjust(self, x, y, on_stderr=False): handle = win32.STDOUT if on_stderr: handle = win32.STDERR position = self.get_position(handle) adjusted_position = (position.Y + y, position.X + x) win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) def erase_screen(self, mode=0, on_stderr=False): # 0 should clear from the cursor to the end of the screen. # 1 should clear from the cursor to the beginning of the screen. 
        # 2 should clear the entire screen, and move cursor to (1,1)
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        # get the number of character cells in the current buffer
        cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
        # get number of character cells before current cursor position
        cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = cells_in_screen - cells_before_cursor
        elif mode == 1:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_before_cursor
        elif mode == 2:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_in_screen
        else:
            # invalid mode: do nothing rather than raise NameError below
            return
        # fill the selected region of the screen with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
        if mode == 2:
            # put the cursor where needed
            win32.SetConsoleCursorPosition(handle, (1, 1))

    def erase_line(self, mode=0, on_stderr=False):
        # 0 should clear from the cursor to the end of the line.
        # 1 should clear from the cursor to the beginning of the line.
        # 2 should clear the entire line.
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
        elif mode == 1:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwCursorPosition.X
        elif mode == 2:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwSize.X
        else:
            # invalid mode: do nothing rather than raise NameError below
            return
        # fill the selected region of the line with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)

    def set_title(self, title):
        win32.SetConsoleTitle(title)
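

# --- Illustrative sketch (editor's addition, not part of the original
# module). WinTerm drives the Win32 console API directly, so this helper
# (hypothetical, never called at import time) only works inside a real
# Windows console; the color values come from the WinColor table above.
def _demo_winterm():
    term = WinTerm()
    term.fore(WinColor.RED, light=True)   # bright red foreground
    print('error text')
    term.reset_all()                      # restore the saved defaults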
6,290
37.595092
95
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/colorama/ansitowin32.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os

from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test


winterm = None
if windll is not None:
    winterm = WinTerm()


def is_stream_closed(stream):
    return not hasattr(stream, 'closed') or stream.closed


def is_a_tty(stream):
    return hasattr(stream, 'isatty') and stream.isatty()


class StreamWrapper(object):
    '''
    Wraps a stream (such as stdout), acting as a transparent proxy for all
    attribute access apart from method 'write()', which is delegated to our
    Converter instance.
    '''
    def __init__(self, wrapped, converter):
        # double-underscore everything to prevent clashes with names of
        # attributes on the wrapped stream object.
        self.__wrapped = wrapped
        self.__convertor = converter

    def __getattr__(self, name):
        return getattr(self.__wrapped, name)

    def write(self, text):
        self.__convertor.write(text)


class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')   # Control Sequence Introducer
    ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?')        # Operating System Command

    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped

        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset

        # create the proxy wrapping our output stream
        self.stream = StreamWrapper(wrapped, self)

        on_windows = os.name == 'nt'
        # We test if the WinAPI works, because even if we are on Windows
        # we may be using a terminal that doesn't support the WinAPI
        # (e.g. Cygwin Terminal). In this case it's up to the terminal
        # to support the ANSI codes.
        conversion_supported = on_windows and winapi_test()

        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
        self.strip = strip

        # should we convert ANSI sequences into win32 calls?
        if convert is None:
            convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
        self.convert = convert

        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()

        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr

    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required.
This will generally be False on non-Windows platforms, unless optional functionality like autoreset has been requested using kwargs to init() ''' return self.convert or self.strip or self.autoreset def get_win32_calls(self): if self.convert and winterm: return { AnsiStyle.RESET_ALL: (winterm.reset_all, ), AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), AnsiFore.RED: (winterm.fore, WinColor.RED), AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), AnsiFore.WHITE: (winterm.fore, WinColor.GREY), AnsiFore.RESET: (winterm.fore, ), AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), AnsiBack.BLACK: (winterm.back, WinColor.BLACK), AnsiBack.RED: (winterm.back, WinColor.RED), AnsiBack.GREEN: (winterm.back, WinColor.GREEN), AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), AnsiBack.BLUE: (winterm.back, WinColor.BLUE), AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), AnsiBack.CYAN: (winterm.back, WinColor.CYAN), AnsiBack.WHITE: (winterm.back, WinColor.GREY), AnsiBack.RESET: (winterm.back, ), AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), } return dict() def write(self, text): if self.strip or self.convert: self.write_and_convert(text) else: self.wrapped.write(text) self.wrapped.flush() if self.autoreset: self.reset_all() def reset_all(self): if self.convert: self.call_win32('m', (0,)) elif not self.strip and not is_stream_closed(self.wrapped): self.wrapped.write(Style.RESET_ALL) def write_and_convert(self, text): ''' Write the given text to our wrapped stream, stripping any ANSI sequences from the text, and optionally converting them into win32 calls. 
''' cursor = 0 text = self.convert_osc(text) for match in self.ANSI_CSI_RE.finditer(text): start, end = match.span() self.write_plain_text(text, cursor, start) self.convert_ansi(*match.groups()) cursor = end self.write_plain_text(text, cursor, len(text)) def write_plain_text(self, text, start, end): if start < end: self.wrapped.write(text[start:end]) self.wrapped.flush() def convert_ansi(self, paramstring, command): if self.convert: params = self.extract_params(command, paramstring) self.call_win32(command, params) def extract_params(self, command, paramstring): if command in 'Hf': params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) while len(params) < 2: # defaults: params = params + (1,) else: params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) if len(params) == 0: # defaults: if command in 'JKm': params = (0,) elif command in 'ABCD': params = (1,) return params def call_win32(self, command, params): if command == 'm': for param in params: if param in self.win32_calls: func_args = self.win32_calls[param] func = func_args[0] args = func_args[1:] kwargs = dict(on_stderr=self.on_stderr) func(*args, **kwargs) elif command in 'J': winterm.erase_screen(params[0], on_stderr=self.on_stderr) elif command in 'K': winterm.erase_line(params[0], on_stderr=self.on_stderr) elif command in 'Hf': # cursor position - absolute winterm.set_cursor_position(params, on_stderr=self.on_stderr) elif command in 'ABCD': # cursor position - relative n = params[0] # A - up, B - down, C - forward, D - back x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) def convert_osc(self, text): for match in self.ANSI_OSC_RE.finditer(text): start, end = match.span() text = text[:start] + text[end:] paramstring, command = match.groups() if command in '\x07': # \x07 = BEL params = paramstring.split(";") # 0 - change title and icon (we will only change title) # 1 - change icon (we don't support this) # 2 - change title if params[0] in '02': winterm.set_title(params[1]) return text
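

# --- Illustrative sketch (editor's addition, not part of the original
# module). Demonstrates wrapping a stream by hand; normally colorama's
# ``init()`` does this for you. The helper name is hypothetical and never
# called at import time; on most non-Windows ttys ``should_wrap()`` is
# False and writes pass through untouched.
def _demo_wrap_stdout():
    wrapper = AnsiToWin32(sys.stdout)
    if wrapper.should_wrap():
        sys.stdout = wrapper.stream      # route writes through write() above
    print('\033[31mred where supported\033[0m')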
9,668
39.797468
103
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/colorama/__init__.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. from .initialise import init, deinit, reinit, colorama_text from .ansi import Fore, Back, Style, Cursor from .ansitowin32 import AnsiToWin32 __version__ = '0.3.9'
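

# --- Illustrative sketch (editor's addition, not part of the original
# package). The canonical colorama usage: initialise once, print with the
# ANSI constants, then restore the original streams. The helper name is
# hypothetical and never called at import time.
def _demo_colorama():
    init(autoreset=True)                      # wrap stdout/stderr as needed
    print(Fore.GREEN + 'ok')                  # color resets after each print
    print(Back.RED + Style.BRIGHT + 'alert')
    deinit()                                  # undo the stream wrapping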
240
29.125
74
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/colorama/initialise.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import atexit import contextlib import sys from .ansitowin32 import AnsiToWin32 orig_stdout = None orig_stderr = None wrapped_stdout = None wrapped_stderr = None atexit_done = False def reset_all(): if AnsiToWin32 is not None: # Issue #74: objects might become None at exit AnsiToWin32(orig_stdout).reset_all() def init(autoreset=False, convert=None, strip=None, wrap=True): if not wrap and any([autoreset, convert, strip]): raise ValueError('wrap=False conflicts with any other arg=True') global wrapped_stdout, wrapped_stderr global orig_stdout, orig_stderr orig_stdout = sys.stdout orig_stderr = sys.stderr if sys.stdout is None: wrapped_stdout = None else: sys.stdout = wrapped_stdout = \ wrap_stream(orig_stdout, convert, strip, autoreset, wrap) if sys.stderr is None: wrapped_stderr = None else: sys.stderr = wrapped_stderr = \ wrap_stream(orig_stderr, convert, strip, autoreset, wrap) global atexit_done if not atexit_done: atexit.register(reset_all) atexit_done = True def deinit(): if orig_stdout is not None: sys.stdout = orig_stdout if orig_stderr is not None: sys.stderr = orig_stderr @contextlib.contextmanager def colorama_text(*args, **kwargs): init(*args, **kwargs) try: yield finally: deinit() def reinit(): if wrapped_stdout is not None: sys.stdout = wrapped_stdout if wrapped_stderr is not None: sys.stderr = wrapped_stderr def wrap_stream(stream, convert, strip, autoreset, wrap): if wrap: wrapper = AnsiToWin32(stream, convert=convert, strip=strip, autoreset=autoreset) if wrapper.should_wrap(): stream = wrapper.stream return stream
1,917
22.108434
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/colorama/win32.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.

# from winbase.h
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER

    COORD = wintypes._COORD

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )

    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE

    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL

    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL

    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL

    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL

    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL

    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
    _SetConsoleTitleW.argtypes = [
        wintypes.LPCWSTR
    ]
    _SetConsoleTitleW.restype = wintypes.BOOL

    handles = {
        STDOUT: _GetStdHandle(STDOUT),
        STDERR: _GetStdHandle(STDERR),
    }

    def _winapi_test(handle):
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return bool(success)

    def winapi_test():
        return any(_winapi_test(h) for h in handles.values())

    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return csbi

    def SetConsoleTextAttribute(stream_id, attrs):
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)

    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)

    def FillConsoleOutputCharacter(stream_id, char, length, start):
        handle = handles[stream_id]
        char = c_char(char.encode())
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value

    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))

    def SetConsoleTitle(title):
        return _SetConsoleTitleW(title)
5,426
33.566879
111
py
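A small pure-Python sketch of the coordinate translation that SetConsoleCursorPosition() above performs before calling the Win32 API (plain ints stand in for the ctypes COORD; the viewport offsets are the srWindow fields it reads from the screen-buffer info):

def ansi_to_win32_coord(row, col, window_top=0, window_left=0):
    # ANSI cursor positions are 1-based (row, column); Win32 COORDs are
    # 0-based (X, Y), so the pair is swapped, shifted by one, then offset
    # by the scrolled viewport's top-left corner.
    return (col - 1 + window_left, row - 1 + window_top)

assert ansi_to_win32_coord(1, 1) == (0, 0)                 # ANSI home -> buffer origin
assert ansi_to_win32_coord(5, 10, window_top=3) == (9, 7)  # scrolled three rows down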
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/colorama/ansi.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''

CSI = '\033['
OSC = '\033]'
BEL = '\007'


def code_to_chars(code):
    return CSI + str(code) + 'm'

def set_title(title):
    return OSC + '2;' + title + BEL

def clear_screen(mode=2):
    return CSI + str(mode) + 'J'

def clear_line(mode=2):
    return CSI + str(mode) + 'K'


class AnsiCodes(object):
    def __init__(self):
        # the subclasses declare class attributes which are numbers.
        # Upon instantiation we define instance attributes, which are the same
        # as the class attributes but wrapped with the ANSI escape sequence
        for name in dir(self):
            if not name.startswith('_'):
                value = getattr(self, name)
                setattr(self, name, code_to_chars(value))


class AnsiCursor(object):
    def UP(self, n=1):
        return CSI + str(n) + 'A'
    def DOWN(self, n=1):
        return CSI + str(n) + 'B'
    def FORWARD(self, n=1):
        return CSI + str(n) + 'C'
    def BACK(self, n=1):
        return CSI + str(n) + 'D'
    def POS(self, x=1, y=1):
        return CSI + str(y) + ';' + str(x) + 'H'


class AnsiFore(AnsiCodes):
    BLACK           = 30
    RED             = 31
    GREEN           = 32
    YELLOW          = 33
    BLUE            = 34
    MAGENTA         = 35
    CYAN            = 36
    WHITE           = 37
    RESET           = 39

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX   = 90
    LIGHTRED_EX     = 91
    LIGHTGREEN_EX   = 92
    LIGHTYELLOW_EX  = 93
    LIGHTBLUE_EX    = 94
    LIGHTMAGENTA_EX = 95
    LIGHTCYAN_EX    = 96
    LIGHTWHITE_EX   = 97


class AnsiBack(AnsiCodes):
    BLACK           = 40
    RED             = 41
    GREEN           = 42
    YELLOW          = 43
    BLUE            = 44
    MAGENTA         = 45
    CYAN            = 46
    WHITE           = 47
    RESET           = 49

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX   = 100
    LIGHTRED_EX     = 101
    LIGHTGREEN_EX   = 102
    LIGHTYELLOW_EX  = 103
    LIGHTBLUE_EX    = 104
    LIGHTMAGENTA_EX = 105
    LIGHTCYAN_EX    = 106
    LIGHTWHITE_EX   = 107


class AnsiStyle(AnsiCodes):
    BRIGHT    = 1
    DIM       = 2
    NORMAL    = 22
    RESET_ALL = 0

Fore   = AnsiFore()
Back   = AnsiBack()
Style  = AnsiStyle()
Cursor = AnsiCursor()
2,524
23.514563
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/core.py
from . import idnadata
import bisect
import unicodedata
import re
import sys
from .intranges import intranges_contain

_virama_combining_class = 9
_alabel_prefix = b'xn--'
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')

if sys.version_info[0] == 3:
    unicode = str
    unichr = chr

class IDNAError(UnicodeError):
    """ Base exception for all IDNA-encoding related problems """
    pass

class IDNABidiError(IDNAError):
    """ Exception when bidirectional requirements are not satisfied """
    pass

class InvalidCodepoint(IDNAError):
    """ Exception when a disallowed or unallocated codepoint is used """
    pass

class InvalidCodepointContext(IDNAError):
    """ Exception when the codepoint is not valid in the context it is used """
    pass

def _combining_class(cp):
    return unicodedata.combining(unichr(cp))

def _is_script(cp, script):
    return intranges_contain(ord(cp), idnadata.scripts[script])

def _punycode(s):
    return s.encode('punycode')

def _unot(s):
    return 'U+{0:04X}'.format(s)

def valid_label_length(label):
    if len(label) > 63:
        return False
    return True

def valid_string_length(label, trailing_dot):
    if len(label) > (254 if trailing_dot else 253):
        return False
    return True

def check_bidi(label, check_ltr=False):
    # Bidi rules should only be applied if string contains RTL characters
    bidi_label = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)
        if direction == '':
            # String likely comes from a newer version of Unicode
            raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
        if direction in ['R', 'AL', 'AN']:
            bidi_label = True
            break
    if not bidi_label and not check_ltr:
        return True

    # Bidi rule 1
    direction = unicodedata.bidirectional(label[0])
    if direction in ['R', 'AL']:
        rtl = True
    elif direction == 'L':
        rtl = False
    else:
        raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))

    valid_ending = False
    number_type = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)

        if rtl:
            # Bidi rule 2
            if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
            # Bidi rule 3
            if direction in ['R', 'AL', 'EN', 'AN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False
            # Bidi rule 4
            if direction in ['AN', 'EN']:
                if not number_type:
                    number_type = direction
                else:
                    if number_type != direction:
                        raise IDNABidiError('Can not mix numeral types in a right-to-left label')
        else:
            # Bidi rule 5
            if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
            # Bidi rule 6
            if direction in ['L', 'EN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False

    if not valid_ending:
        raise IDNABidiError('Label ends with illegal codepoint directionality')

    return True

def check_initial_combiner(label):
    if unicodedata.category(label[0])[0] == 'M':
        raise IDNAError('Label begins with an illegal combining character')
    return True

def check_hyphen_ok(label):
    if label[2:4] == '--':
        raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
    if label[0] == '-' or label[-1] == '-':
        raise IDNAError('Label must not start or end with a hyphen')
    return True

def check_nfc(label):
    if unicodedata.normalize('NFC', label) != label:
        raise IDNAError('Label must be in Normalization Form C')

def valid_contextj(label, pos):
    cp_value = ord(label[pos])

    if cp_value == 0x200c:
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True

        ok = False
        for i in range(pos-1, -1, -1):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('L'), ord('D')]:
                ok = True
                break

        if not ok:
            return False

        ok = False
        for i in range(pos+1, len(label)):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('R'), ord('D')]:
                ok = True
                break
        return ok

    if cp_value == 0x200d:
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True
        return False

    else:
        return False

def valid_contexto(label, pos, exception=False):
    cp_value = ord(label[pos])

    if cp_value == 0x00b7:
        if 0 < pos < len(label)-1:
            if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
                return True
        return False

    elif cp_value == 0x0375:
        if pos < len(label)-1 and len(label) > 1:
            return _is_script(label[pos + 1], 'Greek')
        return False

    elif cp_value == 0x05f3 or cp_value == 0x05f4:
        if pos > 0:
            return _is_script(label[pos - 1], 'Hebrew')
        return False

    elif cp_value == 0x30fb:
        for cp in label:
            if cp == u'\u30fb':
                continue
            if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
                return True
        return False

    elif 0x660 <= cp_value <= 0x669:
        for cp in label:
            if 0x6f0 <= ord(cp) <= 0x06f9:
                return False
        return True

    elif 0x6f0 <= cp_value <= 0x6f9:
        for cp in label:
            if 0x660 <= ord(cp) <= 0x0669:
                return False
        return True

def check_label(label):
    if isinstance(label, (bytes, bytearray)):
        label = label.decode('utf-8')
    if len(label) == 0:
        raise IDNAError('Empty Label')

    check_nfc(label)
    check_hyphen_ok(label)
    check_initial_combiner(label)

    for (pos, cp) in enumerate(label):
        cp_value = ord(cp)
        if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
            continue
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
            if not valid_contextj(label, pos):
                raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
            if not valid_contexto(label, pos):
                raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
        else:
            raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))

    check_bidi(label)

def alabel(label):
    try:
        label = label.encode('ascii')
        try:
            ulabel(label)
        except IDNAError:
            raise IDNAError('The label {0} is not a valid A-label'.format(label))
        if not valid_label_length(label):
            raise IDNAError('Label too long')
        return label
    except UnicodeEncodeError:
        pass

    if not label:
        raise IDNAError('No Input')

    label = unicode(label)
    check_label(label)
    label = _punycode(label)
    label = _alabel_prefix + label

    if not valid_label_length(label):
        raise IDNAError('Label too long')

    return label

def ulabel(label):
    if not isinstance(label, (bytes, bytearray)):
        try:
            label = label.encode('ascii')
        except UnicodeEncodeError:
            check_label(label)
            return label

    label = label.lower()
    if label.startswith(_alabel_prefix):
        label = label[len(_alabel_prefix):]
    else:
        check_label(label)
        return label.decode('ascii')

    label = label.decode('punycode')
    check_label(label)
    return label

def uts46_remap(domain, std3_rules=True, transitional=False):
    """Re-map the characters in the string according to UTS46 processing."""
    from .uts46data import uts46data
    output = u""
    try:
        for pos, char in enumerate(domain):
            code_point = ord(char)
            uts46row = uts46data[code_point if code_point < 256 else
                bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
            status = uts46row[1]
            replacement = uts46row[2] if len(uts46row) == 3 else None
            # '3' entries are disallowed under STD3 rules, so they pass
            # through (or get mapped) only when std3_rules is off.
            if (status == "V" or
                    (status == "D" and not transitional) or
                    (status == "3" and not std3_rules and replacement is None)):
                output += char
            elif replacement is not None and (status == "M" or
                    (status == "3" and not std3_rules) or
                    (status == "D" and transitional)):
                output += replacement
            elif status != "I":
                raise IndexError()
        return unicodedata.normalize("NFC", output)
    except IndexError:
        raise InvalidCodepoint(
            "Codepoint {0} not allowed at position {1} in {2}".format(
            _unot(code_point), pos + 1, repr(domain)))

def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, transitional)
    trailing_dot = False
    result = []
    if strict:
        labels = s.split('.')
    else:
        labels = _unicode_dots_re.split(s)
    while labels and not labels[0]:
        del labels[0]
    if not labels:
        raise IDNAError('Empty domain')
    if labels[-1] == '':
        del labels[-1]
        trailing_dot = True
    for label in labels:
        result.append(alabel(label))
    if trailing_dot:
        result.append(b'')
    s = b'.'.join(result)
    if not valid_string_length(s, trailing_dot):
        raise IDNAError('Domain too long')
    return s

def decode(s, strict=False, uts46=False, std3_rules=False):
    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, False)
    trailing_dot = False
    result = []
    if not strict:
        labels = _unicode_dots_re.split(s)
    else:
        labels = s.split(u'.')
    while labels and not labels[0]:
        del labels[0]
    if not labels:
        raise IDNAError('Empty domain')
    if not labels[-1]:
        del labels[-1]
        trailing_dot = True
    for label in labels:
        result.append(ulabel(label))
    if trailing_dot:
        result.append(u'')
    return u'.'.join(result)
11,390
28.358247
141
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/codec.py
from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re

_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')

# `unicode` is a builtin only on Python 2; alias it so the Unicode/bytes
# dispatch in IncrementalDecoder below also works on Python 3.
try:
    unicode
except NameError:
    unicode = str

class Codec(codecs.Codec):

    def encode(self, data, errors='strict'):

        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return "", 0

        return encode(data), len(data)

    def decode(self, data, errors='strict'):

        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return u"", 0

        return decode(data), len(data)

class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    def _buffer_encode(self, data, errors, final):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return ("", 0)

        labels = _unicode_dots_re.split(data)
        trailing_dot = u''
        if labels:
            if not labels[-1]:
                trailing_dot = '.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = '.'

        result = []
        size = 0
        for label in labels:
            result.append(alabel(label))
            if size:
                size += 1
            size += len(label)

        # Join with U+002E
        result = ".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)

class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    def _buffer_decode(self, data, errors, final):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return (u"", 0)

        # IDNA allows decoding to operate on Unicode strings, too.
        if isinstance(data, unicode):
            labels = _unicode_dots_re.split(data)
        else:
            # Must be an ASCII byte string; decoding both validates that and
            # normalizes it to text on either Python version (the original
            # Python 2-only code used the `unicode` builtin here).
            data = data.decode("ascii")
            labels = data.split(u".")

        trailing_dot = u''
        if labels:
            if not labels[-1]:
                trailing_dot = u'.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = u'.'

        result = []
        size = 0
        for label in labels:
            result.append(ulabel(label))
            if size:
                size += 1
            size += len(label)

        result = u".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)


class StreamWriter(Codec, codecs.StreamWriter):
    pass

class StreamReader(Codec, codecs.StreamReader):
    pass

def getregentry():
    return codecs.CodecInfo(
        name='idna',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
3,299
26.731092
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/intranges.py
""" Given a list of integers, made up of (hopefully) a small number of long runs of consecutive integers, compute a representation of the form ((start1, end1), (start2, end2) ...). Then answer the question "was x present in the original list?" in time O(log(# runs)). """ import bisect def intranges_from_list(list_): """Represent a list of integers as a sequence of ranges: ((start_0, end_0), (start_1, end_1), ...), such that the original integers are exactly those x such that start_i <= x < end_i for some i. Ranges are encoded as single integers (start << 32 | end), not as tuples. """ sorted_list = sorted(list_) ranges = [] last_write = -1 for i in range(len(sorted_list)): if i+1 < len(sorted_list): if sorted_list[i] == sorted_list[i+1]-1: continue current_range = sorted_list[last_write+1:i+1] ranges.append(_encode_range(current_range[0], current_range[-1] + 1)) last_write = i return tuple(ranges) def _encode_range(start, end): return (start << 32) | end def _decode_range(r): return (r >> 32), (r & ((1 << 32) - 1)) def intranges_contain(int_, ranges): """Determine if `int_` falls into one of the ranges in `ranges`.""" tuple_ = _encode_range(int_, 0) pos = bisect.bisect_left(ranges, tuple_) # we could be immediately ahead of a tuple (start, end) # with start < int_ <= end if pos > 0: left, right = _decode_range(ranges[pos-1]) if left <= int_ < right: return True # or we could be immediately behind a tuple (int_, end) if pos < len(ranges): left, _ = _decode_range(ranges[pos]) if left == int_: return True return False
1,749
31.407407
77
py
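A worked example of the run encoding described in the module docstring above: the list collapses to half-open runs, each packed into a single integer as (start << 32) | end, and membership is one bisect plus a decode (import path per this vendored copy):

from pip._vendor.idna.intranges import (
    intranges_from_list, intranges_contain, _decode_range)

ranges = intranges_from_list([1, 2, 3, 10, 11])
assert [_decode_range(r) for r in ranges] == [(1, 4), (10, 12)]  # half-open
assert intranges_contain(3, ranges)
assert not intranges_contain(4, ranges)    # ends are exclusive
assert intranges_contain(10, ranges)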
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/package_data.py
__version__ = '2.6'
21
6.333333
19
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/__init__.py
from .package_data import __version__
from .core import *
58
18.666667
37
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/uts46data.py
# This file is automatically generated by tools/idna-data # vim: set fileencoding=utf-8 : """IDNA Mapping Table from UTS46.""" __version__ = "6.3.0" def _seg_0(): return [ (0x0, '3'), (0x1, '3'), (0x2, '3'), (0x3, '3'), (0x4, '3'), (0x5, '3'), (0x6, '3'), (0x7, '3'), (0x8, '3'), (0x9, '3'), (0xA, '3'), (0xB, '3'), (0xC, '3'), (0xD, '3'), (0xE, '3'), (0xF, '3'), (0x10, '3'), (0x11, '3'), (0x12, '3'), (0x13, '3'), (0x14, '3'), (0x15, '3'), (0x16, '3'), (0x17, '3'), (0x18, '3'), (0x19, '3'), (0x1A, '3'), (0x1B, '3'), (0x1C, '3'), (0x1D, '3'), (0x1E, '3'), (0x1F, '3'), (0x20, '3'), (0x21, '3'), (0x22, '3'), (0x23, '3'), (0x24, '3'), (0x25, '3'), (0x26, '3'), (0x27, '3'), (0x28, '3'), (0x29, '3'), (0x2A, '3'), (0x2B, '3'), (0x2C, '3'), (0x2D, 'V'), (0x2E, 'V'), (0x2F, '3'), (0x30, 'V'), (0x31, 'V'), (0x32, 'V'), (0x33, 'V'), (0x34, 'V'), (0x35, 'V'), (0x36, 'V'), (0x37, 'V'), (0x38, 'V'), (0x39, 'V'), (0x3A, '3'), (0x3B, '3'), (0x3C, '3'), (0x3D, '3'), (0x3E, '3'), (0x3F, '3'), (0x40, '3'), (0x41, 'M', u'a'), (0x42, 'M', u'b'), (0x43, 'M', u'c'), (0x44, 'M', u'd'), (0x45, 'M', u'e'), (0x46, 'M', u'f'), (0x47, 'M', u'g'), (0x48, 'M', u'h'), (0x49, 'M', u'i'), (0x4A, 'M', u'j'), (0x4B, 'M', u'k'), (0x4C, 'M', u'l'), (0x4D, 'M', u'm'), (0x4E, 'M', u'n'), (0x4F, 'M', u'o'), (0x50, 'M', u'p'), (0x51, 'M', u'q'), (0x52, 'M', u'r'), (0x53, 'M', u's'), (0x54, 'M', u't'), (0x55, 'M', u'u'), (0x56, 'M', u'v'), (0x57, 'M', u'w'), (0x58, 'M', u'x'), (0x59, 'M', u'y'), (0x5A, 'M', u'z'), (0x5B, '3'), (0x5C, '3'), (0x5D, '3'), (0x5E, '3'), (0x5F, '3'), (0x60, '3'), (0x61, 'V'), (0x62, 'V'), (0x63, 'V'), ] def _seg_1(): return [ (0x64, 'V'), (0x65, 'V'), (0x66, 'V'), (0x67, 'V'), (0x68, 'V'), (0x69, 'V'), (0x6A, 'V'), (0x6B, 'V'), (0x6C, 'V'), (0x6D, 'V'), (0x6E, 'V'), (0x6F, 'V'), (0x70, 'V'), (0x71, 'V'), (0x72, 'V'), (0x73, 'V'), (0x74, 'V'), (0x75, 'V'), (0x76, 'V'), (0x77, 'V'), (0x78, 'V'), (0x79, 'V'), (0x7A, 'V'), (0x7B, '3'), (0x7C, '3'), (0x7D, '3'), (0x7E, '3'), (0x7F, '3'), (0x80, 'X'), (0x81, 'X'), (0x82, 'X'), (0x83, 'X'), (0x84, 'X'), (0x85, 'X'), (0x86, 'X'), (0x87, 'X'), (0x88, 'X'), (0x89, 'X'), (0x8A, 'X'), (0x8B, 'X'), (0x8C, 'X'), (0x8D, 'X'), (0x8E, 'X'), (0x8F, 'X'), (0x90, 'X'), (0x91, 'X'), (0x92, 'X'), (0x93, 'X'), (0x94, 'X'), (0x95, 'X'), (0x96, 'X'), (0x97, 'X'), (0x98, 'X'), (0x99, 'X'), (0x9A, 'X'), (0x9B, 'X'), (0x9C, 'X'), (0x9D, 'X'), (0x9E, 'X'), (0x9F, 'X'), (0xA0, '3', u' '), (0xA1, 'V'), (0xA2, 'V'), (0xA3, 'V'), (0xA4, 'V'), (0xA5, 'V'), (0xA6, 'V'), (0xA7, 'V'), (0xA8, '3', u' ̈'), (0xA9, 'V'), (0xAA, 'M', u'a'), (0xAB, 'V'), (0xAC, 'V'), (0xAD, 'I'), (0xAE, 'V'), (0xAF, '3', u' ̄'), (0xB0, 'V'), (0xB1, 'V'), (0xB2, 'M', u'2'), (0xB3, 'M', u'3'), (0xB4, '3', u' ́'), (0xB5, 'M', u'μ'), (0xB6, 'V'), (0xB7, 'V'), (0xB8, '3', u' ̧'), (0xB9, 'M', u'1'), (0xBA, 'M', u'o'), (0xBB, 'V'), (0xBC, 'M', u'1⁄4'), (0xBD, 'M', u'1⁄2'), (0xBE, 'M', u'3⁄4'), (0xBF, 'V'), (0xC0, 'M', u'à'), (0xC1, 'M', u'á'), (0xC2, 'M', u'â'), (0xC3, 'M', u'ã'), (0xC4, 'M', u'ä'), (0xC5, 'M', u'å'), (0xC6, 'M', u'æ'), (0xC7, 'M', u'ç'), ] def _seg_2(): return [ (0xC8, 'M', u'è'), (0xC9, 'M', u'é'), (0xCA, 'M', u'ê'), (0xCB, 'M', u'ë'), (0xCC, 'M', u'ì'), (0xCD, 'M', u'í'), (0xCE, 'M', u'î'), (0xCF, 'M', u'ï'), (0xD0, 'M', u'ð'), (0xD1, 'M', u'ñ'), (0xD2, 'M', u'ò'), (0xD3, 'M', u'ó'), (0xD4, 'M', u'ô'), (0xD5, 'M', u'õ'), (0xD6, 'M', u'ö'), (0xD7, 'V'), (0xD8, 'M', u'ø'), (0xD9, 'M', u'ù'), (0xDA, 'M', u'ú'), (0xDB, 'M', u'û'), (0xDC, 'M', u'ü'), (0xDD, 'M', u'ý'), (0xDE, 'M', u'þ'), (0xDF, 'D', u'ss'), 
(0xE0, 'V'), (0xE1, 'V'), (0xE2, 'V'), (0xE3, 'V'), (0xE4, 'V'), (0xE5, 'V'), (0xE6, 'V'), (0xE7, 'V'), (0xE8, 'V'), (0xE9, 'V'), (0xEA, 'V'), (0xEB, 'V'), (0xEC, 'V'), (0xED, 'V'), (0xEE, 'V'), (0xEF, 'V'), (0xF0, 'V'), (0xF1, 'V'), (0xF2, 'V'), (0xF3, 'V'), (0xF4, 'V'), (0xF5, 'V'), (0xF6, 'V'), (0xF7, 'V'), (0xF8, 'V'), (0xF9, 'V'), (0xFA, 'V'), (0xFB, 'V'), (0xFC, 'V'), (0xFD, 'V'), (0xFE, 'V'), (0xFF, 'V'), (0x100, 'M', u'ā'), (0x101, 'V'), (0x102, 'M', u'ă'), (0x103, 'V'), (0x104, 'M', u'ą'), (0x105, 'V'), (0x106, 'M', u'ć'), (0x107, 'V'), (0x108, 'M', u'ĉ'), (0x109, 'V'), (0x10A, 'M', u'ċ'), (0x10B, 'V'), (0x10C, 'M', u'č'), (0x10D, 'V'), (0x10E, 'M', u'ď'), (0x10F, 'V'), (0x110, 'M', u'đ'), (0x111, 'V'), (0x112, 'M', u'ē'), (0x113, 'V'), (0x114, 'M', u'ĕ'), (0x115, 'V'), (0x116, 'M', u'ė'), (0x117, 'V'), (0x118, 'M', u'ę'), (0x119, 'V'), (0x11A, 'M', u'ě'), (0x11B, 'V'), (0x11C, 'M', u'ĝ'), (0x11D, 'V'), (0x11E, 'M', u'ğ'), (0x11F, 'V'), (0x120, 'M', u'ġ'), (0x121, 'V'), (0x122, 'M', u'ģ'), (0x123, 'V'), (0x124, 'M', u'ĥ'), (0x125, 'V'), (0x126, 'M', u'ħ'), (0x127, 'V'), (0x128, 'M', u'ĩ'), (0x129, 'V'), (0x12A, 'M', u'ī'), (0x12B, 'V'), ] def _seg_3(): return [ (0x12C, 'M', u'ĭ'), (0x12D, 'V'), (0x12E, 'M', u'į'), (0x12F, 'V'), (0x130, 'M', u'i̇'), (0x131, 'V'), (0x132, 'M', u'ij'), (0x134, 'M', u'ĵ'), (0x135, 'V'), (0x136, 'M', u'ķ'), (0x137, 'V'), (0x139, 'M', u'ĺ'), (0x13A, 'V'), (0x13B, 'M', u'ļ'), (0x13C, 'V'), (0x13D, 'M', u'ľ'), (0x13E, 'V'), (0x13F, 'M', u'l·'), (0x141, 'M', u'ł'), (0x142, 'V'), (0x143, 'M', u'ń'), (0x144, 'V'), (0x145, 'M', u'ņ'), (0x146, 'V'), (0x147, 'M', u'ň'), (0x148, 'V'), (0x149, 'M', u'ʼn'), (0x14A, 'M', u'ŋ'), (0x14B, 'V'), (0x14C, 'M', u'ō'), (0x14D, 'V'), (0x14E, 'M', u'ŏ'), (0x14F, 'V'), (0x150, 'M', u'ő'), (0x151, 'V'), (0x152, 'M', u'œ'), (0x153, 'V'), (0x154, 'M', u'ŕ'), (0x155, 'V'), (0x156, 'M', u'ŗ'), (0x157, 'V'), (0x158, 'M', u'ř'), (0x159, 'V'), (0x15A, 'M', u'ś'), (0x15B, 'V'), (0x15C, 'M', u'ŝ'), (0x15D, 'V'), (0x15E, 'M', u'ş'), (0x15F, 'V'), (0x160, 'M', u'š'), (0x161, 'V'), (0x162, 'M', u'ţ'), (0x163, 'V'), (0x164, 'M', u'ť'), (0x165, 'V'), (0x166, 'M', u'ŧ'), (0x167, 'V'), (0x168, 'M', u'ũ'), (0x169, 'V'), (0x16A, 'M', u'ū'), (0x16B, 'V'), (0x16C, 'M', u'ŭ'), (0x16D, 'V'), (0x16E, 'M', u'ů'), (0x16F, 'V'), (0x170, 'M', u'ű'), (0x171, 'V'), (0x172, 'M', u'ų'), (0x173, 'V'), (0x174, 'M', u'ŵ'), (0x175, 'V'), (0x176, 'M', u'ŷ'), (0x177, 'V'), (0x178, 'M', u'ÿ'), (0x179, 'M', u'ź'), (0x17A, 'V'), (0x17B, 'M', u'ż'), (0x17C, 'V'), (0x17D, 'M', u'ž'), (0x17E, 'V'), (0x17F, 'M', u's'), (0x180, 'V'), (0x181, 'M', u'ɓ'), (0x182, 'M', u'ƃ'), (0x183, 'V'), (0x184, 'M', u'ƅ'), (0x185, 'V'), (0x186, 'M', u'ɔ'), (0x187, 'M', u'ƈ'), (0x188, 'V'), (0x189, 'M', u'ɖ'), (0x18A, 'M', u'ɗ'), (0x18B, 'M', u'ƌ'), (0x18C, 'V'), (0x18E, 'M', u'ǝ'), (0x18F, 'M', u'ə'), (0x190, 'M', u'ɛ'), (0x191, 'M', u'ƒ'), (0x192, 'V'), (0x193, 'M', u'ɠ'), ] def _seg_4(): return [ (0x194, 'M', u'ɣ'), (0x195, 'V'), (0x196, 'M', u'ɩ'), (0x197, 'M', u'ɨ'), (0x198, 'M', u'ƙ'), (0x199, 'V'), (0x19C, 'M', u'ɯ'), (0x19D, 'M', u'ɲ'), (0x19E, 'V'), (0x19F, 'M', u'ɵ'), (0x1A0, 'M', u'ơ'), (0x1A1, 'V'), (0x1A2, 'M', u'ƣ'), (0x1A3, 'V'), (0x1A4, 'M', u'ƥ'), (0x1A5, 'V'), (0x1A6, 'M', u'ʀ'), (0x1A7, 'M', u'ƨ'), (0x1A8, 'V'), (0x1A9, 'M', u'ʃ'), (0x1AA, 'V'), (0x1AC, 'M', u'ƭ'), (0x1AD, 'V'), (0x1AE, 'M', u'ʈ'), (0x1AF, 'M', u'ư'), (0x1B0, 'V'), (0x1B1, 'M', u'ʊ'), (0x1B2, 'M', u'ʋ'), (0x1B3, 'M', u'ƴ'), (0x1B4, 'V'), (0x1B5, 'M', u'ƶ'), (0x1B6, 'V'), (0x1B7, 'M', u'ʒ'), (0x1B8, 'M', 
u'ƹ'), (0x1B9, 'V'), (0x1BC, 'M', u'ƽ'), (0x1BD, 'V'), (0x1C4, 'M', u'dž'), (0x1C7, 'M', u'lj'), (0x1CA, 'M', u'nj'), (0x1CD, 'M', u'ǎ'), (0x1CE, 'V'), (0x1CF, 'M', u'ǐ'), (0x1D0, 'V'), (0x1D1, 'M', u'ǒ'), (0x1D2, 'V'), (0x1D3, 'M', u'ǔ'), (0x1D4, 'V'), (0x1D5, 'M', u'ǖ'), (0x1D6, 'V'), (0x1D7, 'M', u'ǘ'), (0x1D8, 'V'), (0x1D9, 'M', u'ǚ'), (0x1DA, 'V'), (0x1DB, 'M', u'ǜ'), (0x1DC, 'V'), (0x1DE, 'M', u'ǟ'), (0x1DF, 'V'), (0x1E0, 'M', u'ǡ'), (0x1E1, 'V'), (0x1E2, 'M', u'ǣ'), (0x1E3, 'V'), (0x1E4, 'M', u'ǥ'), (0x1E5, 'V'), (0x1E6, 'M', u'ǧ'), (0x1E7, 'V'), (0x1E8, 'M', u'ǩ'), (0x1E9, 'V'), (0x1EA, 'M', u'ǫ'), (0x1EB, 'V'), (0x1EC, 'M', u'ǭ'), (0x1ED, 'V'), (0x1EE, 'M', u'ǯ'), (0x1EF, 'V'), (0x1F1, 'M', u'dz'), (0x1F4, 'M', u'ǵ'), (0x1F5, 'V'), (0x1F6, 'M', u'ƕ'), (0x1F7, 'M', u'ƿ'), (0x1F8, 'M', u'ǹ'), (0x1F9, 'V'), (0x1FA, 'M', u'ǻ'), (0x1FB, 'V'), (0x1FC, 'M', u'ǽ'), (0x1FD, 'V'), (0x1FE, 'M', u'ǿ'), (0x1FF, 'V'), (0x200, 'M', u'ȁ'), (0x201, 'V'), (0x202, 'M', u'ȃ'), (0x203, 'V'), (0x204, 'M', u'ȅ'), (0x205, 'V'), (0x206, 'M', u'ȇ'), (0x207, 'V'), (0x208, 'M', u'ȉ'), (0x209, 'V'), (0x20A, 'M', u'ȋ'), (0x20B, 'V'), (0x20C, 'M', u'ȍ'), ] def _seg_5(): return [ (0x20D, 'V'), (0x20E, 'M', u'ȏ'), (0x20F, 'V'), (0x210, 'M', u'ȑ'), (0x211, 'V'), (0x212, 'M', u'ȓ'), (0x213, 'V'), (0x214, 'M', u'ȕ'), (0x215, 'V'), (0x216, 'M', u'ȗ'), (0x217, 'V'), (0x218, 'M', u'ș'), (0x219, 'V'), (0x21A, 'M', u'ț'), (0x21B, 'V'), (0x21C, 'M', u'ȝ'), (0x21D, 'V'), (0x21E, 'M', u'ȟ'), (0x21F, 'V'), (0x220, 'M', u'ƞ'), (0x221, 'V'), (0x222, 'M', u'ȣ'), (0x223, 'V'), (0x224, 'M', u'ȥ'), (0x225, 'V'), (0x226, 'M', u'ȧ'), (0x227, 'V'), (0x228, 'M', u'ȩ'), (0x229, 'V'), (0x22A, 'M', u'ȫ'), (0x22B, 'V'), (0x22C, 'M', u'ȭ'), (0x22D, 'V'), (0x22E, 'M', u'ȯ'), (0x22F, 'V'), (0x230, 'M', u'ȱ'), (0x231, 'V'), (0x232, 'M', u'ȳ'), (0x233, 'V'), (0x23A, 'M', u'ⱥ'), (0x23B, 'M', u'ȼ'), (0x23C, 'V'), (0x23D, 'M', u'ƚ'), (0x23E, 'M', u'ⱦ'), (0x23F, 'V'), (0x241, 'M', u'ɂ'), (0x242, 'V'), (0x243, 'M', u'ƀ'), (0x244, 'M', u'ʉ'), (0x245, 'M', u'ʌ'), (0x246, 'M', u'ɇ'), (0x247, 'V'), (0x248, 'M', u'ɉ'), (0x249, 'V'), (0x24A, 'M', u'ɋ'), (0x24B, 'V'), (0x24C, 'M', u'ɍ'), (0x24D, 'V'), (0x24E, 'M', u'ɏ'), (0x24F, 'V'), (0x2B0, 'M', u'h'), (0x2B1, 'M', u'ɦ'), (0x2B2, 'M', u'j'), (0x2B3, 'M', u'r'), (0x2B4, 'M', u'ɹ'), (0x2B5, 'M', u'ɻ'), (0x2B6, 'M', u'ʁ'), (0x2B7, 'M', u'w'), (0x2B8, 'M', u'y'), (0x2B9, 'V'), (0x2D8, '3', u' ̆'), (0x2D9, '3', u' ̇'), (0x2DA, '3', u' ̊'), (0x2DB, '3', u' ̨'), (0x2DC, '3', u' ̃'), (0x2DD, '3', u' ̋'), (0x2DE, 'V'), (0x2E0, 'M', u'ɣ'), (0x2E1, 'M', u'l'), (0x2E2, 'M', u's'), (0x2E3, 'M', u'x'), (0x2E4, 'M', u'ʕ'), (0x2E5, 'V'), (0x340, 'M', u'̀'), (0x341, 'M', u'́'), (0x342, 'V'), (0x343, 'M', u'̓'), (0x344, 'M', u'̈́'), (0x345, 'M', u'ι'), (0x346, 'V'), (0x34F, 'I'), (0x350, 'V'), (0x370, 'M', u'ͱ'), (0x371, 'V'), (0x372, 'M', u'ͳ'), (0x373, 'V'), (0x374, 'M', u'ʹ'), (0x375, 'V'), (0x376, 'M', u'ͷ'), (0x377, 'V'), ] def _seg_6(): return [ (0x378, 'X'), (0x37A, '3', u' ι'), (0x37B, 'V'), (0x37E, '3', u';'), (0x37F, 'X'), (0x384, '3', u' ́'), (0x385, '3', u' ̈́'), (0x386, 'M', u'ά'), (0x387, 'M', u'·'), (0x388, 'M', u'έ'), (0x389, 'M', u'ή'), (0x38A, 'M', u'ί'), (0x38B, 'X'), (0x38C, 'M', u'ό'), (0x38D, 'X'), (0x38E, 'M', u'ύ'), (0x38F, 'M', u'ώ'), (0x390, 'V'), (0x391, 'M', u'α'), (0x392, 'M', u'β'), (0x393, 'M', u'γ'), (0x394, 'M', u'δ'), (0x395, 'M', u'ε'), (0x396, 'M', u'ζ'), (0x397, 'M', u'η'), (0x398, 'M', u'θ'), (0x399, 'M', u'ι'), (0x39A, 'M', u'κ'), (0x39B, 'M', u'λ'), (0x39C, 'M', u'μ'), (0x39D, 'M', 
u'ν'), (0x39E, 'M', u'ξ'), (0x39F, 'M', u'ο'), (0x3A0, 'M', u'π'), (0x3A1, 'M', u'ρ'), (0x3A2, 'X'), (0x3A3, 'M', u'σ'), (0x3A4, 'M', u'τ'), (0x3A5, 'M', u'υ'), (0x3A6, 'M', u'φ'), (0x3A7, 'M', u'χ'), (0x3A8, 'M', u'ψ'), (0x3A9, 'M', u'ω'), (0x3AA, 'M', u'ϊ'), (0x3AB, 'M', u'ϋ'), (0x3AC, 'V'), (0x3C2, 'D', u'σ'), (0x3C3, 'V'), (0x3CF, 'M', u'ϗ'), (0x3D0, 'M', u'β'), (0x3D1, 'M', u'θ'), (0x3D2, 'M', u'υ'), (0x3D3, 'M', u'ύ'), (0x3D4, 'M', u'ϋ'), (0x3D5, 'M', u'φ'), (0x3D6, 'M', u'π'), (0x3D7, 'V'), (0x3D8, 'M', u'ϙ'), (0x3D9, 'V'), (0x3DA, 'M', u'ϛ'), (0x3DB, 'V'), (0x3DC, 'M', u'ϝ'), (0x3DD, 'V'), (0x3DE, 'M', u'ϟ'), (0x3DF, 'V'), (0x3E0, 'M', u'ϡ'), (0x3E1, 'V'), (0x3E2, 'M', u'ϣ'), (0x3E3, 'V'), (0x3E4, 'M', u'ϥ'), (0x3E5, 'V'), (0x3E6, 'M', u'ϧ'), (0x3E7, 'V'), (0x3E8, 'M', u'ϩ'), (0x3E9, 'V'), (0x3EA, 'M', u'ϫ'), (0x3EB, 'V'), (0x3EC, 'M', u'ϭ'), (0x3ED, 'V'), (0x3EE, 'M', u'ϯ'), (0x3EF, 'V'), (0x3F0, 'M', u'κ'), (0x3F1, 'M', u'ρ'), (0x3F2, 'M', u'σ'), (0x3F3, 'V'), (0x3F4, 'M', u'θ'), (0x3F5, 'M', u'ε'), (0x3F6, 'V'), (0x3F7, 'M', u'ϸ'), (0x3F8, 'V'), (0x3F9, 'M', u'σ'), (0x3FA, 'M', u'ϻ'), (0x3FB, 'V'), (0x3FD, 'M', u'ͻ'), (0x3FE, 'M', u'ͼ'), (0x3FF, 'M', u'ͽ'), (0x400, 'M', u'ѐ'), (0x401, 'M', u'ё'), (0x402, 'M', u'ђ'), (0x403, 'M', u'ѓ'), ] def _seg_7(): return [ (0x404, 'M', u'є'), (0x405, 'M', u'ѕ'), (0x406, 'M', u'і'), (0x407, 'M', u'ї'), (0x408, 'M', u'ј'), (0x409, 'M', u'љ'), (0x40A, 'M', u'њ'), (0x40B, 'M', u'ћ'), (0x40C, 'M', u'ќ'), (0x40D, 'M', u'ѝ'), (0x40E, 'M', u'ў'), (0x40F, 'M', u'џ'), (0x410, 'M', u'а'), (0x411, 'M', u'б'), (0x412, 'M', u'в'), (0x413, 'M', u'г'), (0x414, 'M', u'д'), (0x415, 'M', u'е'), (0x416, 'M', u'ж'), (0x417, 'M', u'з'), (0x418, 'M', u'и'), (0x419, 'M', u'й'), (0x41A, 'M', u'к'), (0x41B, 'M', u'л'), (0x41C, 'M', u'м'), (0x41D, 'M', u'н'), (0x41E, 'M', u'о'), (0x41F, 'M', u'п'), (0x420, 'M', u'р'), (0x421, 'M', u'с'), (0x422, 'M', u'т'), (0x423, 'M', u'у'), (0x424, 'M', u'ф'), (0x425, 'M', u'х'), (0x426, 'M', u'ц'), (0x427, 'M', u'ч'), (0x428, 'M', u'ш'), (0x429, 'M', u'щ'), (0x42A, 'M', u'ъ'), (0x42B, 'M', u'ы'), (0x42C, 'M', u'ь'), (0x42D, 'M', u'э'), (0x42E, 'M', u'ю'), (0x42F, 'M', u'я'), (0x430, 'V'), (0x460, 'M', u'ѡ'), (0x461, 'V'), (0x462, 'M', u'ѣ'), (0x463, 'V'), (0x464, 'M', u'ѥ'), (0x465, 'V'), (0x466, 'M', u'ѧ'), (0x467, 'V'), (0x468, 'M', u'ѩ'), (0x469, 'V'), (0x46A, 'M', u'ѫ'), (0x46B, 'V'), (0x46C, 'M', u'ѭ'), (0x46D, 'V'), (0x46E, 'M', u'ѯ'), (0x46F, 'V'), (0x470, 'M', u'ѱ'), (0x471, 'V'), (0x472, 'M', u'ѳ'), (0x473, 'V'), (0x474, 'M', u'ѵ'), (0x475, 'V'), (0x476, 'M', u'ѷ'), (0x477, 'V'), (0x478, 'M', u'ѹ'), (0x479, 'V'), (0x47A, 'M', u'ѻ'), (0x47B, 'V'), (0x47C, 'M', u'ѽ'), (0x47D, 'V'), (0x47E, 'M', u'ѿ'), (0x47F, 'V'), (0x480, 'M', u'ҁ'), (0x481, 'V'), (0x48A, 'M', u'ҋ'), (0x48B, 'V'), (0x48C, 'M', u'ҍ'), (0x48D, 'V'), (0x48E, 'M', u'ҏ'), (0x48F, 'V'), (0x490, 'M', u'ґ'), (0x491, 'V'), (0x492, 'M', u'ғ'), (0x493, 'V'), (0x494, 'M', u'ҕ'), (0x495, 'V'), (0x496, 'M', u'җ'), (0x497, 'V'), (0x498, 'M', u'ҙ'), (0x499, 'V'), (0x49A, 'M', u'қ'), (0x49B, 'V'), (0x49C, 'M', u'ҝ'), (0x49D, 'V'), (0x49E, 'M', u'ҟ'), ] def _seg_8(): return [ (0x49F, 'V'), (0x4A0, 'M', u'ҡ'), (0x4A1, 'V'), (0x4A2, 'M', u'ң'), (0x4A3, 'V'), (0x4A4, 'M', u'ҥ'), (0x4A5, 'V'), (0x4A6, 'M', u'ҧ'), (0x4A7, 'V'), (0x4A8, 'M', u'ҩ'), (0x4A9, 'V'), (0x4AA, 'M', u'ҫ'), (0x4AB, 'V'), (0x4AC, 'M', u'ҭ'), (0x4AD, 'V'), (0x4AE, 'M', u'ү'), (0x4AF, 'V'), (0x4B0, 'M', u'ұ'), (0x4B1, 'V'), (0x4B2, 'M', u'ҳ'), (0x4B3, 'V'), (0x4B4, 'M', u'ҵ'), (0x4B5, 'V'), (0x4B6, 'M', 
u'ҷ'), (0x4B7, 'V'), (0x4B8, 'M', u'ҹ'), (0x4B9, 'V'), (0x4BA, 'M', u'һ'), (0x4BB, 'V'), (0x4BC, 'M', u'ҽ'), (0x4BD, 'V'), (0x4BE, 'M', u'ҿ'), (0x4BF, 'V'), (0x4C0, 'X'), (0x4C1, 'M', u'ӂ'), (0x4C2, 'V'), (0x4C3, 'M', u'ӄ'), (0x4C4, 'V'), (0x4C5, 'M', u'ӆ'), (0x4C6, 'V'), (0x4C7, 'M', u'ӈ'), (0x4C8, 'V'), (0x4C9, 'M', u'ӊ'), (0x4CA, 'V'), (0x4CB, 'M', u'ӌ'), (0x4CC, 'V'), (0x4CD, 'M', u'ӎ'), (0x4CE, 'V'), (0x4D0, 'M', u'ӑ'), (0x4D1, 'V'), (0x4D2, 'M', u'ӓ'), (0x4D3, 'V'), (0x4D4, 'M', u'ӕ'), (0x4D5, 'V'), (0x4D6, 'M', u'ӗ'), (0x4D7, 'V'), (0x4D8, 'M', u'ә'), (0x4D9, 'V'), (0x4DA, 'M', u'ӛ'), (0x4DB, 'V'), (0x4DC, 'M', u'ӝ'), (0x4DD, 'V'), (0x4DE, 'M', u'ӟ'), (0x4DF, 'V'), (0x4E0, 'M', u'ӡ'), (0x4E1, 'V'), (0x4E2, 'M', u'ӣ'), (0x4E3, 'V'), (0x4E4, 'M', u'ӥ'), (0x4E5, 'V'), (0x4E6, 'M', u'ӧ'), (0x4E7, 'V'), (0x4E8, 'M', u'ө'), (0x4E9, 'V'), (0x4EA, 'M', u'ӫ'), (0x4EB, 'V'), (0x4EC, 'M', u'ӭ'), (0x4ED, 'V'), (0x4EE, 'M', u'ӯ'), (0x4EF, 'V'), (0x4F0, 'M', u'ӱ'), (0x4F1, 'V'), (0x4F2, 'M', u'ӳ'), (0x4F3, 'V'), (0x4F4, 'M', u'ӵ'), (0x4F5, 'V'), (0x4F6, 'M', u'ӷ'), (0x4F7, 'V'), (0x4F8, 'M', u'ӹ'), (0x4F9, 'V'), (0x4FA, 'M', u'ӻ'), (0x4FB, 'V'), (0x4FC, 'M', u'ӽ'), (0x4FD, 'V'), (0x4FE, 'M', u'ӿ'), (0x4FF, 'V'), (0x500, 'M', u'ԁ'), (0x501, 'V'), (0x502, 'M', u'ԃ'), (0x503, 'V'), ] def _seg_9(): return [ (0x504, 'M', u'ԅ'), (0x505, 'V'), (0x506, 'M', u'ԇ'), (0x507, 'V'), (0x508, 'M', u'ԉ'), (0x509, 'V'), (0x50A, 'M', u'ԋ'), (0x50B, 'V'), (0x50C, 'M', u'ԍ'), (0x50D, 'V'), (0x50E, 'M', u'ԏ'), (0x50F, 'V'), (0x510, 'M', u'ԑ'), (0x511, 'V'), (0x512, 'M', u'ԓ'), (0x513, 'V'), (0x514, 'M', u'ԕ'), (0x515, 'V'), (0x516, 'M', u'ԗ'), (0x517, 'V'), (0x518, 'M', u'ԙ'), (0x519, 'V'), (0x51A, 'M', u'ԛ'), (0x51B, 'V'), (0x51C, 'M', u'ԝ'), (0x51D, 'V'), (0x51E, 'M', u'ԟ'), (0x51F, 'V'), (0x520, 'M', u'ԡ'), (0x521, 'V'), (0x522, 'M', u'ԣ'), (0x523, 'V'), (0x524, 'M', u'ԥ'), (0x525, 'V'), (0x526, 'M', u'ԧ'), (0x527, 'V'), (0x528, 'X'), (0x531, 'M', u'ա'), (0x532, 'M', u'բ'), (0x533, 'M', u'գ'), (0x534, 'M', u'դ'), (0x535, 'M', u'ե'), (0x536, 'M', u'զ'), (0x537, 'M', u'է'), (0x538, 'M', u'ը'), (0x539, 'M', u'թ'), (0x53A, 'M', u'ժ'), (0x53B, 'M', u'ի'), (0x53C, 'M', u'լ'), (0x53D, 'M', u'խ'), (0x53E, 'M', u'ծ'), (0x53F, 'M', u'կ'), (0x540, 'M', u'հ'), (0x541, 'M', u'ձ'), (0x542, 'M', u'ղ'), (0x543, 'M', u'ճ'), (0x544, 'M', u'մ'), (0x545, 'M', u'յ'), (0x546, 'M', u'ն'), (0x547, 'M', u'շ'), (0x548, 'M', u'ո'), (0x549, 'M', u'չ'), (0x54A, 'M', u'պ'), (0x54B, 'M', u'ջ'), (0x54C, 'M', u'ռ'), (0x54D, 'M', u'ս'), (0x54E, 'M', u'վ'), (0x54F, 'M', u'տ'), (0x550, 'M', u'ր'), (0x551, 'M', u'ց'), (0x552, 'M', u'ւ'), (0x553, 'M', u'փ'), (0x554, 'M', u'ք'), (0x555, 'M', u'օ'), (0x556, 'M', u'ֆ'), (0x557, 'X'), (0x559, 'V'), (0x560, 'X'), (0x561, 'V'), (0x587, 'M', u'եւ'), (0x588, 'X'), (0x589, 'V'), (0x58B, 'X'), (0x58F, 'V'), (0x590, 'X'), (0x591, 'V'), (0x5C8, 'X'), (0x5D0, 'V'), (0x5EB, 'X'), (0x5F0, 'V'), (0x5F5, 'X'), (0x606, 'V'), (0x61C, 'X'), (0x61E, 'V'), (0x675, 'M', u'اٴ'), (0x676, 'M', u'وٴ'), (0x677, 'M', u'ۇٴ'), (0x678, 'M', u'يٴ'), (0x679, 'V'), (0x6DD, 'X'), ] def _seg_10(): return [ (0x6DE, 'V'), (0x70E, 'X'), (0x710, 'V'), (0x74B, 'X'), (0x74D, 'V'), (0x7B2, 'X'), (0x7C0, 'V'), (0x7FB, 'X'), (0x800, 'V'), (0x82E, 'X'), (0x830, 'V'), (0x83F, 'X'), (0x840, 'V'), (0x85C, 'X'), (0x85E, 'V'), (0x85F, 'X'), (0x8A0, 'V'), (0x8A1, 'X'), (0x8A2, 'V'), (0x8AD, 'X'), (0x8E4, 'V'), (0x8FF, 'X'), (0x900, 'V'), (0x958, 'M', u'क़'), (0x959, 'M', u'ख़'), (0x95A, 'M', u'ग़'), (0x95B, 'M', u'ज़'), (0x95C, 'M', u'ड़'), (0x95D, 'M', 
u'ढ़'), (0x95E, 'M', u'फ़'), (0x95F, 'M', u'य़'), (0x960, 'V'), (0x978, 'X'), (0x979, 'V'), (0x980, 'X'), (0x981, 'V'), (0x984, 'X'), (0x985, 'V'), (0x98D, 'X'), (0x98F, 'V'), (0x991, 'X'), (0x993, 'V'), (0x9A9, 'X'), (0x9AA, 'V'), (0x9B1, 'X'), (0x9B2, 'V'), (0x9B3, 'X'), (0x9B6, 'V'), (0x9BA, 'X'), (0x9BC, 'V'), (0x9C5, 'X'), (0x9C7, 'V'), (0x9C9, 'X'), (0x9CB, 'V'), (0x9CF, 'X'), (0x9D7, 'V'), (0x9D8, 'X'), (0x9DC, 'M', u'ড়'), (0x9DD, 'M', u'ঢ়'), (0x9DE, 'X'), (0x9DF, 'M', u'য়'), (0x9E0, 'V'), (0x9E4, 'X'), (0x9E6, 'V'), (0x9FC, 'X'), (0xA01, 'V'), (0xA04, 'X'), (0xA05, 'V'), (0xA0B, 'X'), (0xA0F, 'V'), (0xA11, 'X'), (0xA13, 'V'), (0xA29, 'X'), (0xA2A, 'V'), (0xA31, 'X'), (0xA32, 'V'), (0xA33, 'M', u'ਲ਼'), (0xA34, 'X'), (0xA35, 'V'), (0xA36, 'M', u'ਸ਼'), (0xA37, 'X'), (0xA38, 'V'), (0xA3A, 'X'), (0xA3C, 'V'), (0xA3D, 'X'), (0xA3E, 'V'), (0xA43, 'X'), (0xA47, 'V'), (0xA49, 'X'), (0xA4B, 'V'), (0xA4E, 'X'), (0xA51, 'V'), (0xA52, 'X'), (0xA59, 'M', u'ਖ਼'), (0xA5A, 'M', u'ਗ਼'), (0xA5B, 'M', u'ਜ਼'), (0xA5C, 'V'), (0xA5D, 'X'), (0xA5E, 'M', u'ਫ਼'), (0xA5F, 'X'), ] def _seg_11(): return [ (0xA66, 'V'), (0xA76, 'X'), (0xA81, 'V'), (0xA84, 'X'), (0xA85, 'V'), (0xA8E, 'X'), (0xA8F, 'V'), (0xA92, 'X'), (0xA93, 'V'), (0xAA9, 'X'), (0xAAA, 'V'), (0xAB1, 'X'), (0xAB2, 'V'), (0xAB4, 'X'), (0xAB5, 'V'), (0xABA, 'X'), (0xABC, 'V'), (0xAC6, 'X'), (0xAC7, 'V'), (0xACA, 'X'), (0xACB, 'V'), (0xACE, 'X'), (0xAD0, 'V'), (0xAD1, 'X'), (0xAE0, 'V'), (0xAE4, 'X'), (0xAE6, 'V'), (0xAF2, 'X'), (0xB01, 'V'), (0xB04, 'X'), (0xB05, 'V'), (0xB0D, 'X'), (0xB0F, 'V'), (0xB11, 'X'), (0xB13, 'V'), (0xB29, 'X'), (0xB2A, 'V'), (0xB31, 'X'), (0xB32, 'V'), (0xB34, 'X'), (0xB35, 'V'), (0xB3A, 'X'), (0xB3C, 'V'), (0xB45, 'X'), (0xB47, 'V'), (0xB49, 'X'), (0xB4B, 'V'), (0xB4E, 'X'), (0xB56, 'V'), (0xB58, 'X'), (0xB5C, 'M', u'ଡ଼'), (0xB5D, 'M', u'ଢ଼'), (0xB5E, 'X'), (0xB5F, 'V'), (0xB64, 'X'), (0xB66, 'V'), (0xB78, 'X'), (0xB82, 'V'), (0xB84, 'X'), (0xB85, 'V'), (0xB8B, 'X'), (0xB8E, 'V'), (0xB91, 'X'), (0xB92, 'V'), (0xB96, 'X'), (0xB99, 'V'), (0xB9B, 'X'), (0xB9C, 'V'), (0xB9D, 'X'), (0xB9E, 'V'), (0xBA0, 'X'), (0xBA3, 'V'), (0xBA5, 'X'), (0xBA8, 'V'), (0xBAB, 'X'), (0xBAE, 'V'), (0xBBA, 'X'), (0xBBE, 'V'), (0xBC3, 'X'), (0xBC6, 'V'), (0xBC9, 'X'), (0xBCA, 'V'), (0xBCE, 'X'), (0xBD0, 'V'), (0xBD1, 'X'), (0xBD7, 'V'), (0xBD8, 'X'), (0xBE6, 'V'), (0xBFB, 'X'), (0xC01, 'V'), (0xC04, 'X'), (0xC05, 'V'), (0xC0D, 'X'), (0xC0E, 'V'), (0xC11, 'X'), (0xC12, 'V'), (0xC29, 'X'), (0xC2A, 'V'), (0xC34, 'X'), (0xC35, 'V'), ] def _seg_12(): return [ (0xC3A, 'X'), (0xC3D, 'V'), (0xC45, 'X'), (0xC46, 'V'), (0xC49, 'X'), (0xC4A, 'V'), (0xC4E, 'X'), (0xC55, 'V'), (0xC57, 'X'), (0xC58, 'V'), (0xC5A, 'X'), (0xC60, 'V'), (0xC64, 'X'), (0xC66, 'V'), (0xC70, 'X'), (0xC78, 'V'), (0xC80, 'X'), (0xC82, 'V'), (0xC84, 'X'), (0xC85, 'V'), (0xC8D, 'X'), (0xC8E, 'V'), (0xC91, 'X'), (0xC92, 'V'), (0xCA9, 'X'), (0xCAA, 'V'), (0xCB4, 'X'), (0xCB5, 'V'), (0xCBA, 'X'), (0xCBC, 'V'), (0xCC5, 'X'), (0xCC6, 'V'), (0xCC9, 'X'), (0xCCA, 'V'), (0xCCE, 'X'), (0xCD5, 'V'), (0xCD7, 'X'), (0xCDE, 'V'), (0xCDF, 'X'), (0xCE0, 'V'), (0xCE4, 'X'), (0xCE6, 'V'), (0xCF0, 'X'), (0xCF1, 'V'), (0xCF3, 'X'), (0xD02, 'V'), (0xD04, 'X'), (0xD05, 'V'), (0xD0D, 'X'), (0xD0E, 'V'), (0xD11, 'X'), (0xD12, 'V'), (0xD3B, 'X'), (0xD3D, 'V'), (0xD45, 'X'), (0xD46, 'V'), (0xD49, 'X'), (0xD4A, 'V'), (0xD4F, 'X'), (0xD57, 'V'), (0xD58, 'X'), (0xD60, 'V'), (0xD64, 'X'), (0xD66, 'V'), (0xD76, 'X'), (0xD79, 'V'), (0xD80, 'X'), (0xD82, 'V'), (0xD84, 'X'), (0xD85, 'V'), (0xD97, 'X'), (0xD9A, 'V'), 
(0xDB2, 'X'), (0xDB3, 'V'), (0xDBC, 'X'), (0xDBD, 'V'), (0xDBE, 'X'), (0xDC0, 'V'), (0xDC7, 'X'), (0xDCA, 'V'), (0xDCB, 'X'), (0xDCF, 'V'), (0xDD5, 'X'), (0xDD6, 'V'), (0xDD7, 'X'), (0xDD8, 'V'), (0xDE0, 'X'), (0xDF2, 'V'), (0xDF5, 'X'), (0xE01, 'V'), (0xE33, 'M', u'ํา'), (0xE34, 'V'), (0xE3B, 'X'), (0xE3F, 'V'), (0xE5C, 'X'), (0xE81, 'V'), (0xE83, 'X'), (0xE84, 'V'), (0xE85, 'X'), (0xE87, 'V'), ] def _seg_13(): return [ (0xE89, 'X'), (0xE8A, 'V'), (0xE8B, 'X'), (0xE8D, 'V'), (0xE8E, 'X'), (0xE94, 'V'), (0xE98, 'X'), (0xE99, 'V'), (0xEA0, 'X'), (0xEA1, 'V'), (0xEA4, 'X'), (0xEA5, 'V'), (0xEA6, 'X'), (0xEA7, 'V'), (0xEA8, 'X'), (0xEAA, 'V'), (0xEAC, 'X'), (0xEAD, 'V'), (0xEB3, 'M', u'ໍາ'), (0xEB4, 'V'), (0xEBA, 'X'), (0xEBB, 'V'), (0xEBE, 'X'), (0xEC0, 'V'), (0xEC5, 'X'), (0xEC6, 'V'), (0xEC7, 'X'), (0xEC8, 'V'), (0xECE, 'X'), (0xED0, 'V'), (0xEDA, 'X'), (0xEDC, 'M', u'ຫນ'), (0xEDD, 'M', u'ຫມ'), (0xEDE, 'V'), (0xEE0, 'X'), (0xF00, 'V'), (0xF0C, 'M', u'་'), (0xF0D, 'V'), (0xF43, 'M', u'གྷ'), (0xF44, 'V'), (0xF48, 'X'), (0xF49, 'V'), (0xF4D, 'M', u'ཌྷ'), (0xF4E, 'V'), (0xF52, 'M', u'དྷ'), (0xF53, 'V'), (0xF57, 'M', u'བྷ'), (0xF58, 'V'), (0xF5C, 'M', u'ཛྷ'), (0xF5D, 'V'), (0xF69, 'M', u'ཀྵ'), (0xF6A, 'V'), (0xF6D, 'X'), (0xF71, 'V'), (0xF73, 'M', u'ཱི'), (0xF74, 'V'), (0xF75, 'M', u'ཱུ'), (0xF76, 'M', u'ྲྀ'), (0xF77, 'M', u'ྲཱྀ'), (0xF78, 'M', u'ླྀ'), (0xF79, 'M', u'ླཱྀ'), (0xF7A, 'V'), (0xF81, 'M', u'ཱྀ'), (0xF82, 'V'), (0xF93, 'M', u'ྒྷ'), (0xF94, 'V'), (0xF98, 'X'), (0xF99, 'V'), (0xF9D, 'M', u'ྜྷ'), (0xF9E, 'V'), (0xFA2, 'M', u'ྡྷ'), (0xFA3, 'V'), (0xFA7, 'M', u'ྦྷ'), (0xFA8, 'V'), (0xFAC, 'M', u'ྫྷ'), (0xFAD, 'V'), (0xFB9, 'M', u'ྐྵ'), (0xFBA, 'V'), (0xFBD, 'X'), (0xFBE, 'V'), (0xFCD, 'X'), (0xFCE, 'V'), (0xFDB, 'X'), (0x1000, 'V'), (0x10A0, 'X'), (0x10C7, 'M', u'ⴧ'), (0x10C8, 'X'), (0x10CD, 'M', u'ⴭ'), (0x10CE, 'X'), (0x10D0, 'V'), (0x10FC, 'M', u'ნ'), (0x10FD, 'V'), (0x115F, 'X'), (0x1161, 'V'), (0x1249, 'X'), (0x124A, 'V'), (0x124E, 'X'), (0x1250, 'V'), (0x1257, 'X'), (0x1258, 'V'), ] def _seg_14(): return [ (0x1259, 'X'), (0x125A, 'V'), (0x125E, 'X'), (0x1260, 'V'), (0x1289, 'X'), (0x128A, 'V'), (0x128E, 'X'), (0x1290, 'V'), (0x12B1, 'X'), (0x12B2, 'V'), (0x12B6, 'X'), (0x12B8, 'V'), (0x12BF, 'X'), (0x12C0, 'V'), (0x12C1, 'X'), (0x12C2, 'V'), (0x12C6, 'X'), (0x12C8, 'V'), (0x12D7, 'X'), (0x12D8, 'V'), (0x1311, 'X'), (0x1312, 'V'), (0x1316, 'X'), (0x1318, 'V'), (0x135B, 'X'), (0x135D, 'V'), (0x137D, 'X'), (0x1380, 'V'), (0x139A, 'X'), (0x13A0, 'V'), (0x13F5, 'X'), (0x1400, 'V'), (0x1680, 'X'), (0x1681, 'V'), (0x169D, 'X'), (0x16A0, 'V'), (0x16F1, 'X'), (0x1700, 'V'), (0x170D, 'X'), (0x170E, 'V'), (0x1715, 'X'), (0x1720, 'V'), (0x1737, 'X'), (0x1740, 'V'), (0x1754, 'X'), (0x1760, 'V'), (0x176D, 'X'), (0x176E, 'V'), (0x1771, 'X'), (0x1772, 'V'), (0x1774, 'X'), (0x1780, 'V'), (0x17B4, 'X'), (0x17B6, 'V'), (0x17DE, 'X'), (0x17E0, 'V'), (0x17EA, 'X'), (0x17F0, 'V'), (0x17FA, 'X'), (0x1800, 'V'), (0x1806, 'X'), (0x1807, 'V'), (0x180B, 'I'), (0x180E, 'X'), (0x1810, 'V'), (0x181A, 'X'), (0x1820, 'V'), (0x1878, 'X'), (0x1880, 'V'), (0x18AB, 'X'), (0x18B0, 'V'), (0x18F6, 'X'), (0x1900, 'V'), (0x191D, 'X'), (0x1920, 'V'), (0x192C, 'X'), (0x1930, 'V'), (0x193C, 'X'), (0x1940, 'V'), (0x1941, 'X'), (0x1944, 'V'), (0x196E, 'X'), (0x1970, 'V'), (0x1975, 'X'), (0x1980, 'V'), (0x19AC, 'X'), (0x19B0, 'V'), (0x19CA, 'X'), (0x19D0, 'V'), (0x19DB, 'X'), (0x19DE, 'V'), (0x1A1C, 'X'), (0x1A1E, 'V'), (0x1A5F, 'X'), (0x1A60, 'V'), (0x1A7D, 'X'), (0x1A7F, 'V'), (0x1A8A, 'X'), (0x1A90, 'V'), (0x1A9A, 'X'), ] def 
_seg_15(): return [ (0x1AA0, 'V'), (0x1AAE, 'X'), (0x1B00, 'V'), (0x1B4C, 'X'), (0x1B50, 'V'), (0x1B7D, 'X'), (0x1B80, 'V'), (0x1BF4, 'X'), (0x1BFC, 'V'), (0x1C38, 'X'), (0x1C3B, 'V'), (0x1C4A, 'X'), (0x1C4D, 'V'), (0x1C80, 'X'), (0x1CC0, 'V'), (0x1CC8, 'X'), (0x1CD0, 'V'), (0x1CF7, 'X'), (0x1D00, 'V'), (0x1D2C, 'M', u'a'), (0x1D2D, 'M', u'æ'), (0x1D2E, 'M', u'b'), (0x1D2F, 'V'), (0x1D30, 'M', u'd'), (0x1D31, 'M', u'e'), (0x1D32, 'M', u'ǝ'), (0x1D33, 'M', u'g'), (0x1D34, 'M', u'h'), (0x1D35, 'M', u'i'), (0x1D36, 'M', u'j'), (0x1D37, 'M', u'k'), (0x1D38, 'M', u'l'), (0x1D39, 'M', u'm'), (0x1D3A, 'M', u'n'), (0x1D3B, 'V'), (0x1D3C, 'M', u'o'), (0x1D3D, 'M', u'ȣ'), (0x1D3E, 'M', u'p'), (0x1D3F, 'M', u'r'), (0x1D40, 'M', u't'), (0x1D41, 'M', u'u'), (0x1D42, 'M', u'w'), (0x1D43, 'M', u'a'), (0x1D44, 'M', u'ɐ'), (0x1D45, 'M', u'ɑ'), (0x1D46, 'M', u'ᴂ'), (0x1D47, 'M', u'b'), (0x1D48, 'M', u'd'), (0x1D49, 'M', u'e'), (0x1D4A, 'M', u'ə'), (0x1D4B, 'M', u'ɛ'), (0x1D4C, 'M', u'ɜ'), (0x1D4D, 'M', u'g'), (0x1D4E, 'V'), (0x1D4F, 'M', u'k'), (0x1D50, 'M', u'm'), (0x1D51, 'M', u'ŋ'), (0x1D52, 'M', u'o'), (0x1D53, 'M', u'ɔ'), (0x1D54, 'M', u'ᴖ'), (0x1D55, 'M', u'ᴗ'), (0x1D56, 'M', u'p'), (0x1D57, 'M', u't'), (0x1D58, 'M', u'u'), (0x1D59, 'M', u'ᴝ'), (0x1D5A, 'M', u'ɯ'), (0x1D5B, 'M', u'v'), (0x1D5C, 'M', u'ᴥ'), (0x1D5D, 'M', u'β'), (0x1D5E, 'M', u'γ'), (0x1D5F, 'M', u'δ'), (0x1D60, 'M', u'φ'), (0x1D61, 'M', u'χ'), (0x1D62, 'M', u'i'), (0x1D63, 'M', u'r'), (0x1D64, 'M', u'u'), (0x1D65, 'M', u'v'), (0x1D66, 'M', u'β'), (0x1D67, 'M', u'γ'), (0x1D68, 'M', u'ρ'), (0x1D69, 'M', u'φ'), (0x1D6A, 'M', u'χ'), (0x1D6B, 'V'), (0x1D78, 'M', u'н'), (0x1D79, 'V'), (0x1D9B, 'M', u'ɒ'), (0x1D9C, 'M', u'c'), (0x1D9D, 'M', u'ɕ'), (0x1D9E, 'M', u'ð'), (0x1D9F, 'M', u'ɜ'), (0x1DA0, 'M', u'f'), (0x1DA1, 'M', u'ɟ'), (0x1DA2, 'M', u'ɡ'), (0x1DA3, 'M', u'ɥ'), (0x1DA4, 'M', u'ɨ'), (0x1DA5, 'M', u'ɩ'), (0x1DA6, 'M', u'ɪ'), (0x1DA7, 'M', u'ᵻ'), (0x1DA8, 'M', u'ʝ'), (0x1DA9, 'M', u'ɭ'), ] def _seg_16(): return [ (0x1DAA, 'M', u'ᶅ'), (0x1DAB, 'M', u'ʟ'), (0x1DAC, 'M', u'ɱ'), (0x1DAD, 'M', u'ɰ'), (0x1DAE, 'M', u'ɲ'), (0x1DAF, 'M', u'ɳ'), (0x1DB0, 'M', u'ɴ'), (0x1DB1, 'M', u'ɵ'), (0x1DB2, 'M', u'ɸ'), (0x1DB3, 'M', u'ʂ'), (0x1DB4, 'M', u'ʃ'), (0x1DB5, 'M', u'ƫ'), (0x1DB6, 'M', u'ʉ'), (0x1DB7, 'M', u'ʊ'), (0x1DB8, 'M', u'ᴜ'), (0x1DB9, 'M', u'ʋ'), (0x1DBA, 'M', u'ʌ'), (0x1DBB, 'M', u'z'), (0x1DBC, 'M', u'ʐ'), (0x1DBD, 'M', u'ʑ'), (0x1DBE, 'M', u'ʒ'), (0x1DBF, 'M', u'θ'), (0x1DC0, 'V'), (0x1DE7, 'X'), (0x1DFC, 'V'), (0x1E00, 'M', u'ḁ'), (0x1E01, 'V'), (0x1E02, 'M', u'ḃ'), (0x1E03, 'V'), (0x1E04, 'M', u'ḅ'), (0x1E05, 'V'), (0x1E06, 'M', u'ḇ'), (0x1E07, 'V'), (0x1E08, 'M', u'ḉ'), (0x1E09, 'V'), (0x1E0A, 'M', u'ḋ'), (0x1E0B, 'V'), (0x1E0C, 'M', u'ḍ'), (0x1E0D, 'V'), (0x1E0E, 'M', u'ḏ'), (0x1E0F, 'V'), (0x1E10, 'M', u'ḑ'), (0x1E11, 'V'), (0x1E12, 'M', u'ḓ'), (0x1E13, 'V'), (0x1E14, 'M', u'ḕ'), (0x1E15, 'V'), (0x1E16, 'M', u'ḗ'), (0x1E17, 'V'), (0x1E18, 'M', u'ḙ'), (0x1E19, 'V'), (0x1E1A, 'M', u'ḛ'), (0x1E1B, 'V'), (0x1E1C, 'M', u'ḝ'), (0x1E1D, 'V'), (0x1E1E, 'M', u'ḟ'), (0x1E1F, 'V'), (0x1E20, 'M', u'ḡ'), (0x1E21, 'V'), (0x1E22, 'M', u'ḣ'), (0x1E23, 'V'), (0x1E24, 'M', u'ḥ'), (0x1E25, 'V'), (0x1E26, 'M', u'ḧ'), (0x1E27, 'V'), (0x1E28, 'M', u'ḩ'), (0x1E29, 'V'), (0x1E2A, 'M', u'ḫ'), (0x1E2B, 'V'), (0x1E2C, 'M', u'ḭ'), (0x1E2D, 'V'), (0x1E2E, 'M', u'ḯ'), (0x1E2F, 'V'), (0x1E30, 'M', u'ḱ'), (0x1E31, 'V'), (0x1E32, 'M', u'ḳ'), (0x1E33, 'V'), (0x1E34, 'M', u'ḵ'), (0x1E35, 'V'), (0x1E36, 'M', u'ḷ'), (0x1E37, 'V'), (0x1E38, 'M', u'ḹ'), (0x1E39, 'V'), 
(0x1E3A, 'M', u'ḻ'), (0x1E3B, 'V'), (0x1E3C, 'M', u'ḽ'), (0x1E3D, 'V'), (0x1E3E, 'M', u'ḿ'), (0x1E3F, 'V'), (0x1E40, 'M', u'ṁ'), (0x1E41, 'V'), (0x1E42, 'M', u'ṃ'), (0x1E43, 'V'), (0x1E44, 'M', u'ṅ'), (0x1E45, 'V'), (0x1E46, 'M', u'ṇ'), (0x1E47, 'V'), (0x1E48, 'M', u'ṉ'), (0x1E49, 'V'), (0x1E4A, 'M', u'ṋ'), ] def _seg_17(): return [ (0x1E4B, 'V'), (0x1E4C, 'M', u'ṍ'), (0x1E4D, 'V'), (0x1E4E, 'M', u'ṏ'), (0x1E4F, 'V'), (0x1E50, 'M', u'ṑ'), (0x1E51, 'V'), (0x1E52, 'M', u'ṓ'), (0x1E53, 'V'), (0x1E54, 'M', u'ṕ'), (0x1E55, 'V'), (0x1E56, 'M', u'ṗ'), (0x1E57, 'V'), (0x1E58, 'M', u'ṙ'), (0x1E59, 'V'), (0x1E5A, 'M', u'ṛ'), (0x1E5B, 'V'), (0x1E5C, 'M', u'ṝ'), (0x1E5D, 'V'), (0x1E5E, 'M', u'ṟ'), (0x1E5F, 'V'), (0x1E60, 'M', u'ṡ'), (0x1E61, 'V'), (0x1E62, 'M', u'ṣ'), (0x1E63, 'V'), (0x1E64, 'M', u'ṥ'), (0x1E65, 'V'), (0x1E66, 'M', u'ṧ'), (0x1E67, 'V'), (0x1E68, 'M', u'ṩ'), (0x1E69, 'V'), (0x1E6A, 'M', u'ṫ'), (0x1E6B, 'V'), (0x1E6C, 'M', u'ṭ'), (0x1E6D, 'V'), (0x1E6E, 'M', u'ṯ'), (0x1E6F, 'V'), (0x1E70, 'M', u'ṱ'), (0x1E71, 'V'), (0x1E72, 'M', u'ṳ'), (0x1E73, 'V'), (0x1E74, 'M', u'ṵ'), (0x1E75, 'V'), (0x1E76, 'M', u'ṷ'), (0x1E77, 'V'), (0x1E78, 'M', u'ṹ'), (0x1E79, 'V'), (0x1E7A, 'M', u'ṻ'), (0x1E7B, 'V'), (0x1E7C, 'M', u'ṽ'), (0x1E7D, 'V'), (0x1E7E, 'M', u'ṿ'), (0x1E7F, 'V'), (0x1E80, 'M', u'ẁ'), (0x1E81, 'V'), (0x1E82, 'M', u'ẃ'), (0x1E83, 'V'), (0x1E84, 'M', u'ẅ'), (0x1E85, 'V'), (0x1E86, 'M', u'ẇ'), (0x1E87, 'V'), (0x1E88, 'M', u'ẉ'), (0x1E89, 'V'), (0x1E8A, 'M', u'ẋ'), (0x1E8B, 'V'), (0x1E8C, 'M', u'ẍ'), (0x1E8D, 'V'), (0x1E8E, 'M', u'ẏ'), (0x1E8F, 'V'), (0x1E90, 'M', u'ẑ'), (0x1E91, 'V'), (0x1E92, 'M', u'ẓ'), (0x1E93, 'V'), (0x1E94, 'M', u'ẕ'), (0x1E95, 'V'), (0x1E9A, 'M', u'aʾ'), (0x1E9B, 'M', u'ṡ'), (0x1E9C, 'V'), (0x1E9E, 'M', u'ss'), (0x1E9F, 'V'), (0x1EA0, 'M', u'ạ'), (0x1EA1, 'V'), (0x1EA2, 'M', u'ả'), (0x1EA3, 'V'), (0x1EA4, 'M', u'ấ'), (0x1EA5, 'V'), (0x1EA6, 'M', u'ầ'), (0x1EA7, 'V'), (0x1EA8, 'M', u'ẩ'), (0x1EA9, 'V'), (0x1EAA, 'M', u'ẫ'), (0x1EAB, 'V'), (0x1EAC, 'M', u'ậ'), (0x1EAD, 'V'), (0x1EAE, 'M', u'ắ'), (0x1EAF, 'V'), (0x1EB0, 'M', u'ằ'), (0x1EB1, 'V'), (0x1EB2, 'M', u'ẳ'), (0x1EB3, 'V'), ] def _seg_18(): return [ (0x1EB4, 'M', u'ẵ'), (0x1EB5, 'V'), (0x1EB6, 'M', u'ặ'), (0x1EB7, 'V'), (0x1EB8, 'M', u'ẹ'), (0x1EB9, 'V'), (0x1EBA, 'M', u'ẻ'), (0x1EBB, 'V'), (0x1EBC, 'M', u'ẽ'), (0x1EBD, 'V'), (0x1EBE, 'M', u'ế'), (0x1EBF, 'V'), (0x1EC0, 'M', u'ề'), (0x1EC1, 'V'), (0x1EC2, 'M', u'ể'), (0x1EC3, 'V'), (0x1EC4, 'M', u'ễ'), (0x1EC5, 'V'), (0x1EC6, 'M', u'ệ'), (0x1EC7, 'V'), (0x1EC8, 'M', u'ỉ'), (0x1EC9, 'V'), (0x1ECA, 'M', u'ị'), (0x1ECB, 'V'), (0x1ECC, 'M', u'ọ'), (0x1ECD, 'V'), (0x1ECE, 'M', u'ỏ'), (0x1ECF, 'V'), (0x1ED0, 'M', u'ố'), (0x1ED1, 'V'), (0x1ED2, 'M', u'ồ'), (0x1ED3, 'V'), (0x1ED4, 'M', u'ổ'), (0x1ED5, 'V'), (0x1ED6, 'M', u'ỗ'), (0x1ED7, 'V'), (0x1ED8, 'M', u'ộ'), (0x1ED9, 'V'), (0x1EDA, 'M', u'ớ'), (0x1EDB, 'V'), (0x1EDC, 'M', u'ờ'), (0x1EDD, 'V'), (0x1EDE, 'M', u'ở'), (0x1EDF, 'V'), (0x1EE0, 'M', u'ỡ'), (0x1EE1, 'V'), (0x1EE2, 'M', u'ợ'), (0x1EE3, 'V'), (0x1EE4, 'M', u'ụ'), (0x1EE5, 'V'), (0x1EE6, 'M', u'ủ'), (0x1EE7, 'V'), (0x1EE8, 'M', u'ứ'), (0x1EE9, 'V'), (0x1EEA, 'M', u'ừ'), (0x1EEB, 'V'), (0x1EEC, 'M', u'ử'), (0x1EED, 'V'), (0x1EEE, 'M', u'ữ'), (0x1EEF, 'V'), (0x1EF0, 'M', u'ự'), (0x1EF1, 'V'), (0x1EF2, 'M', u'ỳ'), (0x1EF3, 'V'), (0x1EF4, 'M', u'ỵ'), (0x1EF5, 'V'), (0x1EF6, 'M', u'ỷ'), (0x1EF7, 'V'), (0x1EF8, 'M', u'ỹ'), (0x1EF9, 'V'), (0x1EFA, 'M', u'ỻ'), (0x1EFB, 'V'), (0x1EFC, 'M', u'ỽ'), (0x1EFD, 'V'), (0x1EFE, 'M', u'ỿ'), (0x1EFF, 'V'), (0x1F08, 'M', u'ἀ'), 
(0x1F09, 'M', u'ἁ'), (0x1F0A, 'M', u'ἂ'), (0x1F0B, 'M', u'ἃ'), (0x1F0C, 'M', u'ἄ'), (0x1F0D, 'M', u'ἅ'), (0x1F0E, 'M', u'ἆ'), (0x1F0F, 'M', u'ἇ'), (0x1F10, 'V'), (0x1F16, 'X'), (0x1F18, 'M', u'ἐ'), (0x1F19, 'M', u'ἑ'), (0x1F1A, 'M', u'ἒ'), (0x1F1B, 'M', u'ἓ'), (0x1F1C, 'M', u'ἔ'), (0x1F1D, 'M', u'ἕ'), (0x1F1E, 'X'), (0x1F20, 'V'), (0x1F28, 'M', u'ἠ'), (0x1F29, 'M', u'ἡ'), (0x1F2A, 'M', u'ἢ'), (0x1F2B, 'M', u'ἣ'), (0x1F2C, 'M', u'ἤ'), (0x1F2D, 'M', u'ἥ'), ] def _seg_19(): return [ (0x1F2E, 'M', u'ἦ'), (0x1F2F, 'M', u'ἧ'), (0x1F30, 'V'), (0x1F38, 'M', u'ἰ'), (0x1F39, 'M', u'ἱ'), (0x1F3A, 'M', u'ἲ'), (0x1F3B, 'M', u'ἳ'), (0x1F3C, 'M', u'ἴ'), (0x1F3D, 'M', u'ἵ'), (0x1F3E, 'M', u'ἶ'), (0x1F3F, 'M', u'ἷ'), (0x1F40, 'V'), (0x1F46, 'X'), (0x1F48, 'M', u'ὀ'), (0x1F49, 'M', u'ὁ'), (0x1F4A, 'M', u'ὂ'), (0x1F4B, 'M', u'ὃ'), (0x1F4C, 'M', u'ὄ'), (0x1F4D, 'M', u'ὅ'), (0x1F4E, 'X'), (0x1F50, 'V'), (0x1F58, 'X'), (0x1F59, 'M', u'ὑ'), (0x1F5A, 'X'), (0x1F5B, 'M', u'ὓ'), (0x1F5C, 'X'), (0x1F5D, 'M', u'ὕ'), (0x1F5E, 'X'), (0x1F5F, 'M', u'ὗ'), (0x1F60, 'V'), (0x1F68, 'M', u'ὠ'), (0x1F69, 'M', u'ὡ'), (0x1F6A, 'M', u'ὢ'), (0x1F6B, 'M', u'ὣ'), (0x1F6C, 'M', u'ὤ'), (0x1F6D, 'M', u'ὥ'), (0x1F6E, 'M', u'ὦ'), (0x1F6F, 'M', u'ὧ'), (0x1F70, 'V'), (0x1F71, 'M', u'ά'), (0x1F72, 'V'), (0x1F73, 'M', u'έ'), (0x1F74, 'V'), (0x1F75, 'M', u'ή'), (0x1F76, 'V'), (0x1F77, 'M', u'ί'), (0x1F78, 'V'), (0x1F79, 'M', u'ό'), (0x1F7A, 'V'), (0x1F7B, 'M', u'ύ'), (0x1F7C, 'V'), (0x1F7D, 'M', u'ώ'), (0x1F7E, 'X'), (0x1F80, 'M', u'ἀι'), (0x1F81, 'M', u'ἁι'), (0x1F82, 'M', u'ἂι'), (0x1F83, 'M', u'ἃι'), (0x1F84, 'M', u'ἄι'), (0x1F85, 'M', u'ἅι'), (0x1F86, 'M', u'ἆι'), (0x1F87, 'M', u'ἇι'), (0x1F88, 'M', u'ἀι'), (0x1F89, 'M', u'ἁι'), (0x1F8A, 'M', u'ἂι'), (0x1F8B, 'M', u'ἃι'), (0x1F8C, 'M', u'ἄι'), (0x1F8D, 'M', u'ἅι'), (0x1F8E, 'M', u'ἆι'), (0x1F8F, 'M', u'ἇι'), (0x1F90, 'M', u'ἠι'), (0x1F91, 'M', u'ἡι'), (0x1F92, 'M', u'ἢι'), (0x1F93, 'M', u'ἣι'), (0x1F94, 'M', u'ἤι'), (0x1F95, 'M', u'ἥι'), (0x1F96, 'M', u'ἦι'), (0x1F97, 'M', u'ἧι'), (0x1F98, 'M', u'ἠι'), (0x1F99, 'M', u'ἡι'), (0x1F9A, 'M', u'ἢι'), (0x1F9B, 'M', u'ἣι'), (0x1F9C, 'M', u'ἤι'), (0x1F9D, 'M', u'ἥι'), (0x1F9E, 'M', u'ἦι'), (0x1F9F, 'M', u'ἧι'), (0x1FA0, 'M', u'ὠι'), (0x1FA1, 'M', u'ὡι'), (0x1FA2, 'M', u'ὢι'), (0x1FA3, 'M', u'ὣι'), (0x1FA4, 'M', u'ὤι'), (0x1FA5, 'M', u'ὥι'), (0x1FA6, 'M', u'ὦι'), (0x1FA7, 'M', u'ὧι'), (0x1FA8, 'M', u'ὠι'), (0x1FA9, 'M', u'ὡι'), (0x1FAA, 'M', u'ὢι'), (0x1FAB, 'M', u'ὣι'), (0x1FAC, 'M', u'ὤι'), (0x1FAD, 'M', u'ὥι'), (0x1FAE, 'M', u'ὦι'), ] def _seg_20(): return [ (0x1FAF, 'M', u'ὧι'), (0x1FB0, 'V'), (0x1FB2, 'M', u'ὰι'), (0x1FB3, 'M', u'αι'), (0x1FB4, 'M', u'άι'), (0x1FB5, 'X'), (0x1FB6, 'V'), (0x1FB7, 'M', u'ᾶι'), (0x1FB8, 'M', u'ᾰ'), (0x1FB9, 'M', u'ᾱ'), (0x1FBA, 'M', u'ὰ'), (0x1FBB, 'M', u'ά'), (0x1FBC, 'M', u'αι'), (0x1FBD, '3', u' ̓'), (0x1FBE, 'M', u'ι'), (0x1FBF, '3', u' ̓'), (0x1FC0, '3', u' ͂'), (0x1FC1, '3', u' ̈͂'), (0x1FC2, 'M', u'ὴι'), (0x1FC3, 'M', u'ηι'), (0x1FC4, 'M', u'ήι'), (0x1FC5, 'X'), (0x1FC6, 'V'), (0x1FC7, 'M', u'ῆι'), (0x1FC8, 'M', u'ὲ'), (0x1FC9, 'M', u'έ'), (0x1FCA, 'M', u'ὴ'), (0x1FCB, 'M', u'ή'), (0x1FCC, 'M', u'ηι'), (0x1FCD, '3', u' ̓̀'), (0x1FCE, '3', u' ̓́'), (0x1FCF, '3', u' ̓͂'), (0x1FD0, 'V'), (0x1FD3, 'M', u'ΐ'), (0x1FD4, 'X'), (0x1FD6, 'V'), (0x1FD8, 'M', u'ῐ'), (0x1FD9, 'M', u'ῑ'), (0x1FDA, 'M', u'ὶ'), (0x1FDB, 'M', u'ί'), (0x1FDC, 'X'), (0x1FDD, '3', u' ̔̀'), (0x1FDE, '3', u' ̔́'), (0x1FDF, '3', u' ̔͂'), (0x1FE0, 'V'), (0x1FE3, 'M', u'ΰ'), (0x1FE4, 'V'), (0x1FE8, 'M', u'ῠ'), (0x1FE9, 'M', u'ῡ'), (0x1FEA, 'M', 
u'ὺ'), (0x1FEB, 'M', u'ύ'), (0x1FEC, 'M', u'ῥ'), (0x1FED, '3', u' ̈̀'), (0x1FEE, '3', u' ̈́'), (0x1FEF, '3', u'`'), (0x1FF0, 'X'), (0x1FF2, 'M', u'ὼι'), (0x1FF3, 'M', u'ωι'), (0x1FF4, 'M', u'ώι'), (0x1FF5, 'X'), (0x1FF6, 'V'), (0x1FF7, 'M', u'ῶι'), (0x1FF8, 'M', u'ὸ'), (0x1FF9, 'M', u'ό'), (0x1FFA, 'M', u'ὼ'), (0x1FFB, 'M', u'ώ'), (0x1FFC, 'M', u'ωι'), (0x1FFD, '3', u' ́'), (0x1FFE, '3', u' ̔'), (0x1FFF, 'X'), (0x2000, '3', u' '), (0x200B, 'I'), (0x200C, 'D', u''), (0x200E, 'X'), (0x2010, 'V'), (0x2011, 'M', u'‐'), (0x2012, 'V'), (0x2017, '3', u' ̳'), (0x2018, 'V'), (0x2024, 'X'), (0x2027, 'V'), (0x2028, 'X'), (0x202F, '3', u' '), (0x2030, 'V'), (0x2033, 'M', u'′′'), (0x2034, 'M', u'′′′'), (0x2035, 'V'), (0x2036, 'M', u'‵‵'), (0x2037, 'M', u'‵‵‵'), (0x2038, 'V'), (0x203C, '3', u'!!'), (0x203D, 'V'), (0x203E, '3', u' ̅'), (0x203F, 'V'), (0x2047, '3', u'??'), (0x2048, '3', u'?!'), (0x2049, '3', u'!?'), (0x204A, 'V'), (0x2057, 'M', u'′′′′'), (0x2058, 'V'), ] def _seg_21(): return [ (0x205F, '3', u' '), (0x2060, 'I'), (0x2061, 'X'), (0x2064, 'I'), (0x2065, 'X'), (0x2070, 'M', u'0'), (0x2071, 'M', u'i'), (0x2072, 'X'), (0x2074, 'M', u'4'), (0x2075, 'M', u'5'), (0x2076, 'M', u'6'), (0x2077, 'M', u'7'), (0x2078, 'M', u'8'), (0x2079, 'M', u'9'), (0x207A, '3', u'+'), (0x207B, 'M', u'−'), (0x207C, '3', u'='), (0x207D, '3', u'('), (0x207E, '3', u')'), (0x207F, 'M', u'n'), (0x2080, 'M', u'0'), (0x2081, 'M', u'1'), (0x2082, 'M', u'2'), (0x2083, 'M', u'3'), (0x2084, 'M', u'4'), (0x2085, 'M', u'5'), (0x2086, 'M', u'6'), (0x2087, 'M', u'7'), (0x2088, 'M', u'8'), (0x2089, 'M', u'9'), (0x208A, '3', u'+'), (0x208B, 'M', u'−'), (0x208C, '3', u'='), (0x208D, '3', u'('), (0x208E, '3', u')'), (0x208F, 'X'), (0x2090, 'M', u'a'), (0x2091, 'M', u'e'), (0x2092, 'M', u'o'), (0x2093, 'M', u'x'), (0x2094, 'M', u'ə'), (0x2095, 'M', u'h'), (0x2096, 'M', u'k'), (0x2097, 'M', u'l'), (0x2098, 'M', u'm'), (0x2099, 'M', u'n'), (0x209A, 'M', u'p'), (0x209B, 'M', u's'), (0x209C, 'M', u't'), (0x209D, 'X'), (0x20A0, 'V'), (0x20A8, 'M', u'rs'), (0x20A9, 'V'), (0x20BB, 'X'), (0x20D0, 'V'), (0x20F1, 'X'), (0x2100, '3', u'a/c'), (0x2101, '3', u'a/s'), (0x2102, 'M', u'c'), (0x2103, 'M', u'°c'), (0x2104, 'V'), (0x2105, '3', u'c/o'), (0x2106, '3', u'c/u'), (0x2107, 'M', u'ɛ'), (0x2108, 'V'), (0x2109, 'M', u'°f'), (0x210A, 'M', u'g'), (0x210B, 'M', u'h'), (0x210F, 'M', u'ħ'), (0x2110, 'M', u'i'), (0x2112, 'M', u'l'), (0x2114, 'V'), (0x2115, 'M', u'n'), (0x2116, 'M', u'no'), (0x2117, 'V'), (0x2119, 'M', u'p'), (0x211A, 'M', u'q'), (0x211B, 'M', u'r'), (0x211E, 'V'), (0x2120, 'M', u'sm'), (0x2121, 'M', u'tel'), (0x2122, 'M', u'tm'), (0x2123, 'V'), (0x2124, 'M', u'z'), (0x2125, 'V'), (0x2126, 'M', u'ω'), (0x2127, 'V'), (0x2128, 'M', u'z'), (0x2129, 'V'), (0x212A, 'M', u'k'), (0x212B, 'M', u'å'), (0x212C, 'M', u'b'), (0x212D, 'M', u'c'), (0x212E, 'V'), (0x212F, 'M', u'e'), (0x2131, 'M', u'f'), (0x2132, 'X'), (0x2133, 'M', u'm'), (0x2134, 'M', u'o'), (0x2135, 'M', u'א'), ] def _seg_22(): return [ (0x2136, 'M', u'ב'), (0x2137, 'M', u'ג'), (0x2138, 'M', u'ד'), (0x2139, 'M', u'i'), (0x213A, 'V'), (0x213B, 'M', u'fax'), (0x213C, 'M', u'π'), (0x213D, 'M', u'γ'), (0x213F, 'M', u'π'), (0x2140, 'M', u'∑'), (0x2141, 'V'), (0x2145, 'M', u'd'), (0x2147, 'M', u'e'), (0x2148, 'M', u'i'), (0x2149, 'M', u'j'), (0x214A, 'V'), (0x2150, 'M', u'1⁄7'), (0x2151, 'M', u'1⁄9'), (0x2152, 'M', u'1⁄10'), (0x2153, 'M', u'1⁄3'), (0x2154, 'M', u'2⁄3'), (0x2155, 'M', u'1⁄5'), (0x2156, 'M', u'2⁄5'), (0x2157, 'M', u'3⁄5'), (0x2158, 'M', u'4⁄5'), (0x2159, 'M', u'1⁄6'), 
(0x215A, 'M', u'5⁄6'), (0x215B, 'M', u'1⁄8'), (0x215C, 'M', u'3⁄8'), (0x215D, 'M', u'5⁄8'), (0x215E, 'M', u'7⁄8'), (0x215F, 'M', u'1⁄'), (0x2160, 'M', u'i'), (0x2161, 'M', u'ii'), (0x2162, 'M', u'iii'), (0x2163, 'M', u'iv'), (0x2164, 'M', u'v'), (0x2165, 'M', u'vi'), (0x2166, 'M', u'vii'), (0x2167, 'M', u'viii'), (0x2168, 'M', u'ix'), (0x2169, 'M', u'x'), (0x216A, 'M', u'xi'), (0x216B, 'M', u'xii'), (0x216C, 'M', u'l'), (0x216D, 'M', u'c'), (0x216E, 'M', u'd'), (0x216F, 'M', u'm'), (0x2170, 'M', u'i'), (0x2171, 'M', u'ii'), (0x2172, 'M', u'iii'), (0x2173, 'M', u'iv'), (0x2174, 'M', u'v'), (0x2175, 'M', u'vi'), (0x2176, 'M', u'vii'), (0x2177, 'M', u'viii'), (0x2178, 'M', u'ix'), (0x2179, 'M', u'x'), (0x217A, 'M', u'xi'), (0x217B, 'M', u'xii'), (0x217C, 'M', u'l'), (0x217D, 'M', u'c'), (0x217E, 'M', u'd'), (0x217F, 'M', u'm'), (0x2180, 'V'), (0x2183, 'X'), (0x2184, 'V'), (0x2189, 'M', u'0⁄3'), (0x218A, 'X'), (0x2190, 'V'), (0x222C, 'M', u'∫∫'), (0x222D, 'M', u'∫∫∫'), (0x222E, 'V'), (0x222F, 'M', u'∮∮'), (0x2230, 'M', u'∮∮∮'), (0x2231, 'V'), (0x2260, '3'), (0x2261, 'V'), (0x226E, '3'), (0x2270, 'V'), (0x2329, 'M', u'〈'), (0x232A, 'M', u'〉'), (0x232B, 'V'), (0x23F4, 'X'), (0x2400, 'V'), (0x2427, 'X'), (0x2440, 'V'), (0x244B, 'X'), (0x2460, 'M', u'1'), (0x2461, 'M', u'2'), (0x2462, 'M', u'3'), (0x2463, 'M', u'4'), (0x2464, 'M', u'5'), (0x2465, 'M', u'6'), (0x2466, 'M', u'7'), (0x2467, 'M', u'8'), (0x2468, 'M', u'9'), (0x2469, 'M', u'10'), (0x246A, 'M', u'11'), (0x246B, 'M', u'12'), ] def _seg_23(): return [ (0x246C, 'M', u'13'), (0x246D, 'M', u'14'), (0x246E, 'M', u'15'), (0x246F, 'M', u'16'), (0x2470, 'M', u'17'), (0x2471, 'M', u'18'), (0x2472, 'M', u'19'), (0x2473, 'M', u'20'), (0x2474, '3', u'(1)'), (0x2475, '3', u'(2)'), (0x2476, '3', u'(3)'), (0x2477, '3', u'(4)'), (0x2478, '3', u'(5)'), (0x2479, '3', u'(6)'), (0x247A, '3', u'(7)'), (0x247B, '3', u'(8)'), (0x247C, '3', u'(9)'), (0x247D, '3', u'(10)'), (0x247E, '3', u'(11)'), (0x247F, '3', u'(12)'), (0x2480, '3', u'(13)'), (0x2481, '3', u'(14)'), (0x2482, '3', u'(15)'), (0x2483, '3', u'(16)'), (0x2484, '3', u'(17)'), (0x2485, '3', u'(18)'), (0x2486, '3', u'(19)'), (0x2487, '3', u'(20)'), (0x2488, 'X'), (0x249C, '3', u'(a)'), (0x249D, '3', u'(b)'), (0x249E, '3', u'(c)'), (0x249F, '3', u'(d)'), (0x24A0, '3', u'(e)'), (0x24A1, '3', u'(f)'), (0x24A2, '3', u'(g)'), (0x24A3, '3', u'(h)'), (0x24A4, '3', u'(i)'), (0x24A5, '3', u'(j)'), (0x24A6, '3', u'(k)'), (0x24A7, '3', u'(l)'), (0x24A8, '3', u'(m)'), (0x24A9, '3', u'(n)'), (0x24AA, '3', u'(o)'), (0x24AB, '3', u'(p)'), (0x24AC, '3', u'(q)'), (0x24AD, '3', u'(r)'), (0x24AE, '3', u'(s)'), (0x24AF, '3', u'(t)'), (0x24B0, '3', u'(u)'), (0x24B1, '3', u'(v)'), (0x24B2, '3', u'(w)'), (0x24B3, '3', u'(x)'), (0x24B4, '3', u'(y)'), (0x24B5, '3', u'(z)'), (0x24B6, 'M', u'a'), (0x24B7, 'M', u'b'), (0x24B8, 'M', u'c'), (0x24B9, 'M', u'd'), (0x24BA, 'M', u'e'), (0x24BB, 'M', u'f'), (0x24BC, 'M', u'g'), (0x24BD, 'M', u'h'), (0x24BE, 'M', u'i'), (0x24BF, 'M', u'j'), (0x24C0, 'M', u'k'), (0x24C1, 'M', u'l'), (0x24C2, 'M', u'm'), (0x24C3, 'M', u'n'), (0x24C4, 'M', u'o'), (0x24C5, 'M', u'p'), (0x24C6, 'M', u'q'), (0x24C7, 'M', u'r'), (0x24C8, 'M', u's'), (0x24C9, 'M', u't'), (0x24CA, 'M', u'u'), (0x24CB, 'M', u'v'), (0x24CC, 'M', u'w'), (0x24CD, 'M', u'x'), (0x24CE, 'M', u'y'), (0x24CF, 'M', u'z'), (0x24D0, 'M', u'a'), (0x24D1, 'M', u'b'), (0x24D2, 'M', u'c'), (0x24D3, 'M', u'd'), (0x24D4, 'M', u'e'), (0x24D5, 'M', u'f'), (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), (0x24D9, 'M', u'j'), (0x24DA, 
'M', u'k'), (0x24DB, 'M', u'l'), (0x24DC, 'M', u'm'), (0x24DD, 'M', u'n'), (0x24DE, 'M', u'o'), (0x24DF, 'M', u'p'), (0x24E0, 'M', u'q'), (0x24E1, 'M', u'r'), (0x24E2, 'M', u's'), ] def _seg_24(): return [ (0x24E3, 'M', u't'), (0x24E4, 'M', u'u'), (0x24E5, 'M', u'v'), (0x24E6, 'M', u'w'), (0x24E7, 'M', u'x'), (0x24E8, 'M', u'y'), (0x24E9, 'M', u'z'), (0x24EA, 'M', u'0'), (0x24EB, 'V'), (0x2700, 'X'), (0x2701, 'V'), (0x2A0C, 'M', u'∫∫∫∫'), (0x2A0D, 'V'), (0x2A74, '3', u'::='), (0x2A75, '3', u'=='), (0x2A76, '3', u'==='), (0x2A77, 'V'), (0x2ADC, 'M', u'⫝̸'), (0x2ADD, 'V'), (0x2B4D, 'X'), (0x2B50, 'V'), (0x2B5A, 'X'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), (0x2C03, 'M', u'ⰳ'), (0x2C04, 'M', u'ⰴ'), (0x2C05, 'M', u'ⰵ'), (0x2C06, 'M', u'ⰶ'), (0x2C07, 'M', u'ⰷ'), (0x2C08, 'M', u'ⰸ'), (0x2C09, 'M', u'ⰹ'), (0x2C0A, 'M', u'ⰺ'), (0x2C0B, 'M', u'ⰻ'), (0x2C0C, 'M', u'ⰼ'), (0x2C0D, 'M', u'ⰽ'), (0x2C0E, 'M', u'ⰾ'), (0x2C0F, 'M', u'ⰿ'), (0x2C10, 'M', u'ⱀ'), (0x2C11, 'M', u'ⱁ'), (0x2C12, 'M', u'ⱂ'), (0x2C13, 'M', u'ⱃ'), (0x2C14, 'M', u'ⱄ'), (0x2C15, 'M', u'ⱅ'), (0x2C16, 'M', u'ⱆ'), (0x2C17, 'M', u'ⱇ'), (0x2C18, 'M', u'ⱈ'), (0x2C19, 'M', u'ⱉ'), (0x2C1A, 'M', u'ⱊ'), (0x2C1B, 'M', u'ⱋ'), (0x2C1C, 'M', u'ⱌ'), (0x2C1D, 'M', u'ⱍ'), (0x2C1E, 'M', u'ⱎ'), (0x2C1F, 'M', u'ⱏ'), (0x2C20, 'M', u'ⱐ'), (0x2C21, 'M', u'ⱑ'), (0x2C22, 'M', u'ⱒ'), (0x2C23, 'M', u'ⱓ'), (0x2C24, 'M', u'ⱔ'), (0x2C25, 'M', u'ⱕ'), (0x2C26, 'M', u'ⱖ'), (0x2C27, 'M', u'ⱗ'), (0x2C28, 'M', u'ⱘ'), (0x2C29, 'M', u'ⱙ'), (0x2C2A, 'M', u'ⱚ'), (0x2C2B, 'M', u'ⱛ'), (0x2C2C, 'M', u'ⱜ'), (0x2C2D, 'M', u'ⱝ'), (0x2C2E, 'M', u'ⱞ'), (0x2C2F, 'X'), (0x2C30, 'V'), (0x2C5F, 'X'), (0x2C60, 'M', u'ⱡ'), (0x2C61, 'V'), (0x2C62, 'M', u'ɫ'), (0x2C63, 'M', u'ᵽ'), (0x2C64, 'M', u'ɽ'), (0x2C65, 'V'), (0x2C67, 'M', u'ⱨ'), (0x2C68, 'V'), (0x2C69, 'M', u'ⱪ'), (0x2C6A, 'V'), (0x2C6B, 'M', u'ⱬ'), (0x2C6C, 'V'), (0x2C6D, 'M', u'ɑ'), (0x2C6E, 'M', u'ɱ'), (0x2C6F, 'M', u'ɐ'), (0x2C70, 'M', u'ɒ'), (0x2C71, 'V'), (0x2C72, 'M', u'ⱳ'), (0x2C73, 'V'), (0x2C75, 'M', u'ⱶ'), (0x2C76, 'V'), (0x2C7C, 'M', u'j'), (0x2C7D, 'M', u'v'), (0x2C7E, 'M', u'ȿ'), (0x2C7F, 'M', u'ɀ'), (0x2C80, 'M', u'ⲁ'), (0x2C81, 'V'), (0x2C82, 'M', u'ⲃ'), ] def _seg_25(): return [ (0x2C83, 'V'), (0x2C84, 'M', u'ⲅ'), (0x2C85, 'V'), (0x2C86, 'M', u'ⲇ'), (0x2C87, 'V'), (0x2C88, 'M', u'ⲉ'), (0x2C89, 'V'), (0x2C8A, 'M', u'ⲋ'), (0x2C8B, 'V'), (0x2C8C, 'M', u'ⲍ'), (0x2C8D, 'V'), (0x2C8E, 'M', u'ⲏ'), (0x2C8F, 'V'), (0x2C90, 'M', u'ⲑ'), (0x2C91, 'V'), (0x2C92, 'M', u'ⲓ'), (0x2C93, 'V'), (0x2C94, 'M', u'ⲕ'), (0x2C95, 'V'), (0x2C96, 'M', u'ⲗ'), (0x2C97, 'V'), (0x2C98, 'M', u'ⲙ'), (0x2C99, 'V'), (0x2C9A, 'M', u'ⲛ'), (0x2C9B, 'V'), (0x2C9C, 'M', u'ⲝ'), (0x2C9D, 'V'), (0x2C9E, 'M', u'ⲟ'), (0x2C9F, 'V'), (0x2CA0, 'M', u'ⲡ'), (0x2CA1, 'V'), (0x2CA2, 'M', u'ⲣ'), (0x2CA3, 'V'), (0x2CA4, 'M', u'ⲥ'), (0x2CA5, 'V'), (0x2CA6, 'M', u'ⲧ'), (0x2CA7, 'V'), (0x2CA8, 'M', u'ⲩ'), (0x2CA9, 'V'), (0x2CAA, 'M', u'ⲫ'), (0x2CAB, 'V'), (0x2CAC, 'M', u'ⲭ'), (0x2CAD, 'V'), (0x2CAE, 'M', u'ⲯ'), (0x2CAF, 'V'), (0x2CB0, 'M', u'ⲱ'), (0x2CB1, 'V'), (0x2CB2, 'M', u'ⲳ'), (0x2CB3, 'V'), (0x2CB4, 'M', u'ⲵ'), (0x2CB5, 'V'), (0x2CB6, 'M', u'ⲷ'), (0x2CB7, 'V'), (0x2CB8, 'M', u'ⲹ'), (0x2CB9, 'V'), (0x2CBA, 'M', u'ⲻ'), (0x2CBB, 'V'), (0x2CBC, 'M', u'ⲽ'), (0x2CBD, 'V'), (0x2CBE, 'M', u'ⲿ'), (0x2CBF, 'V'), (0x2CC0, 'M', u'ⳁ'), (0x2CC1, 'V'), (0x2CC2, 'M', u'ⳃ'), (0x2CC3, 'V'), (0x2CC4, 'M', u'ⳅ'), (0x2CC5, 'V'), (0x2CC6, 'M', u'ⳇ'), (0x2CC7, 'V'), (0x2CC8, 'M', u'ⳉ'), (0x2CC9, 'V'), (0x2CCA, 'M', u'ⳋ'), (0x2CCB, 'V'), (0x2CCC, 'M', u'ⳍ'), 
(0x2CCD, 'V'), (0x2CCE, 'M', u'ⳏ'), (0x2CCF, 'V'), (0x2CD0, 'M', u'ⳑ'), (0x2CD1, 'V'), (0x2CD2, 'M', u'ⳓ'), (0x2CD3, 'V'), (0x2CD4, 'M', u'ⳕ'), (0x2CD5, 'V'), (0x2CD6, 'M', u'ⳗ'), (0x2CD7, 'V'), (0x2CD8, 'M', u'ⳙ'), (0x2CD9, 'V'), (0x2CDA, 'M', u'ⳛ'), (0x2CDB, 'V'), (0x2CDC, 'M', u'ⳝ'), (0x2CDD, 'V'), (0x2CDE, 'M', u'ⳟ'), (0x2CDF, 'V'), (0x2CE0, 'M', u'ⳡ'), (0x2CE1, 'V'), (0x2CE2, 'M', u'ⳣ'), (0x2CE3, 'V'), (0x2CEB, 'M', u'ⳬ'), (0x2CEC, 'V'), (0x2CED, 'M', u'ⳮ'), ] def _seg_26(): return [ (0x2CEE, 'V'), (0x2CF2, 'M', u'ⳳ'), (0x2CF3, 'V'), (0x2CF4, 'X'), (0x2CF9, 'V'), (0x2D26, 'X'), (0x2D27, 'V'), (0x2D28, 'X'), (0x2D2D, 'V'), (0x2D2E, 'X'), (0x2D30, 'V'), (0x2D68, 'X'), (0x2D6F, 'M', u'ⵡ'), (0x2D70, 'V'), (0x2D71, 'X'), (0x2D7F, 'V'), (0x2D97, 'X'), (0x2DA0, 'V'), (0x2DA7, 'X'), (0x2DA8, 'V'), (0x2DAF, 'X'), (0x2DB0, 'V'), (0x2DB7, 'X'), (0x2DB8, 'V'), (0x2DBF, 'X'), (0x2DC0, 'V'), (0x2DC7, 'X'), (0x2DC8, 'V'), (0x2DCF, 'X'), (0x2DD0, 'V'), (0x2DD7, 'X'), (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), (0x2E3C, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), (0x2E9F, 'M', u'母'), (0x2EA0, 'V'), (0x2EF3, 'M', u'龟'), (0x2EF4, 'X'), (0x2F00, 'M', u'一'), (0x2F01, 'M', u'丨'), (0x2F02, 'M', u'丶'), (0x2F03, 'M', u'丿'), (0x2F04, 'M', u'乙'), (0x2F05, 'M', u'亅'), (0x2F06, 'M', u'二'), (0x2F07, 'M', u'亠'), (0x2F08, 'M', u'人'), (0x2F09, 'M', u'儿'), (0x2F0A, 'M', u'入'), (0x2F0B, 'M', u'八'), (0x2F0C, 'M', u'冂'), (0x2F0D, 'M', u'冖'), (0x2F0E, 'M', u'冫'), (0x2F0F, 'M', u'几'), (0x2F10, 'M', u'凵'), (0x2F11, 'M', u'刀'), (0x2F12, 'M', u'力'), (0x2F13, 'M', u'勹'), (0x2F14, 'M', u'匕'), (0x2F15, 'M', u'匚'), (0x2F16, 'M', u'匸'), (0x2F17, 'M', u'十'), (0x2F18, 'M', u'卜'), (0x2F19, 'M', u'卩'), (0x2F1A, 'M', u'厂'), (0x2F1B, 'M', u'厶'), (0x2F1C, 'M', u'又'), (0x2F1D, 'M', u'口'), (0x2F1E, 'M', u'囗'), (0x2F1F, 'M', u'土'), (0x2F20, 'M', u'士'), (0x2F21, 'M', u'夂'), (0x2F22, 'M', u'夊'), (0x2F23, 'M', u'夕'), (0x2F24, 'M', u'大'), (0x2F25, 'M', u'女'), (0x2F26, 'M', u'子'), (0x2F27, 'M', u'宀'), (0x2F28, 'M', u'寸'), (0x2F29, 'M', u'小'), (0x2F2A, 'M', u'尢'), (0x2F2B, 'M', u'尸'), (0x2F2C, 'M', u'屮'), (0x2F2D, 'M', u'山'), (0x2F2E, 'M', u'巛'), (0x2F2F, 'M', u'工'), (0x2F30, 'M', u'己'), (0x2F31, 'M', u'巾'), (0x2F32, 'M', u'干'), (0x2F33, 'M', u'幺'), (0x2F34, 'M', u'广'), (0x2F35, 'M', u'廴'), (0x2F36, 'M', u'廾'), (0x2F37, 'M', u'弋'), (0x2F38, 'M', u'弓'), (0x2F39, 'M', u'彐'), ] def _seg_27(): return [ (0x2F3A, 'M', u'彡'), (0x2F3B, 'M', u'彳'), (0x2F3C, 'M', u'心'), (0x2F3D, 'M', u'戈'), (0x2F3E, 'M', u'戶'), (0x2F3F, 'M', u'手'), (0x2F40, 'M', u'支'), (0x2F41, 'M', u'攴'), (0x2F42, 'M', u'文'), (0x2F43, 'M', u'斗'), (0x2F44, 'M', u'斤'), (0x2F45, 'M', u'方'), (0x2F46, 'M', u'无'), (0x2F47, 'M', u'日'), (0x2F48, 'M', u'曰'), (0x2F49, 'M', u'月'), (0x2F4A, 'M', u'木'), (0x2F4B, 'M', u'欠'), (0x2F4C, 'M', u'止'), (0x2F4D, 'M', u'歹'), (0x2F4E, 'M', u'殳'), (0x2F4F, 'M', u'毋'), (0x2F50, 'M', u'比'), (0x2F51, 'M', u'毛'), (0x2F52, 'M', u'氏'), (0x2F53, 'M', u'气'), (0x2F54, 'M', u'水'), (0x2F55, 'M', u'火'), (0x2F56, 'M', u'爪'), (0x2F57, 'M', u'父'), (0x2F58, 'M', u'爻'), (0x2F59, 'M', u'爿'), (0x2F5A, 'M', u'片'), (0x2F5B, 'M', u'牙'), (0x2F5C, 'M', u'牛'), (0x2F5D, 'M', u'犬'), (0x2F5E, 'M', u'玄'), (0x2F5F, 'M', u'玉'), (0x2F60, 'M', u'瓜'), (0x2F61, 'M', u'瓦'), (0x2F62, 'M', u'甘'), (0x2F63, 'M', u'生'), (0x2F64, 'M', u'用'), (0x2F65, 'M', u'田'), (0x2F66, 'M', u'疋'), (0x2F67, 'M', u'疒'), (0x2F68, 'M', u'癶'), (0x2F69, 'M', u'白'), (0x2F6A, 'M', u'皮'), (0x2F6B, 'M', u'皿'), (0x2F6C, 'M', u'目'), (0x2F6D, 'M', u'矛'), (0x2F6E, 'M', u'矢'), (0x2F6F, 'M', u'石'), (0x2F70, 'M', u'示'), 
(0x2F71, 'M', u'禸'), (0x2F72, 'M', u'禾'), (0x2F73, 'M', u'穴'), (0x2F74, 'M', u'立'), (0x2F75, 'M', u'竹'), (0x2F76, 'M', u'米'), (0x2F77, 'M', u'糸'), (0x2F78, 'M', u'缶'), (0x2F79, 'M', u'网'), (0x2F7A, 'M', u'羊'), (0x2F7B, 'M', u'羽'), (0x2F7C, 'M', u'老'), (0x2F7D, 'M', u'而'), (0x2F7E, 'M', u'耒'), (0x2F7F, 'M', u'耳'), (0x2F80, 'M', u'聿'), (0x2F81, 'M', u'肉'), (0x2F82, 'M', u'臣'), (0x2F83, 'M', u'自'), (0x2F84, 'M', u'至'), (0x2F85, 'M', u'臼'), (0x2F86, 'M', u'舌'), (0x2F87, 'M', u'舛'), (0x2F88, 'M', u'舟'), (0x2F89, 'M', u'艮'), (0x2F8A, 'M', u'色'), (0x2F8B, 'M', u'艸'), (0x2F8C, 'M', u'虍'), (0x2F8D, 'M', u'虫'), (0x2F8E, 'M', u'血'), (0x2F8F, 'M', u'行'), (0x2F90, 'M', u'衣'), (0x2F91, 'M', u'襾'), (0x2F92, 'M', u'見'), (0x2F93, 'M', u'角'), (0x2F94, 'M', u'言'), (0x2F95, 'M', u'谷'), (0x2F96, 'M', u'豆'), (0x2F97, 'M', u'豕'), (0x2F98, 'M', u'豸'), (0x2F99, 'M', u'貝'), (0x2F9A, 'M', u'赤'), (0x2F9B, 'M', u'走'), (0x2F9C, 'M', u'足'), (0x2F9D, 'M', u'身'), ] def _seg_28(): return [ (0x2F9E, 'M', u'車'), (0x2F9F, 'M', u'辛'), (0x2FA0, 'M', u'辰'), (0x2FA1, 'M', u'辵'), (0x2FA2, 'M', u'邑'), (0x2FA3, 'M', u'酉'), (0x2FA4, 'M', u'釆'), (0x2FA5, 'M', u'里'), (0x2FA6, 'M', u'金'), (0x2FA7, 'M', u'長'), (0x2FA8, 'M', u'門'), (0x2FA9, 'M', u'阜'), (0x2FAA, 'M', u'隶'), (0x2FAB, 'M', u'隹'), (0x2FAC, 'M', u'雨'), (0x2FAD, 'M', u'靑'), (0x2FAE, 'M', u'非'), (0x2FAF, 'M', u'面'), (0x2FB0, 'M', u'革'), (0x2FB1, 'M', u'韋'), (0x2FB2, 'M', u'韭'), (0x2FB3, 'M', u'音'), (0x2FB4, 'M', u'頁'), (0x2FB5, 'M', u'風'), (0x2FB6, 'M', u'飛'), (0x2FB7, 'M', u'食'), (0x2FB8, 'M', u'首'), (0x2FB9, 'M', u'香'), (0x2FBA, 'M', u'馬'), (0x2FBB, 'M', u'骨'), (0x2FBC, 'M', u'高'), (0x2FBD, 'M', u'髟'), (0x2FBE, 'M', u'鬥'), (0x2FBF, 'M', u'鬯'), (0x2FC0, 'M', u'鬲'), (0x2FC1, 'M', u'鬼'), (0x2FC2, 'M', u'魚'), (0x2FC3, 'M', u'鳥'), (0x2FC4, 'M', u'鹵'), (0x2FC5, 'M', u'鹿'), (0x2FC6, 'M', u'麥'), (0x2FC7, 'M', u'麻'), (0x2FC8, 'M', u'黃'), (0x2FC9, 'M', u'黍'), (0x2FCA, 'M', u'黑'), (0x2FCB, 'M', u'黹'), (0x2FCC, 'M', u'黽'), (0x2FCD, 'M', u'鼎'), (0x2FCE, 'M', u'鼓'), (0x2FCF, 'M', u'鼠'), (0x2FD0, 'M', u'鼻'), (0x2FD1, 'M', u'齊'), (0x2FD2, 'M', u'齒'), (0x2FD3, 'M', u'龍'), (0x2FD4, 'M', u'龜'), (0x2FD5, 'M', u'龠'), (0x2FD6, 'X'), (0x3000, '3', u' '), (0x3001, 'V'), (0x3002, 'M', u'.'), (0x3003, 'V'), (0x3036, 'M', u'〒'), (0x3037, 'V'), (0x3038, 'M', u'十'), (0x3039, 'M', u'卄'), (0x303A, 'M', u'卅'), (0x303B, 'V'), (0x3040, 'X'), (0x3041, 'V'), (0x3097, 'X'), (0x3099, 'V'), (0x309B, '3', u' ゙'), (0x309C, '3', u' ゚'), (0x309D, 'V'), (0x309F, 'M', u'より'), (0x30A0, 'V'), (0x30FF, 'M', u'コト'), (0x3100, 'X'), (0x3105, 'V'), (0x312E, 'X'), (0x3131, 'M', u'ᄀ'), (0x3132, 'M', u'ᄁ'), (0x3133, 'M', u'ᆪ'), (0x3134, 'M', u'ᄂ'), (0x3135, 'M', u'ᆬ'), (0x3136, 'M', u'ᆭ'), (0x3137, 'M', u'ᄃ'), (0x3138, 'M', u'ᄄ'), (0x3139, 'M', u'ᄅ'), (0x313A, 'M', u'ᆰ'), (0x313B, 'M', u'ᆱ'), (0x313C, 'M', u'ᆲ'), (0x313D, 'M', u'ᆳ'), (0x313E, 'M', u'ᆴ'), (0x313F, 'M', u'ᆵ'), (0x3140, 'M', u'ᄚ'), (0x3141, 'M', u'ᄆ'), (0x3142, 'M', u'ᄇ'), (0x3143, 'M', u'ᄈ'), (0x3144, 'M', u'ᄡ'), ] def _seg_29(): return [ (0x3145, 'M', u'ᄉ'), (0x3146, 'M', u'ᄊ'), (0x3147, 'M', u'ᄋ'), (0x3148, 'M', u'ᄌ'), (0x3149, 'M', u'ᄍ'), (0x314A, 'M', u'ᄎ'), (0x314B, 'M', u'ᄏ'), (0x314C, 'M', u'ᄐ'), (0x314D, 'M', u'ᄑ'), (0x314E, 'M', u'ᄒ'), (0x314F, 'M', u'ᅡ'), (0x3150, 'M', u'ᅢ'), (0x3151, 'M', u'ᅣ'), (0x3152, 'M', u'ᅤ'), (0x3153, 'M', u'ᅥ'), (0x3154, 'M', u'ᅦ'), (0x3155, 'M', u'ᅧ'), (0x3156, 'M', u'ᅨ'), (0x3157, 'M', u'ᅩ'), (0x3158, 'M', u'ᅪ'), (0x3159, 'M', u'ᅫ'), (0x315A, 'M', u'ᅬ'), (0x315B, 'M', u'ᅭ'), (0x315C, 'M', u'ᅮ'), (0x315D, 'M', u'ᅯ'), (0x315E, 
'M', u'ᅰ'), (0x315F, 'M', u'ᅱ'), (0x3160, 'M', u'ᅲ'), (0x3161, 'M', u'ᅳ'), (0x3162, 'M', u'ᅴ'), (0x3163, 'M', u'ᅵ'), (0x3164, 'X'), (0x3165, 'M', u'ᄔ'), (0x3166, 'M', u'ᄕ'), (0x3167, 'M', u'ᇇ'), (0x3168, 'M', u'ᇈ'), (0x3169, 'M', u'ᇌ'), (0x316A, 'M', u'ᇎ'), (0x316B, 'M', u'ᇓ'), (0x316C, 'M', u'ᇗ'), (0x316D, 'M', u'ᇙ'), (0x316E, 'M', u'ᄜ'), (0x316F, 'M', u'ᇝ'), (0x3170, 'M', u'ᇟ'), (0x3171, 'M', u'ᄝ'), (0x3172, 'M', u'ᄞ'), (0x3173, 'M', u'ᄠ'), (0x3174, 'M', u'ᄢ'), (0x3175, 'M', u'ᄣ'), (0x3176, 'M', u'ᄧ'), (0x3177, 'M', u'ᄩ'), (0x3178, 'M', u'ᄫ'), (0x3179, 'M', u'ᄬ'), (0x317A, 'M', u'ᄭ'), (0x317B, 'M', u'ᄮ'), (0x317C, 'M', u'ᄯ'), (0x317D, 'M', u'ᄲ'), (0x317E, 'M', u'ᄶ'), (0x317F, 'M', u'ᅀ'), (0x3180, 'M', u'ᅇ'), (0x3181, 'M', u'ᅌ'), (0x3182, 'M', u'ᇱ'), (0x3183, 'M', u'ᇲ'), (0x3184, 'M', u'ᅗ'), (0x3185, 'M', u'ᅘ'), (0x3186, 'M', u'ᅙ'), (0x3187, 'M', u'ᆄ'), (0x3188, 'M', u'ᆅ'), (0x3189, 'M', u'ᆈ'), (0x318A, 'M', u'ᆑ'), (0x318B, 'M', u'ᆒ'), (0x318C, 'M', u'ᆔ'), (0x318D, 'M', u'ᆞ'), (0x318E, 'M', u'ᆡ'), (0x318F, 'X'), (0x3190, 'V'), (0x3192, 'M', u'一'), (0x3193, 'M', u'二'), (0x3194, 'M', u'三'), (0x3195, 'M', u'四'), (0x3196, 'M', u'上'), (0x3197, 'M', u'中'), (0x3198, 'M', u'下'), (0x3199, 'M', u'甲'), (0x319A, 'M', u'乙'), (0x319B, 'M', u'丙'), (0x319C, 'M', u'丁'), (0x319D, 'M', u'天'), (0x319E, 'M', u'地'), (0x319F, 'M', u'人'), (0x31A0, 'V'), (0x31BB, 'X'), (0x31C0, 'V'), (0x31E4, 'X'), (0x31F0, 'V'), (0x3200, '3', u'(ᄀ)'), (0x3201, '3', u'(ᄂ)'), (0x3202, '3', u'(ᄃ)'), (0x3203, '3', u'(ᄅ)'), (0x3204, '3', u'(ᄆ)'), ] def _seg_30(): return [ (0x3205, '3', u'(ᄇ)'), (0x3206, '3', u'(ᄉ)'), (0x3207, '3', u'(ᄋ)'), (0x3208, '3', u'(ᄌ)'), (0x3209, '3', u'(ᄎ)'), (0x320A, '3', u'(ᄏ)'), (0x320B, '3', u'(ᄐ)'), (0x320C, '3', u'(ᄑ)'), (0x320D, '3', u'(ᄒ)'), (0x320E, '3', u'(가)'), (0x320F, '3', u'(나)'), (0x3210, '3', u'(다)'), (0x3211, '3', u'(라)'), (0x3212, '3', u'(마)'), (0x3213, '3', u'(바)'), (0x3214, '3', u'(사)'), (0x3215, '3', u'(아)'), (0x3216, '3', u'(자)'), (0x3217, '3', u'(차)'), (0x3218, '3', u'(카)'), (0x3219, '3', u'(타)'), (0x321A, '3', u'(파)'), (0x321B, '3', u'(하)'), (0x321C, '3', u'(주)'), (0x321D, '3', u'(오전)'), (0x321E, '3', u'(오후)'), (0x321F, 'X'), (0x3220, '3', u'(一)'), (0x3221, '3', u'(二)'), (0x3222, '3', u'(三)'), (0x3223, '3', u'(四)'), (0x3224, '3', u'(五)'), (0x3225, '3', u'(六)'), (0x3226, '3', u'(七)'), (0x3227, '3', u'(八)'), (0x3228, '3', u'(九)'), (0x3229, '3', u'(十)'), (0x322A, '3', u'(月)'), (0x322B, '3', u'(火)'), (0x322C, '3', u'(水)'), (0x322D, '3', u'(木)'), (0x322E, '3', u'(金)'), (0x322F, '3', u'(土)'), (0x3230, '3', u'(日)'), (0x3231, '3', u'(株)'), (0x3232, '3', u'(有)'), (0x3233, '3', u'(社)'), (0x3234, '3', u'(名)'), (0x3235, '3', u'(特)'), (0x3236, '3', u'(財)'), (0x3237, '3', u'(祝)'), (0x3238, '3', u'(労)'), (0x3239, '3', u'(代)'), (0x323A, '3', u'(呼)'), (0x323B, '3', u'(学)'), (0x323C, '3', u'(監)'), (0x323D, '3', u'(企)'), (0x323E, '3', u'(資)'), (0x323F, '3', u'(協)'), (0x3240, '3', u'(祭)'), (0x3241, '3', u'(休)'), (0x3242, '3', u'(自)'), (0x3243, '3', u'(至)'), (0x3244, 'M', u'問'), (0x3245, 'M', u'幼'), (0x3246, 'M', u'文'), (0x3247, 'M', u'箏'), (0x3248, 'V'), (0x3250, 'M', u'pte'), (0x3251, 'M', u'21'), (0x3252, 'M', u'22'), (0x3253, 'M', u'23'), (0x3254, 'M', u'24'), (0x3255, 'M', u'25'), (0x3256, 'M', u'26'), (0x3257, 'M', u'27'), (0x3258, 'M', u'28'), (0x3259, 'M', u'29'), (0x325A, 'M', u'30'), (0x325B, 'M', u'31'), (0x325C, 'M', u'32'), (0x325D, 'M', u'33'), (0x325E, 'M', u'34'), (0x325F, 'M', u'35'), (0x3260, 'M', u'ᄀ'), (0x3261, 'M', u'ᄂ'), (0x3262, 'M', u'ᄃ'), (0x3263, 'M', u'ᄅ'), (0x3264, 'M', u'ᄆ'), 
(0x3265, 'M', u'ᄇ'), (0x3266, 'M', u'ᄉ'), (0x3267, 'M', u'ᄋ'), (0x3268, 'M', u'ᄌ'), (0x3269, 'M', u'ᄎ'), (0x326A, 'M', u'ᄏ'), (0x326B, 'M', u'ᄐ'), (0x326C, 'M', u'ᄑ'), (0x326D, 'M', u'ᄒ'), (0x326E, 'M', u'가'), (0x326F, 'M', u'나'), ] def _seg_31(): return [ (0x3270, 'M', u'다'), (0x3271, 'M', u'라'), (0x3272, 'M', u'마'), (0x3273, 'M', u'바'), (0x3274, 'M', u'사'), (0x3275, 'M', u'아'), (0x3276, 'M', u'자'), (0x3277, 'M', u'차'), (0x3278, 'M', u'카'), (0x3279, 'M', u'타'), (0x327A, 'M', u'파'), (0x327B, 'M', u'하'), (0x327C, 'M', u'참고'), (0x327D, 'M', u'주의'), (0x327E, 'M', u'우'), (0x327F, 'V'), (0x3280, 'M', u'一'), (0x3281, 'M', u'二'), (0x3282, 'M', u'三'), (0x3283, 'M', u'四'), (0x3284, 'M', u'五'), (0x3285, 'M', u'六'), (0x3286, 'M', u'七'), (0x3287, 'M', u'八'), (0x3288, 'M', u'九'), (0x3289, 'M', u'十'), (0x328A, 'M', u'月'), (0x328B, 'M', u'火'), (0x328C, 'M', u'水'), (0x328D, 'M', u'木'), (0x328E, 'M', u'金'), (0x328F, 'M', u'土'), (0x3290, 'M', u'日'), (0x3291, 'M', u'株'), (0x3292, 'M', u'有'), (0x3293, 'M', u'社'), (0x3294, 'M', u'名'), (0x3295, 'M', u'特'), (0x3296, 'M', u'財'), (0x3297, 'M', u'祝'), (0x3298, 'M', u'労'), (0x3299, 'M', u'秘'), (0x329A, 'M', u'男'), (0x329B, 'M', u'女'), (0x329C, 'M', u'適'), (0x329D, 'M', u'優'), (0x329E, 'M', u'印'), (0x329F, 'M', u'注'), (0x32A0, 'M', u'項'), (0x32A1, 'M', u'休'), (0x32A2, 'M', u'写'), (0x32A3, 'M', u'正'), (0x32A4, 'M', u'上'), (0x32A5, 'M', u'中'), (0x32A6, 'M', u'下'), (0x32A7, 'M', u'左'), (0x32A8, 'M', u'右'), (0x32A9, 'M', u'医'), (0x32AA, 'M', u'宗'), (0x32AB, 'M', u'学'), (0x32AC, 'M', u'監'), (0x32AD, 'M', u'企'), (0x32AE, 'M', u'資'), (0x32AF, 'M', u'協'), (0x32B0, 'M', u'夜'), (0x32B1, 'M', u'36'), (0x32B2, 'M', u'37'), (0x32B3, 'M', u'38'), (0x32B4, 'M', u'39'), (0x32B5, 'M', u'40'), (0x32B6, 'M', u'41'), (0x32B7, 'M', u'42'), (0x32B8, 'M', u'43'), (0x32B9, 'M', u'44'), (0x32BA, 'M', u'45'), (0x32BB, 'M', u'46'), (0x32BC, 'M', u'47'), (0x32BD, 'M', u'48'), (0x32BE, 'M', u'49'), (0x32BF, 'M', u'50'), (0x32C0, 'M', u'1月'), (0x32C1, 'M', u'2月'), (0x32C2, 'M', u'3月'), (0x32C3, 'M', u'4月'), (0x32C4, 'M', u'5月'), (0x32C5, 'M', u'6月'), (0x32C6, 'M', u'7月'), (0x32C7, 'M', u'8月'), (0x32C8, 'M', u'9月'), (0x32C9, 'M', u'10月'), (0x32CA, 'M', u'11月'), (0x32CB, 'M', u'12月'), (0x32CC, 'M', u'hg'), (0x32CD, 'M', u'erg'), (0x32CE, 'M', u'ev'), (0x32CF, 'M', u'ltd'), (0x32D0, 'M', u'ア'), (0x32D1, 'M', u'イ'), (0x32D2, 'M', u'ウ'), (0x32D3, 'M', u'エ'), ] def _seg_32(): return [ (0x32D4, 'M', u'オ'), (0x32D5, 'M', u'カ'), (0x32D6, 'M', u'キ'), (0x32D7, 'M', u'ク'), (0x32D8, 'M', u'ケ'), (0x32D9, 'M', u'コ'), (0x32DA, 'M', u'サ'), (0x32DB, 'M', u'シ'), (0x32DC, 'M', u'ス'), (0x32DD, 'M', u'セ'), (0x32DE, 'M', u'ソ'), (0x32DF, 'M', u'タ'), (0x32E0, 'M', u'チ'), (0x32E1, 'M', u'ツ'), (0x32E2, 'M', u'テ'), (0x32E3, 'M', u'ト'), (0x32E4, 'M', u'ナ'), (0x32E5, 'M', u'ニ'), (0x32E6, 'M', u'ヌ'), (0x32E7, 'M', u'ネ'), (0x32E8, 'M', u'ノ'), (0x32E9, 'M', u'ハ'), (0x32EA, 'M', u'ヒ'), (0x32EB, 'M', u'フ'), (0x32EC, 'M', u'ヘ'), (0x32ED, 'M', u'ホ'), (0x32EE, 'M', u'マ'), (0x32EF, 'M', u'ミ'), (0x32F0, 'M', u'ム'), (0x32F1, 'M', u'メ'), (0x32F2, 'M', u'モ'), (0x32F3, 'M', u'ヤ'), (0x32F4, 'M', u'ユ'), (0x32F5, 'M', u'ヨ'), (0x32F6, 'M', u'ラ'), (0x32F7, 'M', u'リ'), (0x32F8, 'M', u'ル'), (0x32F9, 'M', u'レ'), (0x32FA, 'M', u'ロ'), (0x32FB, 'M', u'ワ'), (0x32FC, 'M', u'ヰ'), (0x32FD, 'M', u'ヱ'), (0x32FE, 'M', u'ヲ'), (0x32FF, 'X'), (0x3300, 'M', u'アパート'), (0x3301, 'M', u'アルファ'), (0x3302, 'M', u'アンペア'), (0x3303, 'M', u'アール'), (0x3304, 'M', u'イニング'), (0x3305, 'M', u'インチ'), (0x3306, 'M', u'ウォン'), (0x3307, 'M', u'エスクード'), (0x3308, 'M', u'エーカー'), 
(0x3309, 'M', u'オンス'), (0x330A, 'M', u'オーム'), (0x330B, 'M', u'カイリ'), (0x330C, 'M', u'カラット'), (0x330D, 'M', u'カロリー'), (0x330E, 'M', u'ガロン'), (0x330F, 'M', u'ガンマ'), (0x3310, 'M', u'ギガ'), (0x3311, 'M', u'ギニー'), (0x3312, 'M', u'キュリー'), (0x3313, 'M', u'ギルダー'), (0x3314, 'M', u'キロ'), (0x3315, 'M', u'キログラム'), (0x3316, 'M', u'キロメートル'), (0x3317, 'M', u'キロワット'), (0x3318, 'M', u'グラム'), (0x3319, 'M', u'グラムトン'), (0x331A, 'M', u'クルゼイロ'), (0x331B, 'M', u'クローネ'), (0x331C, 'M', u'ケース'), (0x331D, 'M', u'コルナ'), (0x331E, 'M', u'コーポ'), (0x331F, 'M', u'サイクル'), (0x3320, 'M', u'サンチーム'), (0x3321, 'M', u'シリング'), (0x3322, 'M', u'センチ'), (0x3323, 'M', u'セント'), (0x3324, 'M', u'ダース'), (0x3325, 'M', u'デシ'), (0x3326, 'M', u'ドル'), (0x3327, 'M', u'トン'), (0x3328, 'M', u'ナノ'), (0x3329, 'M', u'ノット'), (0x332A, 'M', u'ハイツ'), (0x332B, 'M', u'パーセント'), (0x332C, 'M', u'パーツ'), (0x332D, 'M', u'バーレル'), (0x332E, 'M', u'ピアストル'), (0x332F, 'M', u'ピクル'), (0x3330, 'M', u'ピコ'), (0x3331, 'M', u'ビル'), (0x3332, 'M', u'ファラッド'), (0x3333, 'M', u'フィート'), (0x3334, 'M', u'ブッシェル'), (0x3335, 'M', u'フラン'), (0x3336, 'M', u'ヘクタール'), (0x3337, 'M', u'ペソ'), ] def _seg_33(): return [ (0x3338, 'M', u'ペニヒ'), (0x3339, 'M', u'ヘルツ'), (0x333A, 'M', u'ペンス'), (0x333B, 'M', u'ページ'), (0x333C, 'M', u'ベータ'), (0x333D, 'M', u'ポイント'), (0x333E, 'M', u'ボルト'), (0x333F, 'M', u'ホン'), (0x3340, 'M', u'ポンド'), (0x3341, 'M', u'ホール'), (0x3342, 'M', u'ホーン'), (0x3343, 'M', u'マイクロ'), (0x3344, 'M', u'マイル'), (0x3345, 'M', u'マッハ'), (0x3346, 'M', u'マルク'), (0x3347, 'M', u'マンション'), (0x3348, 'M', u'ミクロン'), (0x3349, 'M', u'ミリ'), (0x334A, 'M', u'ミリバール'), (0x334B, 'M', u'メガ'), (0x334C, 'M', u'メガトン'), (0x334D, 'M', u'メートル'), (0x334E, 'M', u'ヤード'), (0x334F, 'M', u'ヤール'), (0x3350, 'M', u'ユアン'), (0x3351, 'M', u'リットル'), (0x3352, 'M', u'リラ'), (0x3353, 'M', u'ルピー'), (0x3354, 'M', u'ルーブル'), (0x3355, 'M', u'レム'), (0x3356, 'M', u'レントゲン'), (0x3357, 'M', u'ワット'), (0x3358, 'M', u'0点'), (0x3359, 'M', u'1点'), (0x335A, 'M', u'2点'), (0x335B, 'M', u'3点'), (0x335C, 'M', u'4点'), (0x335D, 'M', u'5点'), (0x335E, 'M', u'6点'), (0x335F, 'M', u'7点'), (0x3360, 'M', u'8点'), (0x3361, 'M', u'9点'), (0x3362, 'M', u'10点'), (0x3363, 'M', u'11点'), (0x3364, 'M', u'12点'), (0x3365, 'M', u'13点'), (0x3366, 'M', u'14点'), (0x3367, 'M', u'15点'), (0x3368, 'M', u'16点'), (0x3369, 'M', u'17点'), (0x336A, 'M', u'18点'), (0x336B, 'M', u'19点'), (0x336C, 'M', u'20点'), (0x336D, 'M', u'21点'), (0x336E, 'M', u'22点'), (0x336F, 'M', u'23点'), (0x3370, 'M', u'24点'), (0x3371, 'M', u'hpa'), (0x3372, 'M', u'da'), (0x3373, 'M', u'au'), (0x3374, 'M', u'bar'), (0x3375, 'M', u'ov'), (0x3376, 'M', u'pc'), (0x3377, 'M', u'dm'), (0x3378, 'M', u'dm2'), (0x3379, 'M', u'dm3'), (0x337A, 'M', u'iu'), (0x337B, 'M', u'平成'), (0x337C, 'M', u'昭和'), (0x337D, 'M', u'大正'), (0x337E, 'M', u'明治'), (0x337F, 'M', u'株式会社'), (0x3380, 'M', u'pa'), (0x3381, 'M', u'na'), (0x3382, 'M', u'μa'), (0x3383, 'M', u'ma'), (0x3384, 'M', u'ka'), (0x3385, 'M', u'kb'), (0x3386, 'M', u'mb'), (0x3387, 'M', u'gb'), (0x3388, 'M', u'cal'), (0x3389, 'M', u'kcal'), (0x338A, 'M', u'pf'), (0x338B, 'M', u'nf'), (0x338C, 'M', u'μf'), (0x338D, 'M', u'μg'), (0x338E, 'M', u'mg'), (0x338F, 'M', u'kg'), (0x3390, 'M', u'hz'), (0x3391, 'M', u'khz'), (0x3392, 'M', u'mhz'), (0x3393, 'M', u'ghz'), (0x3394, 'M', u'thz'), (0x3395, 'M', u'μl'), (0x3396, 'M', u'ml'), (0x3397, 'M', u'dl'), (0x3398, 'M', u'kl'), (0x3399, 'M', u'fm'), (0x339A, 'M', u'nm'), (0x339B, 'M', u'μm'), ] def _seg_34(): return [ (0x339C, 'M', u'mm'), (0x339D, 'M', u'cm'), (0x339E, 'M', u'km'), (0x339F, 'M', u'mm2'), (0x33A0, 'M', u'cm2'), (0x33A1, 'M', 
u'm2'), (0x33A2, 'M', u'km2'), (0x33A3, 'M', u'mm3'), (0x33A4, 'M', u'cm3'), (0x33A5, 'M', u'm3'), (0x33A6, 'M', u'km3'), (0x33A7, 'M', u'm∕s'), (0x33A8, 'M', u'm∕s2'), (0x33A9, 'M', u'pa'), (0x33AA, 'M', u'kpa'), (0x33AB, 'M', u'mpa'), (0x33AC, 'M', u'gpa'), (0x33AD, 'M', u'rad'), (0x33AE, 'M', u'rad∕s'), (0x33AF, 'M', u'rad∕s2'), (0x33B0, 'M', u'ps'), (0x33B1, 'M', u'ns'), (0x33B2, 'M', u'μs'), (0x33B3, 'M', u'ms'), (0x33B4, 'M', u'pv'), (0x33B5, 'M', u'nv'), (0x33B6, 'M', u'μv'), (0x33B7, 'M', u'mv'), (0x33B8, 'M', u'kv'), (0x33B9, 'M', u'mv'), (0x33BA, 'M', u'pw'), (0x33BB, 'M', u'nw'), (0x33BC, 'M', u'μw'), (0x33BD, 'M', u'mw'), (0x33BE, 'M', u'kw'), (0x33BF, 'M', u'mw'), (0x33C0, 'M', u'kω'), (0x33C1, 'M', u'mω'), (0x33C2, 'X'), (0x33C3, 'M', u'bq'), (0x33C4, 'M', u'cc'), (0x33C5, 'M', u'cd'), (0x33C6, 'M', u'c∕kg'), (0x33C7, 'X'), (0x33C8, 'M', u'db'), (0x33C9, 'M', u'gy'), (0x33CA, 'M', u'ha'), (0x33CB, 'M', u'hp'), (0x33CC, 'M', u'in'), (0x33CD, 'M', u'kk'), (0x33CE, 'M', u'km'), (0x33CF, 'M', u'kt'), (0x33D0, 'M', u'lm'), (0x33D1, 'M', u'ln'), (0x33D2, 'M', u'log'), (0x33D3, 'M', u'lx'), (0x33D4, 'M', u'mb'), (0x33D5, 'M', u'mil'), (0x33D6, 'M', u'mol'), (0x33D7, 'M', u'ph'), (0x33D8, 'X'), (0x33D9, 'M', u'ppm'), (0x33DA, 'M', u'pr'), (0x33DB, 'M', u'sr'), (0x33DC, 'M', u'sv'), (0x33DD, 'M', u'wb'), (0x33DE, 'M', u'v∕m'), (0x33DF, 'M', u'a∕m'), (0x33E0, 'M', u'1日'), (0x33E1, 'M', u'2日'), (0x33E2, 'M', u'3日'), (0x33E3, 'M', u'4日'), (0x33E4, 'M', u'5日'), (0x33E5, 'M', u'6日'), (0x33E6, 'M', u'7日'), (0x33E7, 'M', u'8日'), (0x33E8, 'M', u'9日'), (0x33E9, 'M', u'10日'), (0x33EA, 'M', u'11日'), (0x33EB, 'M', u'12日'), (0x33EC, 'M', u'13日'), (0x33ED, 'M', u'14日'), (0x33EE, 'M', u'15日'), (0x33EF, 'M', u'16日'), (0x33F0, 'M', u'17日'), (0x33F1, 'M', u'18日'), (0x33F2, 'M', u'19日'), (0x33F3, 'M', u'20日'), (0x33F4, 'M', u'21日'), (0x33F5, 'M', u'22日'), (0x33F6, 'M', u'23日'), (0x33F7, 'M', u'24日'), (0x33F8, 'M', u'25日'), (0x33F9, 'M', u'26日'), (0x33FA, 'M', u'27日'), (0x33FB, 'M', u'28日'), (0x33FC, 'M', u'29日'), (0x33FD, 'M', u'30日'), (0x33FE, 'M', u'31日'), (0x33FF, 'M', u'gal'), ] def _seg_35(): return [ (0x3400, 'V'), (0x4DB6, 'X'), (0x4DC0, 'V'), (0x9FCD, 'X'), (0xA000, 'V'), (0xA48D, 'X'), (0xA490, 'V'), (0xA4C7, 'X'), (0xA4D0, 'V'), (0xA62C, 'X'), (0xA640, 'M', u'ꙁ'), (0xA641, 'V'), (0xA642, 'M', u'ꙃ'), (0xA643, 'V'), (0xA644, 'M', u'ꙅ'), (0xA645, 'V'), (0xA646, 'M', u'ꙇ'), (0xA647, 'V'), (0xA648, 'M', u'ꙉ'), (0xA649, 'V'), (0xA64A, 'M', u'ꙋ'), (0xA64B, 'V'), (0xA64C, 'M', u'ꙍ'), (0xA64D, 'V'), (0xA64E, 'M', u'ꙏ'), (0xA64F, 'V'), (0xA650, 'M', u'ꙑ'), (0xA651, 'V'), (0xA652, 'M', u'ꙓ'), (0xA653, 'V'), (0xA654, 'M', u'ꙕ'), (0xA655, 'V'), (0xA656, 'M', u'ꙗ'), (0xA657, 'V'), (0xA658, 'M', u'ꙙ'), (0xA659, 'V'), (0xA65A, 'M', u'ꙛ'), (0xA65B, 'V'), (0xA65C, 'M', u'ꙝ'), (0xA65D, 'V'), (0xA65E, 'M', u'ꙟ'), (0xA65F, 'V'), (0xA660, 'M', u'ꙡ'), (0xA661, 'V'), (0xA662, 'M', u'ꙣ'), (0xA663, 'V'), (0xA664, 'M', u'ꙥ'), (0xA665, 'V'), (0xA666, 'M', u'ꙧ'), (0xA667, 'V'), (0xA668, 'M', u'ꙩ'), (0xA669, 'V'), (0xA66A, 'M', u'ꙫ'), (0xA66B, 'V'), (0xA66C, 'M', u'ꙭ'), (0xA66D, 'V'), (0xA680, 'M', u'ꚁ'), (0xA681, 'V'), (0xA682, 'M', u'ꚃ'), (0xA683, 'V'), (0xA684, 'M', u'ꚅ'), (0xA685, 'V'), (0xA686, 'M', u'ꚇ'), (0xA687, 'V'), (0xA688, 'M', u'ꚉ'), (0xA689, 'V'), (0xA68A, 'M', u'ꚋ'), (0xA68B, 'V'), (0xA68C, 'M', u'ꚍ'), (0xA68D, 'V'), (0xA68E, 'M', u'ꚏ'), (0xA68F, 'V'), (0xA690, 'M', u'ꚑ'), (0xA691, 'V'), (0xA692, 'M', u'ꚓ'), (0xA693, 'V'), (0xA694, 'M', u'ꚕ'), (0xA695, 'V'), (0xA696, 'M', u'ꚗ'), (0xA697, 'V'), (0xA698, 'X'), 
(0xA69F, 'V'), (0xA6F8, 'X'), (0xA700, 'V'), (0xA722, 'M', u'ꜣ'), (0xA723, 'V'), (0xA724, 'M', u'ꜥ'), (0xA725, 'V'), (0xA726, 'M', u'ꜧ'), (0xA727, 'V'), (0xA728, 'M', u'ꜩ'), (0xA729, 'V'), (0xA72A, 'M', u'ꜫ'), (0xA72B, 'V'), (0xA72C, 'M', u'ꜭ'), (0xA72D, 'V'), (0xA72E, 'M', u'ꜯ'), (0xA72F, 'V'), (0xA732, 'M', u'ꜳ'), (0xA733, 'V'), ] def _seg_36(): return [ (0xA734, 'M', u'ꜵ'), (0xA735, 'V'), (0xA736, 'M', u'ꜷ'), (0xA737, 'V'), (0xA738, 'M', u'ꜹ'), (0xA739, 'V'), (0xA73A, 'M', u'ꜻ'), (0xA73B, 'V'), (0xA73C, 'M', u'ꜽ'), (0xA73D, 'V'), (0xA73E, 'M', u'ꜿ'), (0xA73F, 'V'), (0xA740, 'M', u'ꝁ'), (0xA741, 'V'), (0xA742, 'M', u'ꝃ'), (0xA743, 'V'), (0xA744, 'M', u'ꝅ'), (0xA745, 'V'), (0xA746, 'M', u'ꝇ'), (0xA747, 'V'), (0xA748, 'M', u'ꝉ'), (0xA749, 'V'), (0xA74A, 'M', u'ꝋ'), (0xA74B, 'V'), (0xA74C, 'M', u'ꝍ'), (0xA74D, 'V'), (0xA74E, 'M', u'ꝏ'), (0xA74F, 'V'), (0xA750, 'M', u'ꝑ'), (0xA751, 'V'), (0xA752, 'M', u'ꝓ'), (0xA753, 'V'), (0xA754, 'M', u'ꝕ'), (0xA755, 'V'), (0xA756, 'M', u'ꝗ'), (0xA757, 'V'), (0xA758, 'M', u'ꝙ'), (0xA759, 'V'), (0xA75A, 'M', u'ꝛ'), (0xA75B, 'V'), (0xA75C, 'M', u'ꝝ'), (0xA75D, 'V'), (0xA75E, 'M', u'ꝟ'), (0xA75F, 'V'), (0xA760, 'M', u'ꝡ'), (0xA761, 'V'), (0xA762, 'M', u'ꝣ'), (0xA763, 'V'), (0xA764, 'M', u'ꝥ'), (0xA765, 'V'), (0xA766, 'M', u'ꝧ'), (0xA767, 'V'), (0xA768, 'M', u'ꝩ'), (0xA769, 'V'), (0xA76A, 'M', u'ꝫ'), (0xA76B, 'V'), (0xA76C, 'M', u'ꝭ'), (0xA76D, 'V'), (0xA76E, 'M', u'ꝯ'), (0xA76F, 'V'), (0xA770, 'M', u'ꝯ'), (0xA771, 'V'), (0xA779, 'M', u'ꝺ'), (0xA77A, 'V'), (0xA77B, 'M', u'ꝼ'), (0xA77C, 'V'), (0xA77D, 'M', u'ᵹ'), (0xA77E, 'M', u'ꝿ'), (0xA77F, 'V'), (0xA780, 'M', u'ꞁ'), (0xA781, 'V'), (0xA782, 'M', u'ꞃ'), (0xA783, 'V'), (0xA784, 'M', u'ꞅ'), (0xA785, 'V'), (0xA786, 'M', u'ꞇ'), (0xA787, 'V'), (0xA78B, 'M', u'ꞌ'), (0xA78C, 'V'), (0xA78D, 'M', u'ɥ'), (0xA78E, 'V'), (0xA78F, 'X'), (0xA790, 'M', u'ꞑ'), (0xA791, 'V'), (0xA792, 'M', u'ꞓ'), (0xA793, 'V'), (0xA794, 'X'), (0xA7A0, 'M', u'ꞡ'), (0xA7A1, 'V'), (0xA7A2, 'M', u'ꞣ'), (0xA7A3, 'V'), (0xA7A4, 'M', u'ꞥ'), (0xA7A5, 'V'), (0xA7A6, 'M', u'ꞧ'), (0xA7A7, 'V'), (0xA7A8, 'M', u'ꞩ'), (0xA7A9, 'V'), (0xA7AA, 'M', u'ɦ'), (0xA7AB, 'X'), (0xA7F8, 'M', u'ħ'), ] def _seg_37(): return [ (0xA7F9, 'M', u'œ'), (0xA7FA, 'V'), (0xA82C, 'X'), (0xA830, 'V'), (0xA83A, 'X'), (0xA840, 'V'), (0xA878, 'X'), (0xA880, 'V'), (0xA8C5, 'X'), (0xA8CE, 'V'), (0xA8DA, 'X'), (0xA8E0, 'V'), (0xA8FC, 'X'), (0xA900, 'V'), (0xA954, 'X'), (0xA95F, 'V'), (0xA97D, 'X'), (0xA980, 'V'), (0xA9CE, 'X'), (0xA9CF, 'V'), (0xA9DA, 'X'), (0xA9DE, 'V'), (0xA9E0, 'X'), (0xAA00, 'V'), (0xAA37, 'X'), (0xAA40, 'V'), (0xAA4E, 'X'), (0xAA50, 'V'), (0xAA5A, 'X'), (0xAA5C, 'V'), (0xAA7C, 'X'), (0xAA80, 'V'), (0xAAC3, 'X'), (0xAADB, 'V'), (0xAAF7, 'X'), (0xAB01, 'V'), (0xAB07, 'X'), (0xAB09, 'V'), (0xAB0F, 'X'), (0xAB11, 'V'), (0xAB17, 'X'), (0xAB20, 'V'), (0xAB27, 'X'), (0xAB28, 'V'), (0xAB2F, 'X'), (0xABC0, 'V'), (0xABEE, 'X'), (0xABF0, 'V'), (0xABFA, 'X'), (0xAC00, 'V'), (0xD7A4, 'X'), (0xD7B0, 'V'), (0xD7C7, 'X'), (0xD7CB, 'V'), (0xD7FC, 'X'), (0xF900, 'M', u'豈'), (0xF901, 'M', u'更'), (0xF902, 'M', u'車'), (0xF903, 'M', u'賈'), (0xF904, 'M', u'滑'), (0xF905, 'M', u'串'), (0xF906, 'M', u'句'), (0xF907, 'M', u'龜'), (0xF909, 'M', u'契'), (0xF90A, 'M', u'金'), (0xF90B, 'M', u'喇'), (0xF90C, 'M', u'奈'), (0xF90D, 'M', u'懶'), (0xF90E, 'M', u'癩'), (0xF90F, 'M', u'羅'), (0xF910, 'M', u'蘿'), (0xF911, 'M', u'螺'), (0xF912, 'M', u'裸'), (0xF913, 'M', u'邏'), (0xF914, 'M', u'樂'), (0xF915, 'M', u'洛'), (0xF916, 'M', u'烙'), (0xF917, 'M', u'珞'), (0xF918, 'M', u'落'), (0xF919, 'M', u'酪'), (0xF91A, 'M', 
u'駱'), (0xF91B, 'M', u'亂'), (0xF91C, 'M', u'卵'), (0xF91D, 'M', u'欄'), (0xF91E, 'M', u'爛'), (0xF91F, 'M', u'蘭'), (0xF920, 'M', u'鸞'), (0xF921, 'M', u'嵐'), (0xF922, 'M', u'濫'), (0xF923, 'M', u'藍'), (0xF924, 'M', u'襤'), (0xF925, 'M', u'拉'), (0xF926, 'M', u'臘'), (0xF927, 'M', u'蠟'), (0xF928, 'M', u'廊'), (0xF929, 'M', u'朗'), (0xF92A, 'M', u'浪'), (0xF92B, 'M', u'狼'), (0xF92C, 'M', u'郎'), (0xF92D, 'M', u'來'), ] def _seg_38(): return [ (0xF92E, 'M', u'冷'), (0xF92F, 'M', u'勞'), (0xF930, 'M', u'擄'), (0xF931, 'M', u'櫓'), (0xF932, 'M', u'爐'), (0xF933, 'M', u'盧'), (0xF934, 'M', u'老'), (0xF935, 'M', u'蘆'), (0xF936, 'M', u'虜'), (0xF937, 'M', u'路'), (0xF938, 'M', u'露'), (0xF939, 'M', u'魯'), (0xF93A, 'M', u'鷺'), (0xF93B, 'M', u'碌'), (0xF93C, 'M', u'祿'), (0xF93D, 'M', u'綠'), (0xF93E, 'M', u'菉'), (0xF93F, 'M', u'錄'), (0xF940, 'M', u'鹿'), (0xF941, 'M', u'論'), (0xF942, 'M', u'壟'), (0xF943, 'M', u'弄'), (0xF944, 'M', u'籠'), (0xF945, 'M', u'聾'), (0xF946, 'M', u'牢'), (0xF947, 'M', u'磊'), (0xF948, 'M', u'賂'), (0xF949, 'M', u'雷'), (0xF94A, 'M', u'壘'), (0xF94B, 'M', u'屢'), (0xF94C, 'M', u'樓'), (0xF94D, 'M', u'淚'), (0xF94E, 'M', u'漏'), (0xF94F, 'M', u'累'), (0xF950, 'M', u'縷'), (0xF951, 'M', u'陋'), (0xF952, 'M', u'勒'), (0xF953, 'M', u'肋'), (0xF954, 'M', u'凜'), (0xF955, 'M', u'凌'), (0xF956, 'M', u'稜'), (0xF957, 'M', u'綾'), (0xF958, 'M', u'菱'), (0xF959, 'M', u'陵'), (0xF95A, 'M', u'讀'), (0xF95B, 'M', u'拏'), (0xF95C, 'M', u'樂'), (0xF95D, 'M', u'諾'), (0xF95E, 'M', u'丹'), (0xF95F, 'M', u'寧'), (0xF960, 'M', u'怒'), (0xF961, 'M', u'率'), (0xF962, 'M', u'異'), (0xF963, 'M', u'北'), (0xF964, 'M', u'磻'), (0xF965, 'M', u'便'), (0xF966, 'M', u'復'), (0xF967, 'M', u'不'), (0xF968, 'M', u'泌'), (0xF969, 'M', u'數'), (0xF96A, 'M', u'索'), (0xF96B, 'M', u'參'), (0xF96C, 'M', u'塞'), (0xF96D, 'M', u'省'), (0xF96E, 'M', u'葉'), (0xF96F, 'M', u'說'), (0xF970, 'M', u'殺'), (0xF971, 'M', u'辰'), (0xF972, 'M', u'沈'), (0xF973, 'M', u'拾'), (0xF974, 'M', u'若'), (0xF975, 'M', u'掠'), (0xF976, 'M', u'略'), (0xF977, 'M', u'亮'), (0xF978, 'M', u'兩'), (0xF979, 'M', u'凉'), (0xF97A, 'M', u'梁'), (0xF97B, 'M', u'糧'), (0xF97C, 'M', u'良'), (0xF97D, 'M', u'諒'), (0xF97E, 'M', u'量'), (0xF97F, 'M', u'勵'), (0xF980, 'M', u'呂'), (0xF981, 'M', u'女'), (0xF982, 'M', u'廬'), (0xF983, 'M', u'旅'), (0xF984, 'M', u'濾'), (0xF985, 'M', u'礪'), (0xF986, 'M', u'閭'), (0xF987, 'M', u'驪'), (0xF988, 'M', u'麗'), (0xF989, 'M', u'黎'), (0xF98A, 'M', u'力'), (0xF98B, 'M', u'曆'), (0xF98C, 'M', u'歷'), (0xF98D, 'M', u'轢'), (0xF98E, 'M', u'年'), (0xF98F, 'M', u'憐'), (0xF990, 'M', u'戀'), (0xF991, 'M', u'撚'), ] def _seg_39(): return [ (0xF992, 'M', u'漣'), (0xF993, 'M', u'煉'), (0xF994, 'M', u'璉'), (0xF995, 'M', u'秊'), (0xF996, 'M', u'練'), (0xF997, 'M', u'聯'), (0xF998, 'M', u'輦'), (0xF999, 'M', u'蓮'), (0xF99A, 'M', u'連'), (0xF99B, 'M', u'鍊'), (0xF99C, 'M', u'列'), (0xF99D, 'M', u'劣'), (0xF99E, 'M', u'咽'), (0xF99F, 'M', u'烈'), (0xF9A0, 'M', u'裂'), (0xF9A1, 'M', u'說'), (0xF9A2, 'M', u'廉'), (0xF9A3, 'M', u'念'), (0xF9A4, 'M', u'捻'), (0xF9A5, 'M', u'殮'), (0xF9A6, 'M', u'簾'), (0xF9A7, 'M', u'獵'), (0xF9A8, 'M', u'令'), (0xF9A9, 'M', u'囹'), (0xF9AA, 'M', u'寧'), (0xF9AB, 'M', u'嶺'), (0xF9AC, 'M', u'怜'), (0xF9AD, 'M', u'玲'), (0xF9AE, 'M', u'瑩'), (0xF9AF, 'M', u'羚'), (0xF9B0, 'M', u'聆'), (0xF9B1, 'M', u'鈴'), (0xF9B2, 'M', u'零'), (0xF9B3, 'M', u'靈'), (0xF9B4, 'M', u'領'), (0xF9B5, 'M', u'例'), (0xF9B6, 'M', u'禮'), (0xF9B7, 'M', u'醴'), (0xF9B8, 'M', u'隸'), (0xF9B9, 'M', u'惡'), (0xF9BA, 'M', u'了'), (0xF9BB, 'M', u'僚'), (0xF9BC, 'M', u'寮'), (0xF9BD, 'M', u'尿'), (0xF9BE, 'M', u'料'), (0xF9BF, 'M', u'樂'), (0xF9C0, 'M', u'燎'), (0xF9C1, 
'M', u'療'), (0xF9C2, 'M', u'蓼'), (0xF9C3, 'M', u'遼'), (0xF9C4, 'M', u'龍'), (0xF9C5, 'M', u'暈'), (0xF9C6, 'M', u'阮'), (0xF9C7, 'M', u'劉'), (0xF9C8, 'M', u'杻'), (0xF9C9, 'M', u'柳'), (0xF9CA, 'M', u'流'), (0xF9CB, 'M', u'溜'), (0xF9CC, 'M', u'琉'), (0xF9CD, 'M', u'留'), (0xF9CE, 'M', u'硫'), (0xF9CF, 'M', u'紐'), (0xF9D0, 'M', u'類'), (0xF9D1, 'M', u'六'), (0xF9D2, 'M', u'戮'), (0xF9D3, 'M', u'陸'), (0xF9D4, 'M', u'倫'), (0xF9D5, 'M', u'崙'), (0xF9D6, 'M', u'淪'), (0xF9D7, 'M', u'輪'), (0xF9D8, 'M', u'律'), (0xF9D9, 'M', u'慄'), (0xF9DA, 'M', u'栗'), (0xF9DB, 'M', u'率'), (0xF9DC, 'M', u'隆'), (0xF9DD, 'M', u'利'), (0xF9DE, 'M', u'吏'), (0xF9DF, 'M', u'履'), (0xF9E0, 'M', u'易'), (0xF9E1, 'M', u'李'), (0xF9E2, 'M', u'梨'), (0xF9E3, 'M', u'泥'), (0xF9E4, 'M', u'理'), (0xF9E5, 'M', u'痢'), (0xF9E6, 'M', u'罹'), (0xF9E7, 'M', u'裏'), (0xF9E8, 'M', u'裡'), (0xF9E9, 'M', u'里'), (0xF9EA, 'M', u'離'), (0xF9EB, 'M', u'匿'), (0xF9EC, 'M', u'溺'), (0xF9ED, 'M', u'吝'), (0xF9EE, 'M', u'燐'), (0xF9EF, 'M', u'璘'), (0xF9F0, 'M', u'藺'), (0xF9F1, 'M', u'隣'), (0xF9F2, 'M', u'鱗'), (0xF9F3, 'M', u'麟'), (0xF9F4, 'M', u'林'), (0xF9F5, 'M', u'淋'), ] def _seg_40(): return [ (0xF9F6, 'M', u'臨'), (0xF9F7, 'M', u'立'), (0xF9F8, 'M', u'笠'), (0xF9F9, 'M', u'粒'), (0xF9FA, 'M', u'狀'), (0xF9FB, 'M', u'炙'), (0xF9FC, 'M', u'識'), (0xF9FD, 'M', u'什'), (0xF9FE, 'M', u'茶'), (0xF9FF, 'M', u'刺'), (0xFA00, 'M', u'切'), (0xFA01, 'M', u'度'), (0xFA02, 'M', u'拓'), (0xFA03, 'M', u'糖'), (0xFA04, 'M', u'宅'), (0xFA05, 'M', u'洞'), (0xFA06, 'M', u'暴'), (0xFA07, 'M', u'輻'), (0xFA08, 'M', u'行'), (0xFA09, 'M', u'降'), (0xFA0A, 'M', u'見'), (0xFA0B, 'M', u'廓'), (0xFA0C, 'M', u'兀'), (0xFA0D, 'M', u'嗀'), (0xFA0E, 'V'), (0xFA10, 'M', u'塚'), (0xFA11, 'V'), (0xFA12, 'M', u'晴'), (0xFA13, 'V'), (0xFA15, 'M', u'凞'), (0xFA16, 'M', u'猪'), (0xFA17, 'M', u'益'), (0xFA18, 'M', u'礼'), (0xFA19, 'M', u'神'), (0xFA1A, 'M', u'祥'), (0xFA1B, 'M', u'福'), (0xFA1C, 'M', u'靖'), (0xFA1D, 'M', u'精'), (0xFA1E, 'M', u'羽'), (0xFA1F, 'V'), (0xFA20, 'M', u'蘒'), (0xFA21, 'V'), (0xFA22, 'M', u'諸'), (0xFA23, 'V'), (0xFA25, 'M', u'逸'), (0xFA26, 'M', u'都'), (0xFA27, 'V'), (0xFA2A, 'M', u'飯'), (0xFA2B, 'M', u'飼'), (0xFA2C, 'M', u'館'), (0xFA2D, 'M', u'鶴'), (0xFA2E, 'M', u'郞'), (0xFA2F, 'M', u'隷'), (0xFA30, 'M', u'侮'), (0xFA31, 'M', u'僧'), (0xFA32, 'M', u'免'), (0xFA33, 'M', u'勉'), (0xFA34, 'M', u'勤'), (0xFA35, 'M', u'卑'), (0xFA36, 'M', u'喝'), (0xFA37, 'M', u'嘆'), (0xFA38, 'M', u'器'), (0xFA39, 'M', u'塀'), (0xFA3A, 'M', u'墨'), (0xFA3B, 'M', u'層'), (0xFA3C, 'M', u'屮'), (0xFA3D, 'M', u'悔'), (0xFA3E, 'M', u'慨'), (0xFA3F, 'M', u'憎'), (0xFA40, 'M', u'懲'), (0xFA41, 'M', u'敏'), (0xFA42, 'M', u'既'), (0xFA43, 'M', u'暑'), (0xFA44, 'M', u'梅'), (0xFA45, 'M', u'海'), (0xFA46, 'M', u'渚'), (0xFA47, 'M', u'漢'), (0xFA48, 'M', u'煮'), (0xFA49, 'M', u'爫'), (0xFA4A, 'M', u'琢'), (0xFA4B, 'M', u'碑'), (0xFA4C, 'M', u'社'), (0xFA4D, 'M', u'祉'), (0xFA4E, 'M', u'祈'), (0xFA4F, 'M', u'祐'), (0xFA50, 'M', u'祖'), (0xFA51, 'M', u'祝'), (0xFA52, 'M', u'禍'), (0xFA53, 'M', u'禎'), (0xFA54, 'M', u'穀'), (0xFA55, 'M', u'突'), (0xFA56, 'M', u'節'), (0xFA57, 'M', u'練'), (0xFA58, 'M', u'縉'), (0xFA59, 'M', u'繁'), (0xFA5A, 'M', u'署'), (0xFA5B, 'M', u'者'), (0xFA5C, 'M', u'臭'), (0xFA5D, 'M', u'艹'), (0xFA5F, 'M', u'著'), ] def _seg_41(): return [ (0xFA60, 'M', u'褐'), (0xFA61, 'M', u'視'), (0xFA62, 'M', u'謁'), (0xFA63, 'M', u'謹'), (0xFA64, 'M', u'賓'), (0xFA65, 'M', u'贈'), (0xFA66, 'M', u'辶'), (0xFA67, 'M', u'逸'), (0xFA68, 'M', u'難'), (0xFA69, 'M', u'響'), (0xFA6A, 'M', u'頻'), (0xFA6B, 'M', u'恵'), (0xFA6C, 'M', u'𤋮'), (0xFA6D, 'M', u'舘'), (0xFA6E, 'X'), (0xFA70, 'M', u'並'), (0xFA71, 
'M', u'况'), (0xFA72, 'M', u'全'), (0xFA73, 'M', u'侀'), (0xFA74, 'M', u'充'), (0xFA75, 'M', u'冀'), (0xFA76, 'M', u'勇'), (0xFA77, 'M', u'勺'), (0xFA78, 'M', u'喝'), (0xFA79, 'M', u'啕'), (0xFA7A, 'M', u'喙'), (0xFA7B, 'M', u'嗢'), (0xFA7C, 'M', u'塚'), (0xFA7D, 'M', u'墳'), (0xFA7E, 'M', u'奄'), (0xFA7F, 'M', u'奔'), (0xFA80, 'M', u'婢'), (0xFA81, 'M', u'嬨'), (0xFA82, 'M', u'廒'), (0xFA83, 'M', u'廙'), (0xFA84, 'M', u'彩'), (0xFA85, 'M', u'徭'), (0xFA86, 'M', u'惘'), (0xFA87, 'M', u'慎'), (0xFA88, 'M', u'愈'), (0xFA89, 'M', u'憎'), (0xFA8A, 'M', u'慠'), (0xFA8B, 'M', u'懲'), (0xFA8C, 'M', u'戴'), (0xFA8D, 'M', u'揄'), (0xFA8E, 'M', u'搜'), (0xFA8F, 'M', u'摒'), (0xFA90, 'M', u'敖'), (0xFA91, 'M', u'晴'), (0xFA92, 'M', u'朗'), (0xFA93, 'M', u'望'), (0xFA94, 'M', u'杖'), (0xFA95, 'M', u'歹'), (0xFA96, 'M', u'殺'), (0xFA97, 'M', u'流'), (0xFA98, 'M', u'滛'), (0xFA99, 'M', u'滋'), (0xFA9A, 'M', u'漢'), (0xFA9B, 'M', u'瀞'), (0xFA9C, 'M', u'煮'), (0xFA9D, 'M', u'瞧'), (0xFA9E, 'M', u'爵'), (0xFA9F, 'M', u'犯'), (0xFAA0, 'M', u'猪'), (0xFAA1, 'M', u'瑱'), (0xFAA2, 'M', u'甆'), (0xFAA3, 'M', u'画'), (0xFAA4, 'M', u'瘝'), (0xFAA5, 'M', u'瘟'), (0xFAA6, 'M', u'益'), (0xFAA7, 'M', u'盛'), (0xFAA8, 'M', u'直'), (0xFAA9, 'M', u'睊'), (0xFAAA, 'M', u'着'), (0xFAAB, 'M', u'磌'), (0xFAAC, 'M', u'窱'), (0xFAAD, 'M', u'節'), (0xFAAE, 'M', u'类'), (0xFAAF, 'M', u'絛'), (0xFAB0, 'M', u'練'), (0xFAB1, 'M', u'缾'), (0xFAB2, 'M', u'者'), (0xFAB3, 'M', u'荒'), (0xFAB4, 'M', u'華'), (0xFAB5, 'M', u'蝹'), (0xFAB6, 'M', u'襁'), (0xFAB7, 'M', u'覆'), (0xFAB8, 'M', u'視'), (0xFAB9, 'M', u'調'), (0xFABA, 'M', u'諸'), (0xFABB, 'M', u'請'), (0xFABC, 'M', u'謁'), (0xFABD, 'M', u'諾'), (0xFABE, 'M', u'諭'), (0xFABF, 'M', u'謹'), (0xFAC0, 'M', u'變'), (0xFAC1, 'M', u'贈'), (0xFAC2, 'M', u'輸'), (0xFAC3, 'M', u'遲'), (0xFAC4, 'M', u'醙'), ] def _seg_42(): return [ (0xFAC5, 'M', u'鉶'), (0xFAC6, 'M', u'陼'), (0xFAC7, 'M', u'難'), (0xFAC8, 'M', u'靖'), (0xFAC9, 'M', u'韛'), (0xFACA, 'M', u'響'), (0xFACB, 'M', u'頋'), (0xFACC, 'M', u'頻'), (0xFACD, 'M', u'鬒'), (0xFACE, 'M', u'龜'), (0xFACF, 'M', u'𢡊'), (0xFAD0, 'M', u'𢡄'), (0xFAD1, 'M', u'𣏕'), (0xFAD2, 'M', u'㮝'), (0xFAD3, 'M', u'䀘'), (0xFAD4, 'M', u'䀹'), (0xFAD5, 'M', u'𥉉'), (0xFAD6, 'M', u'𥳐'), (0xFAD7, 'M', u'𧻓'), (0xFAD8, 'M', u'齃'), (0xFAD9, 'M', u'龎'), (0xFADA, 'X'), (0xFB00, 'M', u'ff'), (0xFB01, 'M', u'fi'), (0xFB02, 'M', u'fl'), (0xFB03, 'M', u'ffi'), (0xFB04, 'M', u'ffl'), (0xFB05, 'M', u'st'), (0xFB07, 'X'), (0xFB13, 'M', u'մն'), (0xFB14, 'M', u'մե'), (0xFB15, 'M', u'մի'), (0xFB16, 'M', u'վն'), (0xFB17, 'M', u'մխ'), (0xFB18, 'X'), (0xFB1D, 'M', u'יִ'), (0xFB1E, 'V'), (0xFB1F, 'M', u'ײַ'), (0xFB20, 'M', u'ע'), (0xFB21, 'M', u'א'), (0xFB22, 'M', u'ד'), (0xFB23, 'M', u'ה'), (0xFB24, 'M', u'כ'), (0xFB25, 'M', u'ל'), (0xFB26, 'M', u'ם'), (0xFB27, 'M', u'ר'), (0xFB28, 'M', u'ת'), (0xFB29, '3', u'+'), (0xFB2A, 'M', u'שׁ'), (0xFB2B, 'M', u'שׂ'), (0xFB2C, 'M', u'שּׁ'), (0xFB2D, 'M', u'שּׂ'), (0xFB2E, 'M', u'אַ'), (0xFB2F, 'M', u'אָ'), (0xFB30, 'M', u'אּ'), (0xFB31, 'M', u'בּ'), (0xFB32, 'M', u'גּ'), (0xFB33, 'M', u'דּ'), (0xFB34, 'M', u'הּ'), (0xFB35, 'M', u'וּ'), (0xFB36, 'M', u'זּ'), (0xFB37, 'X'), (0xFB38, 'M', u'טּ'), (0xFB39, 'M', u'יּ'), (0xFB3A, 'M', u'ךּ'), (0xFB3B, 'M', u'כּ'), (0xFB3C, 'M', u'לּ'), (0xFB3D, 'X'), (0xFB3E, 'M', u'מּ'), (0xFB3F, 'X'), (0xFB40, 'M', u'נּ'), (0xFB41, 'M', u'סּ'), (0xFB42, 'X'), (0xFB43, 'M', u'ףּ'), (0xFB44, 'M', u'פּ'), (0xFB45, 'X'), (0xFB46, 'M', u'צּ'), (0xFB47, 'M', u'קּ'), (0xFB48, 'M', u'רּ'), (0xFB49, 'M', u'שּ'), (0xFB4A, 'M', u'תּ'), (0xFB4B, 'M', u'וֹ'), (0xFB4C, 'M', u'בֿ'), (0xFB4D, 'M', u'כֿ'), (0xFB4E, 'M', 
u'פֿ'), (0xFB4F, 'M', u'אל'), (0xFB50, 'M', u'ٱ'), (0xFB52, 'M', u'ٻ'), (0xFB56, 'M', u'پ'), (0xFB5A, 'M', u'ڀ'), (0xFB5E, 'M', u'ٺ'), (0xFB62, 'M', u'ٿ'), (0xFB66, 'M', u'ٹ'), (0xFB6A, 'M', u'ڤ'), (0xFB6E, 'M', u'ڦ'), (0xFB72, 'M', u'ڄ'), (0xFB76, 'M', u'ڃ'), (0xFB7A, 'M', u'چ'), (0xFB7E, 'M', u'ڇ'), (0xFB82, 'M', u'ڍ'), ] def _seg_43(): return [ (0xFB84, 'M', u'ڌ'), (0xFB86, 'M', u'ڎ'), (0xFB88, 'M', u'ڈ'), (0xFB8A, 'M', u'ژ'), (0xFB8C, 'M', u'ڑ'), (0xFB8E, 'M', u'ک'), (0xFB92, 'M', u'گ'), (0xFB96, 'M', u'ڳ'), (0xFB9A, 'M', u'ڱ'), (0xFB9E, 'M', u'ں'), (0xFBA0, 'M', u'ڻ'), (0xFBA4, 'M', u'ۀ'), (0xFBA6, 'M', u'ہ'), (0xFBAA, 'M', u'ھ'), (0xFBAE, 'M', u'ے'), (0xFBB0, 'M', u'ۓ'), (0xFBB2, 'V'), (0xFBC2, 'X'), (0xFBD3, 'M', u'ڭ'), (0xFBD7, 'M', u'ۇ'), (0xFBD9, 'M', u'ۆ'), (0xFBDB, 'M', u'ۈ'), (0xFBDD, 'M', u'ۇٴ'), (0xFBDE, 'M', u'ۋ'), (0xFBE0, 'M', u'ۅ'), (0xFBE2, 'M', u'ۉ'), (0xFBE4, 'M', u'ې'), (0xFBE8, 'M', u'ى'), (0xFBEA, 'M', u'ئا'), (0xFBEC, 'M', u'ئە'), (0xFBEE, 'M', u'ئو'), (0xFBF0, 'M', u'ئۇ'), (0xFBF2, 'M', u'ئۆ'), (0xFBF4, 'M', u'ئۈ'), (0xFBF6, 'M', u'ئې'), (0xFBF9, 'M', u'ئى'), (0xFBFC, 'M', u'ی'), (0xFC00, 'M', u'ئج'), (0xFC01, 'M', u'ئح'), (0xFC02, 'M', u'ئم'), (0xFC03, 'M', u'ئى'), (0xFC04, 'M', u'ئي'), (0xFC05, 'M', u'بج'), (0xFC06, 'M', u'بح'), (0xFC07, 'M', u'بخ'), (0xFC08, 'M', u'بم'), (0xFC09, 'M', u'بى'), (0xFC0A, 'M', u'بي'), (0xFC0B, 'M', u'تج'), (0xFC0C, 'M', u'تح'), (0xFC0D, 'M', u'تخ'), (0xFC0E, 'M', u'تم'), (0xFC0F, 'M', u'تى'), (0xFC10, 'M', u'تي'), (0xFC11, 'M', u'ثج'), (0xFC12, 'M', u'ثم'), (0xFC13, 'M', u'ثى'), (0xFC14, 'M', u'ثي'), (0xFC15, 'M', u'جح'), (0xFC16, 'M', u'جم'), (0xFC17, 'M', u'حج'), (0xFC18, 'M', u'حم'), (0xFC19, 'M', u'خج'), (0xFC1A, 'M', u'خح'), (0xFC1B, 'M', u'خم'), (0xFC1C, 'M', u'سج'), (0xFC1D, 'M', u'سح'), (0xFC1E, 'M', u'سخ'), (0xFC1F, 'M', u'سم'), (0xFC20, 'M', u'صح'), (0xFC21, 'M', u'صم'), (0xFC22, 'M', u'ضج'), (0xFC23, 'M', u'ضح'), (0xFC24, 'M', u'ضخ'), (0xFC25, 'M', u'ضم'), (0xFC26, 'M', u'طح'), (0xFC27, 'M', u'طم'), (0xFC28, 'M', u'ظم'), (0xFC29, 'M', u'عج'), (0xFC2A, 'M', u'عم'), (0xFC2B, 'M', u'غج'), (0xFC2C, 'M', u'غم'), (0xFC2D, 'M', u'فج'), (0xFC2E, 'M', u'فح'), (0xFC2F, 'M', u'فخ'), (0xFC30, 'M', u'فم'), (0xFC31, 'M', u'فى'), (0xFC32, 'M', u'في'), (0xFC33, 'M', u'قح'), (0xFC34, 'M', u'قم'), (0xFC35, 'M', u'قى'), (0xFC36, 'M', u'قي'), (0xFC37, 'M', u'كا'), (0xFC38, 'M', u'كج'), (0xFC39, 'M', u'كح'), (0xFC3A, 'M', u'كخ'), (0xFC3B, 'M', u'كل'), (0xFC3C, 'M', u'كم'), (0xFC3D, 'M', u'كى'), (0xFC3E, 'M', u'كي'), ] def _seg_44(): return [ (0xFC3F, 'M', u'لج'), (0xFC40, 'M', u'لح'), (0xFC41, 'M', u'لخ'), (0xFC42, 'M', u'لم'), (0xFC43, 'M', u'لى'), (0xFC44, 'M', u'لي'), (0xFC45, 'M', u'مج'), (0xFC46, 'M', u'مح'), (0xFC47, 'M', u'مخ'), (0xFC48, 'M', u'مم'), (0xFC49, 'M', u'مى'), (0xFC4A, 'M', u'مي'), (0xFC4B, 'M', u'نج'), (0xFC4C, 'M', u'نح'), (0xFC4D, 'M', u'نخ'), (0xFC4E, 'M', u'نم'), (0xFC4F, 'M', u'نى'), (0xFC50, 'M', u'ني'), (0xFC51, 'M', u'هج'), (0xFC52, 'M', u'هم'), (0xFC53, 'M', u'هى'), (0xFC54, 'M', u'هي'), (0xFC55, 'M', u'يج'), (0xFC56, 'M', u'يح'), (0xFC57, 'M', u'يخ'), (0xFC58, 'M', u'يم'), (0xFC59, 'M', u'يى'), (0xFC5A, 'M', u'يي'), (0xFC5B, 'M', u'ذٰ'), (0xFC5C, 'M', u'رٰ'), (0xFC5D, 'M', u'ىٰ'), (0xFC5E, '3', u' ٌّ'), (0xFC5F, '3', u' ٍّ'), (0xFC60, '3', u' َّ'), (0xFC61, '3', u' ُّ'), (0xFC62, '3', u' ِّ'), (0xFC63, '3', u' ّٰ'), (0xFC64, 'M', u'ئر'), (0xFC65, 'M', u'ئز'), (0xFC66, 'M', u'ئم'), (0xFC67, 'M', u'ئن'), (0xFC68, 'M', u'ئى'), (0xFC69, 'M', u'ئي'), (0xFC6A, 'M', u'بر'), (0xFC6B, 'M', u'بز'), (0xFC6C, 'M', u'بم'), 
(0xFC6D, 'M', u'بن'), (0xFC6E, 'M', u'بى'), (0xFC6F, 'M', u'بي'), (0xFC70, 'M', u'تر'), (0xFC71, 'M', u'تز'), (0xFC72, 'M', u'تم'), (0xFC73, 'M', u'تن'), (0xFC74, 'M', u'تى'), (0xFC75, 'M', u'تي'), (0xFC76, 'M', u'ثر'), (0xFC77, 'M', u'ثز'), (0xFC78, 'M', u'ثم'), (0xFC79, 'M', u'ثن'), (0xFC7A, 'M', u'ثى'), (0xFC7B, 'M', u'ثي'), (0xFC7C, 'M', u'فى'), (0xFC7D, 'M', u'في'), (0xFC7E, 'M', u'قى'), (0xFC7F, 'M', u'قي'), (0xFC80, 'M', u'كا'), (0xFC81, 'M', u'كل'), (0xFC82, 'M', u'كم'), (0xFC83, 'M', u'كى'), (0xFC84, 'M', u'كي'), (0xFC85, 'M', u'لم'), (0xFC86, 'M', u'لى'), (0xFC87, 'M', u'لي'), (0xFC88, 'M', u'ما'), (0xFC89, 'M', u'مم'), (0xFC8A, 'M', u'نر'), (0xFC8B, 'M', u'نز'), (0xFC8C, 'M', u'نم'), (0xFC8D, 'M', u'نن'), (0xFC8E, 'M', u'نى'), (0xFC8F, 'M', u'ني'), (0xFC90, 'M', u'ىٰ'), (0xFC91, 'M', u'ير'), (0xFC92, 'M', u'يز'), (0xFC93, 'M', u'يم'), (0xFC94, 'M', u'ين'), (0xFC95, 'M', u'يى'), (0xFC96, 'M', u'يي'), (0xFC97, 'M', u'ئج'), (0xFC98, 'M', u'ئح'), (0xFC99, 'M', u'ئخ'), (0xFC9A, 'M', u'ئم'), (0xFC9B, 'M', u'ئه'), (0xFC9C, 'M', u'بج'), (0xFC9D, 'M', u'بح'), (0xFC9E, 'M', u'بخ'), (0xFC9F, 'M', u'بم'), (0xFCA0, 'M', u'به'), (0xFCA1, 'M', u'تج'), (0xFCA2, 'M', u'تح'), ] def _seg_45(): return [ (0xFCA3, 'M', u'تخ'), (0xFCA4, 'M', u'تم'), (0xFCA5, 'M', u'ته'), (0xFCA6, 'M', u'ثم'), (0xFCA7, 'M', u'جح'), (0xFCA8, 'M', u'جم'), (0xFCA9, 'M', u'حج'), (0xFCAA, 'M', u'حم'), (0xFCAB, 'M', u'خج'), (0xFCAC, 'M', u'خم'), (0xFCAD, 'M', u'سج'), (0xFCAE, 'M', u'سح'), (0xFCAF, 'M', u'سخ'), (0xFCB0, 'M', u'سم'), (0xFCB1, 'M', u'صح'), (0xFCB2, 'M', u'صخ'), (0xFCB3, 'M', u'صم'), (0xFCB4, 'M', u'ضج'), (0xFCB5, 'M', u'ضح'), (0xFCB6, 'M', u'ضخ'), (0xFCB7, 'M', u'ضم'), (0xFCB8, 'M', u'طح'), (0xFCB9, 'M', u'ظم'), (0xFCBA, 'M', u'عج'), (0xFCBB, 'M', u'عم'), (0xFCBC, 'M', u'غج'), (0xFCBD, 'M', u'غم'), (0xFCBE, 'M', u'فج'), (0xFCBF, 'M', u'فح'), (0xFCC0, 'M', u'فخ'), (0xFCC1, 'M', u'فم'), (0xFCC2, 'M', u'قح'), (0xFCC3, 'M', u'قم'), (0xFCC4, 'M', u'كج'), (0xFCC5, 'M', u'كح'), (0xFCC6, 'M', u'كخ'), (0xFCC7, 'M', u'كل'), (0xFCC8, 'M', u'كم'), (0xFCC9, 'M', u'لج'), (0xFCCA, 'M', u'لح'), (0xFCCB, 'M', u'لخ'), (0xFCCC, 'M', u'لم'), (0xFCCD, 'M', u'له'), (0xFCCE, 'M', u'مج'), (0xFCCF, 'M', u'مح'), (0xFCD0, 'M', u'مخ'), (0xFCD1, 'M', u'مم'), (0xFCD2, 'M', u'نج'), (0xFCD3, 'M', u'نح'), (0xFCD4, 'M', u'نخ'), (0xFCD5, 'M', u'نم'), (0xFCD6, 'M', u'نه'), (0xFCD7, 'M', u'هج'), (0xFCD8, 'M', u'هم'), (0xFCD9, 'M', u'هٰ'), (0xFCDA, 'M', u'يج'), (0xFCDB, 'M', u'يح'), (0xFCDC, 'M', u'يخ'), (0xFCDD, 'M', u'يم'), (0xFCDE, 'M', u'يه'), (0xFCDF, 'M', u'ئم'), (0xFCE0, 'M', u'ئه'), (0xFCE1, 'M', u'بم'), (0xFCE2, 'M', u'به'), (0xFCE3, 'M', u'تم'), (0xFCE4, 'M', u'ته'), (0xFCE5, 'M', u'ثم'), (0xFCE6, 'M', u'ثه'), (0xFCE7, 'M', u'سم'), (0xFCE8, 'M', u'سه'), (0xFCE9, 'M', u'شم'), (0xFCEA, 'M', u'شه'), (0xFCEB, 'M', u'كل'), (0xFCEC, 'M', u'كم'), (0xFCED, 'M', u'لم'), (0xFCEE, 'M', u'نم'), (0xFCEF, 'M', u'نه'), (0xFCF0, 'M', u'يم'), (0xFCF1, 'M', u'يه'), (0xFCF2, 'M', u'ـَّ'), (0xFCF3, 'M', u'ـُّ'), (0xFCF4, 'M', u'ـِّ'), (0xFCF5, 'M', u'طى'), (0xFCF6, 'M', u'طي'), (0xFCF7, 'M', u'عى'), (0xFCF8, 'M', u'عي'), (0xFCF9, 'M', u'غى'), (0xFCFA, 'M', u'غي'), (0xFCFB, 'M', u'سى'), (0xFCFC, 'M', u'سي'), (0xFCFD, 'M', u'شى'), (0xFCFE, 'M', u'شي'), (0xFCFF, 'M', u'حى'), (0xFD00, 'M', u'حي'), (0xFD01, 'M', u'جى'), (0xFD02, 'M', u'جي'), (0xFD03, 'M', u'خى'), (0xFD04, 'M', u'خي'), (0xFD05, 'M', u'صى'), (0xFD06, 'M', u'صي'), ] def _seg_46(): return [ (0xFD07, 'M', u'ضى'), (0xFD08, 'M', u'ضي'), (0xFD09, 'M', u'شج'), (0xFD0A, 'M', u'شح'), (0xFD0B, 'M', u'شخ'), 
(0xFD0C, 'M', u'شم'), (0xFD0D, 'M', u'شر'), (0xFD0E, 'M', u'سر'), (0xFD0F, 'M', u'صر'), (0xFD10, 'M', u'ضر'), (0xFD11, 'M', u'طى'), (0xFD12, 'M', u'طي'), (0xFD13, 'M', u'عى'), (0xFD14, 'M', u'عي'), (0xFD15, 'M', u'غى'), (0xFD16, 'M', u'غي'), (0xFD17, 'M', u'سى'), (0xFD18, 'M', u'سي'), (0xFD19, 'M', u'شى'), (0xFD1A, 'M', u'شي'), (0xFD1B, 'M', u'حى'), (0xFD1C, 'M', u'حي'), (0xFD1D, 'M', u'جى'), (0xFD1E, 'M', u'جي'), (0xFD1F, 'M', u'خى'), (0xFD20, 'M', u'خي'), (0xFD21, 'M', u'صى'), (0xFD22, 'M', u'صي'), (0xFD23, 'M', u'ضى'), (0xFD24, 'M', u'ضي'), (0xFD25, 'M', u'شج'), (0xFD26, 'M', u'شح'), (0xFD27, 'M', u'شخ'), (0xFD28, 'M', u'شم'), (0xFD29, 'M', u'شر'), (0xFD2A, 'M', u'سر'), (0xFD2B, 'M', u'صر'), (0xFD2C, 'M', u'ضر'), (0xFD2D, 'M', u'شج'), (0xFD2E, 'M', u'شح'), (0xFD2F, 'M', u'شخ'), (0xFD30, 'M', u'شم'), (0xFD31, 'M', u'سه'), (0xFD32, 'M', u'شه'), (0xFD33, 'M', u'طم'), (0xFD34, 'M', u'سج'), (0xFD35, 'M', u'سح'), (0xFD36, 'M', u'سخ'), (0xFD37, 'M', u'شج'), (0xFD38, 'M', u'شح'), (0xFD39, 'M', u'شخ'), (0xFD3A, 'M', u'طم'), (0xFD3B, 'M', u'ظم'), (0xFD3C, 'M', u'اً'), (0xFD3E, 'V'), (0xFD40, 'X'), (0xFD50, 'M', u'تجم'), (0xFD51, 'M', u'تحج'), (0xFD53, 'M', u'تحم'), (0xFD54, 'M', u'تخم'), (0xFD55, 'M', u'تمج'), (0xFD56, 'M', u'تمح'), (0xFD57, 'M', u'تمخ'), (0xFD58, 'M', u'جمح'), (0xFD5A, 'M', u'حمي'), (0xFD5B, 'M', u'حمى'), (0xFD5C, 'M', u'سحج'), (0xFD5D, 'M', u'سجح'), (0xFD5E, 'M', u'سجى'), (0xFD5F, 'M', u'سمح'), (0xFD61, 'M', u'سمج'), (0xFD62, 'M', u'سمم'), (0xFD64, 'M', u'صحح'), (0xFD66, 'M', u'صمم'), (0xFD67, 'M', u'شحم'), (0xFD69, 'M', u'شجي'), (0xFD6A, 'M', u'شمخ'), (0xFD6C, 'M', u'شمم'), (0xFD6E, 'M', u'ضحى'), (0xFD6F, 'M', u'ضخم'), (0xFD71, 'M', u'طمح'), (0xFD73, 'M', u'طمم'), (0xFD74, 'M', u'طمي'), (0xFD75, 'M', u'عجم'), (0xFD76, 'M', u'عمم'), (0xFD78, 'M', u'عمى'), (0xFD79, 'M', u'غمم'), (0xFD7A, 'M', u'غمي'), (0xFD7B, 'M', u'غمى'), (0xFD7C, 'M', u'فخم'), (0xFD7E, 'M', u'قمح'), (0xFD7F, 'M', u'قمم'), (0xFD80, 'M', u'لحم'), (0xFD81, 'M', u'لحي'), (0xFD82, 'M', u'لحى'), (0xFD83, 'M', u'لجج'), (0xFD85, 'M', u'لخم'), (0xFD87, 'M', u'لمح'), (0xFD89, 'M', u'محج'), (0xFD8A, 'M', u'محم'), ] def _seg_47(): return [ (0xFD8B, 'M', u'محي'), (0xFD8C, 'M', u'مجح'), (0xFD8D, 'M', u'مجم'), (0xFD8E, 'M', u'مخج'), (0xFD8F, 'M', u'مخم'), (0xFD90, 'X'), (0xFD92, 'M', u'مجخ'), (0xFD93, 'M', u'همج'), (0xFD94, 'M', u'همم'), (0xFD95, 'M', u'نحم'), (0xFD96, 'M', u'نحى'), (0xFD97, 'M', u'نجم'), (0xFD99, 'M', u'نجى'), (0xFD9A, 'M', u'نمي'), (0xFD9B, 'M', u'نمى'), (0xFD9C, 'M', u'يمم'), (0xFD9E, 'M', u'بخي'), (0xFD9F, 'M', u'تجي'), (0xFDA0, 'M', u'تجى'), (0xFDA1, 'M', u'تخي'), (0xFDA2, 'M', u'تخى'), (0xFDA3, 'M', u'تمي'), (0xFDA4, 'M', u'تمى'), (0xFDA5, 'M', u'جمي'), (0xFDA6, 'M', u'جحى'), (0xFDA7, 'M', u'جمى'), (0xFDA8, 'M', u'سخى'), (0xFDA9, 'M', u'صحي'), (0xFDAA, 'M', u'شحي'), (0xFDAB, 'M', u'ضحي'), (0xFDAC, 'M', u'لجي'), (0xFDAD, 'M', u'لمي'), (0xFDAE, 'M', u'يحي'), (0xFDAF, 'M', u'يجي'), (0xFDB0, 'M', u'يمي'), (0xFDB1, 'M', u'ممي'), (0xFDB2, 'M', u'قمي'), (0xFDB3, 'M', u'نحي'), (0xFDB4, 'M', u'قمح'), (0xFDB5, 'M', u'لحم'), (0xFDB6, 'M', u'عمي'), (0xFDB7, 'M', u'كمي'), (0xFDB8, 'M', u'نجح'), (0xFDB9, 'M', u'مخي'), (0xFDBA, 'M', u'لجم'), (0xFDBB, 'M', u'كمم'), (0xFDBC, 'M', u'لجم'), (0xFDBD, 'M', u'نجح'), (0xFDBE, 'M', u'جحي'), (0xFDBF, 'M', u'حجي'), (0xFDC0, 'M', u'مجي'), (0xFDC1, 'M', u'فمي'), (0xFDC2, 'M', u'بحي'), (0xFDC3, 'M', u'كمم'), (0xFDC4, 'M', u'عجم'), (0xFDC5, 'M', u'صمم'), (0xFDC6, 'M', u'سخي'), (0xFDC7, 'M', u'نجي'), (0xFDC8, 'X'), (0xFDF0, 'M', u'صلے'), (0xFDF1, 'M', u'قلے'), (0xFDF2, 'M', 
u'الله'), (0xFDF3, 'M', u'اكبر'), (0xFDF4, 'M', u'محمد'), (0xFDF5, 'M', u'صلعم'), (0xFDF6, 'M', u'رسول'), (0xFDF7, 'M', u'عليه'), (0xFDF8, 'M', u'وسلم'), (0xFDF9, 'M', u'صلى'), (0xFDFA, '3', u'صلى الله عليه وسلم'), (0xFDFB, '3', u'جل جلاله'), (0xFDFC, 'M', u'ریال'), (0xFDFD, 'V'), (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', u','), (0xFE11, 'M', u'、'), (0xFE12, 'X'), (0xFE13, '3', u':'), (0xFE14, '3', u';'), (0xFE15, '3', u'!'), (0xFE16, '3', u'?'), (0xFE17, 'M', u'〖'), (0xFE18, 'M', u'〗'), (0xFE19, 'X'), (0xFE20, 'V'), (0xFE27, 'X'), (0xFE31, 'M', u'—'), (0xFE32, 'M', u'–'), (0xFE33, '3', u'_'), (0xFE35, '3', u'('), (0xFE36, '3', u')'), (0xFE37, '3', u'{'), (0xFE38, '3', u'}'), (0xFE39, 'M', u'〔'), (0xFE3A, 'M', u'〕'), (0xFE3B, 'M', u'【'), (0xFE3C, 'M', u'】'), (0xFE3D, 'M', u'《'), (0xFE3E, 'M', u'》'), ] def _seg_48(): return [ (0xFE3F, 'M', u'〈'), (0xFE40, 'M', u'〉'), (0xFE41, 'M', u'「'), (0xFE42, 'M', u'」'), (0xFE43, 'M', u'『'), (0xFE44, 'M', u'』'), (0xFE45, 'V'), (0xFE47, '3', u'['), (0xFE48, '3', u']'), (0xFE49, '3', u' ̅'), (0xFE4D, '3', u'_'), (0xFE50, '3', u','), (0xFE51, 'M', u'、'), (0xFE52, 'X'), (0xFE54, '3', u';'), (0xFE55, '3', u':'), (0xFE56, '3', u'?'), (0xFE57, '3', u'!'), (0xFE58, 'M', u'—'), (0xFE59, '3', u'('), (0xFE5A, '3', u')'), (0xFE5B, '3', u'{'), (0xFE5C, '3', u'}'), (0xFE5D, 'M', u'〔'), (0xFE5E, 'M', u'〕'), (0xFE5F, '3', u'#'), (0xFE60, '3', u'&'), (0xFE61, '3', u'*'), (0xFE62, '3', u'+'), (0xFE63, 'M', u'-'), (0xFE64, '3', u'<'), (0xFE65, '3', u'>'), (0xFE66, '3', u'='), (0xFE67, 'X'), (0xFE68, '3', u'\\'), (0xFE69, '3', u'$'), (0xFE6A, '3', u'%'), (0xFE6B, '3', u'@'), (0xFE6C, 'X'), (0xFE70, '3', u' ً'), (0xFE71, 'M', u'ـً'), (0xFE72, '3', u' ٌ'), (0xFE73, 'V'), (0xFE74, '3', u' ٍ'), (0xFE75, 'X'), (0xFE76, '3', u' َ'), (0xFE77, 'M', u'ـَ'), (0xFE78, '3', u' ُ'), (0xFE79, 'M', u'ـُ'), (0xFE7A, '3', u' ِ'), (0xFE7B, 'M', u'ـِ'), (0xFE7C, '3', u' ّ'), (0xFE7D, 'M', u'ـّ'), (0xFE7E, '3', u' ْ'), (0xFE7F, 'M', u'ـْ'), (0xFE80, 'M', u'ء'), (0xFE81, 'M', u'آ'), (0xFE83, 'M', u'أ'), (0xFE85, 'M', u'ؤ'), (0xFE87, 'M', u'إ'), (0xFE89, 'M', u'ئ'), (0xFE8D, 'M', u'ا'), (0xFE8F, 'M', u'ب'), (0xFE93, 'M', u'ة'), (0xFE95, 'M', u'ت'), (0xFE99, 'M', u'ث'), (0xFE9D, 'M', u'ج'), (0xFEA1, 'M', u'ح'), (0xFEA5, 'M', u'خ'), (0xFEA9, 'M', u'د'), (0xFEAB, 'M', u'ذ'), (0xFEAD, 'M', u'ر'), (0xFEAF, 'M', u'ز'), (0xFEB1, 'M', u'س'), (0xFEB5, 'M', u'ش'), (0xFEB9, 'M', u'ص'), (0xFEBD, 'M', u'ض'), (0xFEC1, 'M', u'ط'), (0xFEC5, 'M', u'ظ'), (0xFEC9, 'M', u'ع'), (0xFECD, 'M', u'غ'), (0xFED1, 'M', u'ف'), (0xFED5, 'M', u'ق'), (0xFED9, 'M', u'ك'), (0xFEDD, 'M', u'ل'), (0xFEE1, 'M', u'م'), (0xFEE5, 'M', u'ن'), (0xFEE9, 'M', u'ه'), (0xFEED, 'M', u'و'), (0xFEEF, 'M', u'ى'), (0xFEF1, 'M', u'ي'), (0xFEF5, 'M', u'لآ'), (0xFEF7, 'M', u'لأ'), (0xFEF9, 'M', u'لإ'), (0xFEFB, 'M', u'لا'), (0xFEFD, 'X'), (0xFEFF, 'I'), (0xFF00, 'X'), (0xFF01, '3', u'!'), (0xFF02, '3', u'"'), ] def _seg_49(): return [ (0xFF03, '3', u'#'), (0xFF04, '3', u'$'), (0xFF05, '3', u'%'), (0xFF06, '3', u'&'), (0xFF07, '3', u'\''), (0xFF08, '3', u'('), (0xFF09, '3', u')'), (0xFF0A, '3', u'*'), (0xFF0B, '3', u'+'), (0xFF0C, '3', u','), (0xFF0D, 'M', u'-'), (0xFF0E, 'M', u'.'), (0xFF0F, '3', u'/'), (0xFF10, 'M', u'0'), (0xFF11, 'M', u'1'), (0xFF12, 'M', u'2'), (0xFF13, 'M', u'3'), (0xFF14, 'M', u'4'), (0xFF15, 'M', u'5'), (0xFF16, 'M', u'6'), (0xFF17, 'M', u'7'), (0xFF18, 'M', u'8'), (0xFF19, 'M', u'9'), (0xFF1A, '3', u':'), (0xFF1B, '3', u';'), (0xFF1C, '3', u'<'), (0xFF1D, '3', u'='), (0xFF1E, '3', u'>'), (0xFF1F, '3', u'?'), (0xFF20, '3', 
u'@'), (0xFF21, 'M', u'a'), (0xFF22, 'M', u'b'), (0xFF23, 'M', u'c'), (0xFF24, 'M', u'd'), (0xFF25, 'M', u'e'), (0xFF26, 'M', u'f'), (0xFF27, 'M', u'g'), (0xFF28, 'M', u'h'), (0xFF29, 'M', u'i'), (0xFF2A, 'M', u'j'), (0xFF2B, 'M', u'k'), (0xFF2C, 'M', u'l'), (0xFF2D, 'M', u'm'), (0xFF2E, 'M', u'n'), (0xFF2F, 'M', u'o'), (0xFF30, 'M', u'p'), (0xFF31, 'M', u'q'), (0xFF32, 'M', u'r'), (0xFF33, 'M', u's'), (0xFF34, 'M', u't'), (0xFF35, 'M', u'u'), (0xFF36, 'M', u'v'), (0xFF37, 'M', u'w'), (0xFF38, 'M', u'x'), (0xFF39, 'M', u'y'), (0xFF3A, 'M', u'z'), (0xFF3B, '3', u'['), (0xFF3C, '3', u'\\'), (0xFF3D, '3', u']'), (0xFF3E, '3', u'^'), (0xFF3F, '3', u'_'), (0xFF40, '3', u'`'), (0xFF41, 'M', u'a'), (0xFF42, 'M', u'b'), (0xFF43, 'M', u'c'), (0xFF44, 'M', u'd'), (0xFF45, 'M', u'e'), (0xFF46, 'M', u'f'), (0xFF47, 'M', u'g'), (0xFF48, 'M', u'h'), (0xFF49, 'M', u'i'), (0xFF4A, 'M', u'j'), (0xFF4B, 'M', u'k'), (0xFF4C, 'M', u'l'), (0xFF4D, 'M', u'm'), (0xFF4E, 'M', u'n'), (0xFF4F, 'M', u'o'), (0xFF50, 'M', u'p'), (0xFF51, 'M', u'q'), (0xFF52, 'M', u'r'), (0xFF53, 'M', u's'), (0xFF54, 'M', u't'), (0xFF55, 'M', u'u'), (0xFF56, 'M', u'v'), (0xFF57, 'M', u'w'), (0xFF58, 'M', u'x'), (0xFF59, 'M', u'y'), (0xFF5A, 'M', u'z'), (0xFF5B, '3', u'{'), (0xFF5C, '3', u'|'), (0xFF5D, '3', u'}'), (0xFF5E, '3', u'~'), (0xFF5F, 'M', u'⦅'), (0xFF60, 'M', u'⦆'), (0xFF61, 'M', u'.'), (0xFF62, 'M', u'「'), (0xFF63, 'M', u'」'), (0xFF64, 'M', u'、'), (0xFF65, 'M', u'・'), (0xFF66, 'M', u'ヲ'), ] def _seg_50(): return [ (0xFF67, 'M', u'ァ'), (0xFF68, 'M', u'ィ'), (0xFF69, 'M', u'ゥ'), (0xFF6A, 'M', u'ェ'), (0xFF6B, 'M', u'ォ'), (0xFF6C, 'M', u'ャ'), (0xFF6D, 'M', u'ュ'), (0xFF6E, 'M', u'ョ'), (0xFF6F, 'M', u'ッ'), (0xFF70, 'M', u'ー'), (0xFF71, 'M', u'ア'), (0xFF72, 'M', u'イ'), (0xFF73, 'M', u'ウ'), (0xFF74, 'M', u'エ'), (0xFF75, 'M', u'オ'), (0xFF76, 'M', u'カ'), (0xFF77, 'M', u'キ'), (0xFF78, 'M', u'ク'), (0xFF79, 'M', u'ケ'), (0xFF7A, 'M', u'コ'), (0xFF7B, 'M', u'サ'), (0xFF7C, 'M', u'シ'), (0xFF7D, 'M', u'ス'), (0xFF7E, 'M', u'セ'), (0xFF7F, 'M', u'ソ'), (0xFF80, 'M', u'タ'), (0xFF81, 'M', u'チ'), (0xFF82, 'M', u'ツ'), (0xFF83, 'M', u'テ'), (0xFF84, 'M', u'ト'), (0xFF85, 'M', u'ナ'), (0xFF86, 'M', u'ニ'), (0xFF87, 'M', u'ヌ'), (0xFF88, 'M', u'ネ'), (0xFF89, 'M', u'ノ'), (0xFF8A, 'M', u'ハ'), (0xFF8B, 'M', u'ヒ'), (0xFF8C, 'M', u'フ'), (0xFF8D, 'M', u'ヘ'), (0xFF8E, 'M', u'ホ'), (0xFF8F, 'M', u'マ'), (0xFF90, 'M', u'ミ'), (0xFF91, 'M', u'ム'), (0xFF92, 'M', u'メ'), (0xFF93, 'M', u'モ'), (0xFF94, 'M', u'ヤ'), (0xFF95, 'M', u'ユ'), (0xFF96, 'M', u'ヨ'), (0xFF97, 'M', u'ラ'), (0xFF98, 'M', u'リ'), (0xFF99, 'M', u'ル'), (0xFF9A, 'M', u'レ'), (0xFF9B, 'M', u'ロ'), (0xFF9C, 'M', u'ワ'), (0xFF9D, 'M', u'ン'), (0xFF9E, 'M', u'゙'), (0xFF9F, 'M', u'゚'), (0xFFA0, 'X'), (0xFFA1, 'M', u'ᄀ'), (0xFFA2, 'M', u'ᄁ'), (0xFFA3, 'M', u'ᆪ'), (0xFFA4, 'M', u'ᄂ'), (0xFFA5, 'M', u'ᆬ'), (0xFFA6, 'M', u'ᆭ'), (0xFFA7, 'M', u'ᄃ'), (0xFFA8, 'M', u'ᄄ'), (0xFFA9, 'M', u'ᄅ'), (0xFFAA, 'M', u'ᆰ'), (0xFFAB, 'M', u'ᆱ'), (0xFFAC, 'M', u'ᆲ'), (0xFFAD, 'M', u'ᆳ'), (0xFFAE, 'M', u'ᆴ'), (0xFFAF, 'M', u'ᆵ'), (0xFFB0, 'M', u'ᄚ'), (0xFFB1, 'M', u'ᄆ'), (0xFFB2, 'M', u'ᄇ'), (0xFFB3, 'M', u'ᄈ'), (0xFFB4, 'M', u'ᄡ'), (0xFFB5, 'M', u'ᄉ'), (0xFFB6, 'M', u'ᄊ'), (0xFFB7, 'M', u'ᄋ'), (0xFFB8, 'M', u'ᄌ'), (0xFFB9, 'M', u'ᄍ'), (0xFFBA, 'M', u'ᄎ'), (0xFFBB, 'M', u'ᄏ'), (0xFFBC, 'M', u'ᄐ'), (0xFFBD, 'M', u'ᄑ'), (0xFFBE, 'M', u'ᄒ'), (0xFFBF, 'X'), (0xFFC2, 'M', u'ᅡ'), (0xFFC3, 'M', u'ᅢ'), (0xFFC4, 'M', u'ᅣ'), (0xFFC5, 'M', u'ᅤ'), (0xFFC6, 'M', u'ᅥ'), (0xFFC7, 'M', u'ᅦ'), (0xFFC8, 'X'), (0xFFCA, 'M', u'ᅧ'), (0xFFCB, 'M', u'ᅨ'), (0xFFCC, 
'M', u'ᅩ'), (0xFFCD, 'M', u'ᅪ'), ] def _seg_51(): return [ (0xFFCE, 'M', u'ᅫ'), (0xFFCF, 'M', u'ᅬ'), (0xFFD0, 'X'), (0xFFD2, 'M', u'ᅭ'), (0xFFD3, 'M', u'ᅮ'), (0xFFD4, 'M', u'ᅯ'), (0xFFD5, 'M', u'ᅰ'), (0xFFD6, 'M', u'ᅱ'), (0xFFD7, 'M', u'ᅲ'), (0xFFD8, 'X'), (0xFFDA, 'M', u'ᅳ'), (0xFFDB, 'M', u'ᅴ'), (0xFFDC, 'M', u'ᅵ'), (0xFFDD, 'X'), (0xFFE0, 'M', u'¢'), (0xFFE1, 'M', u'£'), (0xFFE2, 'M', u'¬'), (0xFFE3, '3', u' ̄'), (0xFFE4, 'M', u'¦'), (0xFFE5, 'M', u'¥'), (0xFFE6, 'M', u'₩'), (0xFFE7, 'X'), (0xFFE8, 'M', u'│'), (0xFFE9, 'M', u'←'), (0xFFEA, 'M', u'↑'), (0xFFEB, 'M', u'→'), (0xFFEC, 'M', u'↓'), (0xFFED, 'M', u'■'), (0xFFEE, 'M', u'○'), (0xFFEF, 'X'), (0x10000, 'V'), (0x1000C, 'X'), (0x1000D, 'V'), (0x10027, 'X'), (0x10028, 'V'), (0x1003B, 'X'), (0x1003C, 'V'), (0x1003E, 'X'), (0x1003F, 'V'), (0x1004E, 'X'), (0x10050, 'V'), (0x1005E, 'X'), (0x10080, 'V'), (0x100FB, 'X'), (0x10100, 'V'), (0x10103, 'X'), (0x10107, 'V'), (0x10134, 'X'), (0x10137, 'V'), (0x1018B, 'X'), (0x10190, 'V'), (0x1019C, 'X'), (0x101D0, 'V'), (0x101FE, 'X'), (0x10280, 'V'), (0x1029D, 'X'), (0x102A0, 'V'), (0x102D1, 'X'), (0x10300, 'V'), (0x1031F, 'X'), (0x10320, 'V'), (0x10324, 'X'), (0x10330, 'V'), (0x1034B, 'X'), (0x10380, 'V'), (0x1039E, 'X'), (0x1039F, 'V'), (0x103C4, 'X'), (0x103C8, 'V'), (0x103D6, 'X'), (0x10400, 'M', u'𐐨'), (0x10401, 'M', u'𐐩'), (0x10402, 'M', u'𐐪'), (0x10403, 'M', u'𐐫'), (0x10404, 'M', u'𐐬'), (0x10405, 'M', u'𐐭'), (0x10406, 'M', u'𐐮'), (0x10407, 'M', u'𐐯'), (0x10408, 'M', u'𐐰'), (0x10409, 'M', u'𐐱'), (0x1040A, 'M', u'𐐲'), (0x1040B, 'M', u'𐐳'), (0x1040C, 'M', u'𐐴'), (0x1040D, 'M', u'𐐵'), (0x1040E, 'M', u'𐐶'), (0x1040F, 'M', u'𐐷'), (0x10410, 'M', u'𐐸'), (0x10411, 'M', u'𐐹'), (0x10412, 'M', u'𐐺'), (0x10413, 'M', u'𐐻'), (0x10414, 'M', u'𐐼'), (0x10415, 'M', u'𐐽'), (0x10416, 'M', u'𐐾'), (0x10417, 'M', u'𐐿'), (0x10418, 'M', u'𐑀'), (0x10419, 'M', u'𐑁'), (0x1041A, 'M', u'𐑂'), (0x1041B, 'M', u'𐑃'), (0x1041C, 'M', u'𐑄'), (0x1041D, 'M', u'𐑅'), ] def _seg_52(): return [ (0x1041E, 'M', u'𐑆'), (0x1041F, 'M', u'𐑇'), (0x10420, 'M', u'𐑈'), (0x10421, 'M', u'𐑉'), (0x10422, 'M', u'𐑊'), (0x10423, 'M', u'𐑋'), (0x10424, 'M', u'𐑌'), (0x10425, 'M', u'𐑍'), (0x10426, 'M', u'𐑎'), (0x10427, 'M', u'𐑏'), (0x10428, 'V'), (0x1049E, 'X'), (0x104A0, 'V'), (0x104AA, 'X'), (0x10800, 'V'), (0x10806, 'X'), (0x10808, 'V'), (0x10809, 'X'), (0x1080A, 'V'), (0x10836, 'X'), (0x10837, 'V'), (0x10839, 'X'), (0x1083C, 'V'), (0x1083D, 'X'), (0x1083F, 'V'), (0x10856, 'X'), (0x10857, 'V'), (0x10860, 'X'), (0x10900, 'V'), (0x1091C, 'X'), (0x1091F, 'V'), (0x1093A, 'X'), (0x1093F, 'V'), (0x10940, 'X'), (0x10980, 'V'), (0x109B8, 'X'), (0x109BE, 'V'), (0x109C0, 'X'), (0x10A00, 'V'), (0x10A04, 'X'), (0x10A05, 'V'), (0x10A07, 'X'), (0x10A0C, 'V'), (0x10A14, 'X'), (0x10A15, 'V'), (0x10A18, 'X'), (0x10A19, 'V'), (0x10A34, 'X'), (0x10A38, 'V'), (0x10A3B, 'X'), (0x10A3F, 'V'), (0x10A48, 'X'), (0x10A50, 'V'), (0x10A59, 'X'), (0x10A60, 'V'), (0x10A80, 'X'), (0x10B00, 'V'), (0x10B36, 'X'), (0x10B39, 'V'), (0x10B56, 'X'), (0x10B58, 'V'), (0x10B73, 'X'), (0x10B78, 'V'), (0x10B80, 'X'), (0x10C00, 'V'), (0x10C49, 'X'), (0x10E60, 'V'), (0x10E7F, 'X'), (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), (0x11070, 'X'), (0x11080, 'V'), (0x110BD, 'X'), (0x110BE, 'V'), (0x110C2, 'X'), (0x110D0, 'V'), (0x110E9, 'X'), (0x110F0, 'V'), (0x110FA, 'X'), (0x11100, 'V'), (0x11135, 'X'), (0x11136, 'V'), (0x11144, 'X'), (0x11180, 'V'), (0x111C9, 'X'), (0x111D0, 'V'), (0x111DA, 'X'), (0x11680, 'V'), (0x116B8, 'X'), (0x116C0, 'V'), (0x116CA, 'X'), (0x12000, 'V'), (0x1236F, 'X'), 
(0x12400, 'V'), (0x12463, 'X'), (0x12470, 'V'), (0x12474, 'X'), (0x13000, 'V'), (0x1342F, 'X'), ] def _seg_53(): return [ (0x16800, 'V'), (0x16A39, 'X'), (0x16F00, 'V'), (0x16F45, 'X'), (0x16F50, 'V'), (0x16F7F, 'X'), (0x16F8F, 'V'), (0x16FA0, 'X'), (0x1B000, 'V'), (0x1B002, 'X'), (0x1D000, 'V'), (0x1D0F6, 'X'), (0x1D100, 'V'), (0x1D127, 'X'), (0x1D129, 'V'), (0x1D15E, 'M', u'𝅗𝅥'), (0x1D15F, 'M', u'𝅘𝅥'), (0x1D160, 'M', u'𝅘𝅥𝅮'), (0x1D161, 'M', u'𝅘𝅥𝅯'), (0x1D162, 'M', u'𝅘𝅥𝅰'), (0x1D163, 'M', u'𝅘𝅥𝅱'), (0x1D164, 'M', u'𝅘𝅥𝅲'), (0x1D165, 'V'), (0x1D173, 'X'), (0x1D17B, 'V'), (0x1D1BB, 'M', u'𝆹𝅥'), (0x1D1BC, 'M', u'𝆺𝅥'), (0x1D1BD, 'M', u'𝆹𝅥𝅮'), (0x1D1BE, 'M', u'𝆺𝅥𝅮'), (0x1D1BF, 'M', u'𝆹𝅥𝅯'), (0x1D1C0, 'M', u'𝆺𝅥𝅯'), (0x1D1C1, 'V'), (0x1D1DE, 'X'), (0x1D200, 'V'), (0x1D246, 'X'), (0x1D300, 'V'), (0x1D357, 'X'), (0x1D360, 'V'), (0x1D372, 'X'), (0x1D400, 'M', u'a'), (0x1D401, 'M', u'b'), (0x1D402, 'M', u'c'), (0x1D403, 'M', u'd'), (0x1D404, 'M', u'e'), (0x1D405, 'M', u'f'), (0x1D406, 'M', u'g'), (0x1D407, 'M', u'h'), (0x1D408, 'M', u'i'), (0x1D409, 'M', u'j'), (0x1D40A, 'M', u'k'), (0x1D40B, 'M', u'l'), (0x1D40C, 'M', u'm'), (0x1D40D, 'M', u'n'), (0x1D40E, 'M', u'o'), (0x1D40F, 'M', u'p'), (0x1D410, 'M', u'q'), (0x1D411, 'M', u'r'), (0x1D412, 'M', u's'), (0x1D413, 'M', u't'), (0x1D414, 'M', u'u'), (0x1D415, 'M', u'v'), (0x1D416, 'M', u'w'), (0x1D417, 'M', u'x'), (0x1D418, 'M', u'y'), (0x1D419, 'M', u'z'), (0x1D41A, 'M', u'a'), (0x1D41B, 'M', u'b'), (0x1D41C, 'M', u'c'), (0x1D41D, 'M', u'd'), (0x1D41E, 'M', u'e'), (0x1D41F, 'M', u'f'), (0x1D420, 'M', u'g'), (0x1D421, 'M', u'h'), (0x1D422, 'M', u'i'), (0x1D423, 'M', u'j'), (0x1D424, 'M', u'k'), (0x1D425, 'M', u'l'), (0x1D426, 'M', u'm'), (0x1D427, 'M', u'n'), (0x1D428, 'M', u'o'), (0x1D429, 'M', u'p'), (0x1D42A, 'M', u'q'), (0x1D42B, 'M', u'r'), (0x1D42C, 'M', u's'), (0x1D42D, 'M', u't'), (0x1D42E, 'M', u'u'), (0x1D42F, 'M', u'v'), (0x1D430, 'M', u'w'), (0x1D431, 'M', u'x'), (0x1D432, 'M', u'y'), (0x1D433, 'M', u'z'), (0x1D434, 'M', u'a'), (0x1D435, 'M', u'b'), (0x1D436, 'M', u'c'), (0x1D437, 'M', u'd'), (0x1D438, 'M', u'e'), (0x1D439, 'M', u'f'), (0x1D43A, 'M', u'g'), (0x1D43B, 'M', u'h'), (0x1D43C, 'M', u'i'), ] def _seg_54(): return [ (0x1D43D, 'M', u'j'), (0x1D43E, 'M', u'k'), (0x1D43F, 'M', u'l'), (0x1D440, 'M', u'm'), (0x1D441, 'M', u'n'), (0x1D442, 'M', u'o'), (0x1D443, 'M', u'p'), (0x1D444, 'M', u'q'), (0x1D445, 'M', u'r'), (0x1D446, 'M', u's'), (0x1D447, 'M', u't'), (0x1D448, 'M', u'u'), (0x1D449, 'M', u'v'), (0x1D44A, 'M', u'w'), (0x1D44B, 'M', u'x'), (0x1D44C, 'M', u'y'), (0x1D44D, 'M', u'z'), (0x1D44E, 'M', u'a'), (0x1D44F, 'M', u'b'), (0x1D450, 'M', u'c'), (0x1D451, 'M', u'd'), (0x1D452, 'M', u'e'), (0x1D453, 'M', u'f'), (0x1D454, 'M', u'g'), (0x1D455, 'X'), (0x1D456, 'M', u'i'), (0x1D457, 'M', u'j'), (0x1D458, 'M', u'k'), (0x1D459, 'M', u'l'), (0x1D45A, 'M', u'm'), (0x1D45B, 'M', u'n'), (0x1D45C, 'M', u'o'), (0x1D45D, 'M', u'p'), (0x1D45E, 'M', u'q'), (0x1D45F, 'M', u'r'), (0x1D460, 'M', u's'), (0x1D461, 'M', u't'), (0x1D462, 'M', u'u'), (0x1D463, 'M', u'v'), (0x1D464, 'M', u'w'), (0x1D465, 'M', u'x'), (0x1D466, 'M', u'y'), (0x1D467, 'M', u'z'), (0x1D468, 'M', u'a'), (0x1D469, 'M', u'b'), (0x1D46A, 'M', u'c'), (0x1D46B, 'M', u'd'), (0x1D46C, 'M', u'e'), (0x1D46D, 'M', u'f'), (0x1D46E, 'M', u'g'), (0x1D46F, 'M', u'h'), (0x1D470, 'M', u'i'), (0x1D471, 'M', u'j'), (0x1D472, 'M', u'k'), (0x1D473, 'M', u'l'), (0x1D474, 'M', u'm'), (0x1D475, 'M', u'n'), (0x1D476, 'M', u'o'), (0x1D477, 'M', u'p'), (0x1D478, 'M', u'q'), (0x1D479, 'M', u'r'), 
(0x1D47A, 'M', u's'), (0x1D47B, 'M', u't'), (0x1D47C, 'M', u'u'), (0x1D47D, 'M', u'v'), (0x1D47E, 'M', u'w'), (0x1D47F, 'M', u'x'), (0x1D480, 'M', u'y'), (0x1D481, 'M', u'z'), (0x1D482, 'M', u'a'), (0x1D483, 'M', u'b'), (0x1D484, 'M', u'c'), (0x1D485, 'M', u'd'), (0x1D486, 'M', u'e'), (0x1D487, 'M', u'f'), (0x1D488, 'M', u'g'), (0x1D489, 'M', u'h'), (0x1D48A, 'M', u'i'), (0x1D48B, 'M', u'j'), (0x1D48C, 'M', u'k'), (0x1D48D, 'M', u'l'), (0x1D48E, 'M', u'm'), (0x1D48F, 'M', u'n'), (0x1D490, 'M', u'o'), (0x1D491, 'M', u'p'), (0x1D492, 'M', u'q'), (0x1D493, 'M', u'r'), (0x1D494, 'M', u's'), (0x1D495, 'M', u't'), (0x1D496, 'M', u'u'), (0x1D497, 'M', u'v'), (0x1D498, 'M', u'w'), (0x1D499, 'M', u'x'), (0x1D49A, 'M', u'y'), (0x1D49B, 'M', u'z'), (0x1D49C, 'M', u'a'), (0x1D49D, 'X'), (0x1D49E, 'M', u'c'), (0x1D49F, 'M', u'd'), (0x1D4A0, 'X'), ] def _seg_55(): return [ (0x1D4A2, 'M', u'g'), (0x1D4A3, 'X'), (0x1D4A5, 'M', u'j'), (0x1D4A6, 'M', u'k'), (0x1D4A7, 'X'), (0x1D4A9, 'M', u'n'), (0x1D4AA, 'M', u'o'), (0x1D4AB, 'M', u'p'), (0x1D4AC, 'M', u'q'), (0x1D4AD, 'X'), (0x1D4AE, 'M', u's'), (0x1D4AF, 'M', u't'), (0x1D4B0, 'M', u'u'), (0x1D4B1, 'M', u'v'), (0x1D4B2, 'M', u'w'), (0x1D4B3, 'M', u'x'), (0x1D4B4, 'M', u'y'), (0x1D4B5, 'M', u'z'), (0x1D4B6, 'M', u'a'), (0x1D4B7, 'M', u'b'), (0x1D4B8, 'M', u'c'), (0x1D4B9, 'M', u'd'), (0x1D4BA, 'X'), (0x1D4BB, 'M', u'f'), (0x1D4BC, 'X'), (0x1D4BD, 'M', u'h'), (0x1D4BE, 'M', u'i'), (0x1D4BF, 'M', u'j'), (0x1D4C0, 'M', u'k'), (0x1D4C1, 'M', u'l'), (0x1D4C2, 'M', u'm'), (0x1D4C3, 'M', u'n'), (0x1D4C4, 'X'), (0x1D4C5, 'M', u'p'), (0x1D4C6, 'M', u'q'), (0x1D4C7, 'M', u'r'), (0x1D4C8, 'M', u's'), (0x1D4C9, 'M', u't'), (0x1D4CA, 'M', u'u'), (0x1D4CB, 'M', u'v'), (0x1D4CC, 'M', u'w'), (0x1D4CD, 'M', u'x'), (0x1D4CE, 'M', u'y'), (0x1D4CF, 'M', u'z'), (0x1D4D0, 'M', u'a'), (0x1D4D1, 'M', u'b'), (0x1D4D2, 'M', u'c'), (0x1D4D3, 'M', u'd'), (0x1D4D4, 'M', u'e'), (0x1D4D5, 'M', u'f'), (0x1D4D6, 'M', u'g'), (0x1D4D7, 'M', u'h'), (0x1D4D8, 'M', u'i'), (0x1D4D9, 'M', u'j'), (0x1D4DA, 'M', u'k'), (0x1D4DB, 'M', u'l'), (0x1D4DC, 'M', u'm'), (0x1D4DD, 'M', u'n'), (0x1D4DE, 'M', u'o'), (0x1D4DF, 'M', u'p'), (0x1D4E0, 'M', u'q'), (0x1D4E1, 'M', u'r'), (0x1D4E2, 'M', u's'), (0x1D4E3, 'M', u't'), (0x1D4E4, 'M', u'u'), (0x1D4E5, 'M', u'v'), (0x1D4E6, 'M', u'w'), (0x1D4E7, 'M', u'x'), (0x1D4E8, 'M', u'y'), (0x1D4E9, 'M', u'z'), (0x1D4EA, 'M', u'a'), (0x1D4EB, 'M', u'b'), (0x1D4EC, 'M', u'c'), (0x1D4ED, 'M', u'd'), (0x1D4EE, 'M', u'e'), (0x1D4EF, 'M', u'f'), (0x1D4F0, 'M', u'g'), (0x1D4F1, 'M', u'h'), (0x1D4F2, 'M', u'i'), (0x1D4F3, 'M', u'j'), (0x1D4F4, 'M', u'k'), (0x1D4F5, 'M', u'l'), (0x1D4F6, 'M', u'm'), (0x1D4F7, 'M', u'n'), (0x1D4F8, 'M', u'o'), (0x1D4F9, 'M', u'p'), (0x1D4FA, 'M', u'q'), (0x1D4FB, 'M', u'r'), (0x1D4FC, 'M', u's'), (0x1D4FD, 'M', u't'), (0x1D4FE, 'M', u'u'), (0x1D4FF, 'M', u'v'), (0x1D500, 'M', u'w'), (0x1D501, 'M', u'x'), (0x1D502, 'M', u'y'), (0x1D503, 'M', u'z'), (0x1D504, 'M', u'a'), (0x1D505, 'M', u'b'), (0x1D506, 'X'), (0x1D507, 'M', u'd'), ] def _seg_56(): return [ (0x1D508, 'M', u'e'), (0x1D509, 'M', u'f'), (0x1D50A, 'M', u'g'), (0x1D50B, 'X'), (0x1D50D, 'M', u'j'), (0x1D50E, 'M', u'k'), (0x1D50F, 'M', u'l'), (0x1D510, 'M', u'm'), (0x1D511, 'M', u'n'), (0x1D512, 'M', u'o'), (0x1D513, 'M', u'p'), (0x1D514, 'M', u'q'), (0x1D515, 'X'), (0x1D516, 'M', u's'), (0x1D517, 'M', u't'), (0x1D518, 'M', u'u'), (0x1D519, 'M', u'v'), (0x1D51A, 'M', u'w'), (0x1D51B, 'M', u'x'), (0x1D51C, 'M', u'y'), (0x1D51D, 'X'), (0x1D51E, 'M', u'a'), (0x1D51F, 'M', u'b'), (0x1D520, 
'M', u'c'), (0x1D521, 'M', u'd'), (0x1D522, 'M', u'e'), (0x1D523, 'M', u'f'), (0x1D524, 'M', u'g'), (0x1D525, 'M', u'h'), (0x1D526, 'M', u'i'), (0x1D527, 'M', u'j'), (0x1D528, 'M', u'k'), (0x1D529, 'M', u'l'), (0x1D52A, 'M', u'm'), (0x1D52B, 'M', u'n'), (0x1D52C, 'M', u'o'), (0x1D52D, 'M', u'p'), (0x1D52E, 'M', u'q'), (0x1D52F, 'M', u'r'), (0x1D530, 'M', u's'), (0x1D531, 'M', u't'), (0x1D532, 'M', u'u'), (0x1D533, 'M', u'v'), (0x1D534, 'M', u'w'), (0x1D535, 'M', u'x'), (0x1D536, 'M', u'y'), (0x1D537, 'M', u'z'), (0x1D538, 'M', u'a'), (0x1D539, 'M', u'b'), (0x1D53A, 'X'), (0x1D53B, 'M', u'd'), (0x1D53C, 'M', u'e'), (0x1D53D, 'M', u'f'), (0x1D53E, 'M', u'g'), (0x1D53F, 'X'), (0x1D540, 'M', u'i'), (0x1D541, 'M', u'j'), (0x1D542, 'M', u'k'), (0x1D543, 'M', u'l'), (0x1D544, 'M', u'm'), (0x1D545, 'X'), (0x1D546, 'M', u'o'), (0x1D547, 'X'), (0x1D54A, 'M', u's'), (0x1D54B, 'M', u't'), (0x1D54C, 'M', u'u'), (0x1D54D, 'M', u'v'), (0x1D54E, 'M', u'w'), (0x1D54F, 'M', u'x'), (0x1D550, 'M', u'y'), (0x1D551, 'X'), (0x1D552, 'M', u'a'), (0x1D553, 'M', u'b'), (0x1D554, 'M', u'c'), (0x1D555, 'M', u'd'), (0x1D556, 'M', u'e'), (0x1D557, 'M', u'f'), (0x1D558, 'M', u'g'), (0x1D559, 'M', u'h'), (0x1D55A, 'M', u'i'), (0x1D55B, 'M', u'j'), (0x1D55C, 'M', u'k'), (0x1D55D, 'M', u'l'), (0x1D55E, 'M', u'm'), (0x1D55F, 'M', u'n'), (0x1D560, 'M', u'o'), (0x1D561, 'M', u'p'), (0x1D562, 'M', u'q'), (0x1D563, 'M', u'r'), (0x1D564, 'M', u's'), (0x1D565, 'M', u't'), (0x1D566, 'M', u'u'), (0x1D567, 'M', u'v'), (0x1D568, 'M', u'w'), (0x1D569, 'M', u'x'), (0x1D56A, 'M', u'y'), (0x1D56B, 'M', u'z'), (0x1D56C, 'M', u'a'), (0x1D56D, 'M', u'b'), (0x1D56E, 'M', u'c'), ] def _seg_57(): return [ (0x1D56F, 'M', u'd'), (0x1D570, 'M', u'e'), (0x1D571, 'M', u'f'), (0x1D572, 'M', u'g'), (0x1D573, 'M', u'h'), (0x1D574, 'M', u'i'), (0x1D575, 'M', u'j'), (0x1D576, 'M', u'k'), (0x1D577, 'M', u'l'), (0x1D578, 'M', u'm'), (0x1D579, 'M', u'n'), (0x1D57A, 'M', u'o'), (0x1D57B, 'M', u'p'), (0x1D57C, 'M', u'q'), (0x1D57D, 'M', u'r'), (0x1D57E, 'M', u's'), (0x1D57F, 'M', u't'), (0x1D580, 'M', u'u'), (0x1D581, 'M', u'v'), (0x1D582, 'M', u'w'), (0x1D583, 'M', u'x'), (0x1D584, 'M', u'y'), (0x1D585, 'M', u'z'), (0x1D586, 'M', u'a'), (0x1D587, 'M', u'b'), (0x1D588, 'M', u'c'), (0x1D589, 'M', u'd'), (0x1D58A, 'M', u'e'), (0x1D58B, 'M', u'f'), (0x1D58C, 'M', u'g'), (0x1D58D, 'M', u'h'), (0x1D58E, 'M', u'i'), (0x1D58F, 'M', u'j'), (0x1D590, 'M', u'k'), (0x1D591, 'M', u'l'), (0x1D592, 'M', u'm'), (0x1D593, 'M', u'n'), (0x1D594, 'M', u'o'), (0x1D595, 'M', u'p'), (0x1D596, 'M', u'q'), (0x1D597, 'M', u'r'), (0x1D598, 'M', u's'), (0x1D599, 'M', u't'), (0x1D59A, 'M', u'u'), (0x1D59B, 'M', u'v'), (0x1D59C, 'M', u'w'), (0x1D59D, 'M', u'x'), (0x1D59E, 'M', u'y'), (0x1D59F, 'M', u'z'), (0x1D5A0, 'M', u'a'), (0x1D5A1, 'M', u'b'), (0x1D5A2, 'M', u'c'), (0x1D5A3, 'M', u'd'), (0x1D5A4, 'M', u'e'), (0x1D5A5, 'M', u'f'), (0x1D5A6, 'M', u'g'), (0x1D5A7, 'M', u'h'), (0x1D5A8, 'M', u'i'), (0x1D5A9, 'M', u'j'), (0x1D5AA, 'M', u'k'), (0x1D5AB, 'M', u'l'), (0x1D5AC, 'M', u'm'), (0x1D5AD, 'M', u'n'), (0x1D5AE, 'M', u'o'), (0x1D5AF, 'M', u'p'), (0x1D5B0, 'M', u'q'), (0x1D5B1, 'M', u'r'), (0x1D5B2, 'M', u's'), (0x1D5B3, 'M', u't'), (0x1D5B4, 'M', u'u'), (0x1D5B5, 'M', u'v'), (0x1D5B6, 'M', u'w'), (0x1D5B7, 'M', u'x'), (0x1D5B8, 'M', u'y'), (0x1D5B9, 'M', u'z'), (0x1D5BA, 'M', u'a'), (0x1D5BB, 'M', u'b'), (0x1D5BC, 'M', u'c'), (0x1D5BD, 'M', u'd'), (0x1D5BE, 'M', u'e'), (0x1D5BF, 'M', u'f'), (0x1D5C0, 'M', u'g'), (0x1D5C1, 'M', u'h'), (0x1D5C2, 'M', u'i'), (0x1D5C3, 'M', u'j'), 
(0x1D5C4, 'M', u'k'), (0x1D5C5, 'M', u'l'), (0x1D5C6, 'M', u'm'), (0x1D5C7, 'M', u'n'), (0x1D5C8, 'M', u'o'), (0x1D5C9, 'M', u'p'), (0x1D5CA, 'M', u'q'), (0x1D5CB, 'M', u'r'), (0x1D5CC, 'M', u's'), (0x1D5CD, 'M', u't'), (0x1D5CE, 'M', u'u'), (0x1D5CF, 'M', u'v'), (0x1D5D0, 'M', u'w'), (0x1D5D1, 'M', u'x'), (0x1D5D2, 'M', u'y'), ] def _seg_58(): return [ (0x1D5D3, 'M', u'z'), (0x1D5D4, 'M', u'a'), (0x1D5D5, 'M', u'b'), (0x1D5D6, 'M', u'c'), (0x1D5D7, 'M', u'd'), (0x1D5D8, 'M', u'e'), (0x1D5D9, 'M', u'f'), (0x1D5DA, 'M', u'g'), (0x1D5DB, 'M', u'h'), (0x1D5DC, 'M', u'i'), (0x1D5DD, 'M', u'j'), (0x1D5DE, 'M', u'k'), (0x1D5DF, 'M', u'l'), (0x1D5E0, 'M', u'm'), (0x1D5E1, 'M', u'n'), (0x1D5E2, 'M', u'o'), (0x1D5E3, 'M', u'p'), (0x1D5E4, 'M', u'q'), (0x1D5E5, 'M', u'r'), (0x1D5E6, 'M', u's'), (0x1D5E7, 'M', u't'), (0x1D5E8, 'M', u'u'), (0x1D5E9, 'M', u'v'), (0x1D5EA, 'M', u'w'), (0x1D5EB, 'M', u'x'), (0x1D5EC, 'M', u'y'), (0x1D5ED, 'M', u'z'), (0x1D5EE, 'M', u'a'), (0x1D5EF, 'M', u'b'), (0x1D5F0, 'M', u'c'), (0x1D5F1, 'M', u'd'), (0x1D5F2, 'M', u'e'), (0x1D5F3, 'M', u'f'), (0x1D5F4, 'M', u'g'), (0x1D5F5, 'M', u'h'), (0x1D5F6, 'M', u'i'), (0x1D5F7, 'M', u'j'), (0x1D5F8, 'M', u'k'), (0x1D5F9, 'M', u'l'), (0x1D5FA, 'M', u'm'), (0x1D5FB, 'M', u'n'), (0x1D5FC, 'M', u'o'), (0x1D5FD, 'M', u'p'), (0x1D5FE, 'M', u'q'), (0x1D5FF, 'M', u'r'), (0x1D600, 'M', u's'), (0x1D601, 'M', u't'), (0x1D602, 'M', u'u'), (0x1D603, 'M', u'v'), (0x1D604, 'M', u'w'), (0x1D605, 'M', u'x'), (0x1D606, 'M', u'y'), (0x1D607, 'M', u'z'), (0x1D608, 'M', u'a'), (0x1D609, 'M', u'b'), (0x1D60A, 'M', u'c'), (0x1D60B, 'M', u'd'), (0x1D60C, 'M', u'e'), (0x1D60D, 'M', u'f'), (0x1D60E, 'M', u'g'), (0x1D60F, 'M', u'h'), (0x1D610, 'M', u'i'), (0x1D611, 'M', u'j'), (0x1D612, 'M', u'k'), (0x1D613, 'M', u'l'), (0x1D614, 'M', u'm'), (0x1D615, 'M', u'n'), (0x1D616, 'M', u'o'), (0x1D617, 'M', u'p'), (0x1D618, 'M', u'q'), (0x1D619, 'M', u'r'), (0x1D61A, 'M', u's'), (0x1D61B, 'M', u't'), (0x1D61C, 'M', u'u'), (0x1D61D, 'M', u'v'), (0x1D61E, 'M', u'w'), (0x1D61F, 'M', u'x'), (0x1D620, 'M', u'y'), (0x1D621, 'M', u'z'), (0x1D622, 'M', u'a'), (0x1D623, 'M', u'b'), (0x1D624, 'M', u'c'), (0x1D625, 'M', u'd'), (0x1D626, 'M', u'e'), (0x1D627, 'M', u'f'), (0x1D628, 'M', u'g'), (0x1D629, 'M', u'h'), (0x1D62A, 'M', u'i'), (0x1D62B, 'M', u'j'), (0x1D62C, 'M', u'k'), (0x1D62D, 'M', u'l'), (0x1D62E, 'M', u'm'), (0x1D62F, 'M', u'n'), (0x1D630, 'M', u'o'), (0x1D631, 'M', u'p'), (0x1D632, 'M', u'q'), (0x1D633, 'M', u'r'), (0x1D634, 'M', u's'), (0x1D635, 'M', u't'), (0x1D636, 'M', u'u'), ] def _seg_59(): return [ (0x1D637, 'M', u'v'), (0x1D638, 'M', u'w'), (0x1D639, 'M', u'x'), (0x1D63A, 'M', u'y'), (0x1D63B, 'M', u'z'), (0x1D63C, 'M', u'a'), (0x1D63D, 'M', u'b'), (0x1D63E, 'M', u'c'), (0x1D63F, 'M', u'd'), (0x1D640, 'M', u'e'), (0x1D641, 'M', u'f'), (0x1D642, 'M', u'g'), (0x1D643, 'M', u'h'), (0x1D644, 'M', u'i'), (0x1D645, 'M', u'j'), (0x1D646, 'M', u'k'), (0x1D647, 'M', u'l'), (0x1D648, 'M', u'm'), (0x1D649, 'M', u'n'), (0x1D64A, 'M', u'o'), (0x1D64B, 'M', u'p'), (0x1D64C, 'M', u'q'), (0x1D64D, 'M', u'r'), (0x1D64E, 'M', u's'), (0x1D64F, 'M', u't'), (0x1D650, 'M', u'u'), (0x1D651, 'M', u'v'), (0x1D652, 'M', u'w'), (0x1D653, 'M', u'x'), (0x1D654, 'M', u'y'), (0x1D655, 'M', u'z'), (0x1D656, 'M', u'a'), (0x1D657, 'M', u'b'), (0x1D658, 'M', u'c'), (0x1D659, 'M', u'd'), (0x1D65A, 'M', u'e'), (0x1D65B, 'M', u'f'), (0x1D65C, 'M', u'g'), (0x1D65D, 'M', u'h'), (0x1D65E, 'M', u'i'), (0x1D65F, 'M', u'j'), (0x1D660, 'M', u'k'), (0x1D661, 'M', u'l'), (0x1D662, 'M', u'm'), 
(0x1D663, 'M', u'n'), (0x1D664, 'M', u'o'), (0x1D665, 'M', u'p'), (0x1D666, 'M', u'q'), (0x1D667, 'M', u'r'), (0x1D668, 'M', u's'), (0x1D669, 'M', u't'), (0x1D66A, 'M', u'u'), (0x1D66B, 'M', u'v'), (0x1D66C, 'M', u'w'), (0x1D66D, 'M', u'x'), (0x1D66E, 'M', u'y'), (0x1D66F, 'M', u'z'), (0x1D670, 'M', u'a'), (0x1D671, 'M', u'b'), (0x1D672, 'M', u'c'), (0x1D673, 'M', u'd'), (0x1D674, 'M', u'e'), (0x1D675, 'M', u'f'), (0x1D676, 'M', u'g'), (0x1D677, 'M', u'h'), (0x1D678, 'M', u'i'), (0x1D679, 'M', u'j'), (0x1D67A, 'M', u'k'), (0x1D67B, 'M', u'l'), (0x1D67C, 'M', u'm'), (0x1D67D, 'M', u'n'), (0x1D67E, 'M', u'o'), (0x1D67F, 'M', u'p'), (0x1D680, 'M', u'q'), (0x1D681, 'M', u'r'), (0x1D682, 'M', u's'), (0x1D683, 'M', u't'), (0x1D684, 'M', u'u'), (0x1D685, 'M', u'v'), (0x1D686, 'M', u'w'), (0x1D687, 'M', u'x'), (0x1D688, 'M', u'y'), (0x1D689, 'M', u'z'), (0x1D68A, 'M', u'a'), (0x1D68B, 'M', u'b'), (0x1D68C, 'M', u'c'), (0x1D68D, 'M', u'd'), (0x1D68E, 'M', u'e'), (0x1D68F, 'M', u'f'), (0x1D690, 'M', u'g'), (0x1D691, 'M', u'h'), (0x1D692, 'M', u'i'), (0x1D693, 'M', u'j'), (0x1D694, 'M', u'k'), (0x1D695, 'M', u'l'), (0x1D696, 'M', u'm'), (0x1D697, 'M', u'n'), (0x1D698, 'M', u'o'), (0x1D699, 'M', u'p'), (0x1D69A, 'M', u'q'), ] def _seg_60(): return [ (0x1D69B, 'M', u'r'), (0x1D69C, 'M', u's'), (0x1D69D, 'M', u't'), (0x1D69E, 'M', u'u'), (0x1D69F, 'M', u'v'), (0x1D6A0, 'M', u'w'), (0x1D6A1, 'M', u'x'), (0x1D6A2, 'M', u'y'), (0x1D6A3, 'M', u'z'), (0x1D6A4, 'M', u'ı'), (0x1D6A5, 'M', u'ȷ'), (0x1D6A6, 'X'), (0x1D6A8, 'M', u'α'), (0x1D6A9, 'M', u'β'), (0x1D6AA, 'M', u'γ'), (0x1D6AB, 'M', u'δ'), (0x1D6AC, 'M', u'ε'), (0x1D6AD, 'M', u'ζ'), (0x1D6AE, 'M', u'η'), (0x1D6AF, 'M', u'θ'), (0x1D6B0, 'M', u'ι'), (0x1D6B1, 'M', u'κ'), (0x1D6B2, 'M', u'λ'), (0x1D6B3, 'M', u'μ'), (0x1D6B4, 'M', u'ν'), (0x1D6B5, 'M', u'ξ'), (0x1D6B6, 'M', u'ο'), (0x1D6B7, 'M', u'π'), (0x1D6B8, 'M', u'ρ'), (0x1D6B9, 'M', u'θ'), (0x1D6BA, 'M', u'σ'), (0x1D6BB, 'M', u'τ'), (0x1D6BC, 'M', u'υ'), (0x1D6BD, 'M', u'φ'), (0x1D6BE, 'M', u'χ'), (0x1D6BF, 'M', u'ψ'), (0x1D6C0, 'M', u'ω'), (0x1D6C1, 'M', u'∇'), (0x1D6C2, 'M', u'α'), (0x1D6C3, 'M', u'β'), (0x1D6C4, 'M', u'γ'), (0x1D6C5, 'M', u'δ'), (0x1D6C6, 'M', u'ε'), (0x1D6C7, 'M', u'ζ'), (0x1D6C8, 'M', u'η'), (0x1D6C9, 'M', u'θ'), (0x1D6CA, 'M', u'ι'), (0x1D6CB, 'M', u'κ'), (0x1D6CC, 'M', u'λ'), (0x1D6CD, 'M', u'μ'), (0x1D6CE, 'M', u'ν'), (0x1D6CF, 'M', u'ξ'), (0x1D6D0, 'M', u'ο'), (0x1D6D1, 'M', u'π'), (0x1D6D2, 'M', u'ρ'), (0x1D6D3, 'M', u'σ'), (0x1D6D5, 'M', u'τ'), (0x1D6D6, 'M', u'υ'), (0x1D6D7, 'M', u'φ'), (0x1D6D8, 'M', u'χ'), (0x1D6D9, 'M', u'ψ'), (0x1D6DA, 'M', u'ω'), (0x1D6DB, 'M', u'∂'), (0x1D6DC, 'M', u'ε'), (0x1D6DD, 'M', u'θ'), (0x1D6DE, 'M', u'κ'), (0x1D6DF, 'M', u'φ'), (0x1D6E0, 'M', u'ρ'), (0x1D6E1, 'M', u'π'), (0x1D6E2, 'M', u'α'), (0x1D6E3, 'M', u'β'), (0x1D6E4, 'M', u'γ'), (0x1D6E5, 'M', u'δ'), (0x1D6E6, 'M', u'ε'), (0x1D6E7, 'M', u'ζ'), (0x1D6E8, 'M', u'η'), (0x1D6E9, 'M', u'θ'), (0x1D6EA, 'M', u'ι'), (0x1D6EB, 'M', u'κ'), (0x1D6EC, 'M', u'λ'), (0x1D6ED, 'M', u'μ'), (0x1D6EE, 'M', u'ν'), (0x1D6EF, 'M', u'ξ'), (0x1D6F0, 'M', u'ο'), (0x1D6F1, 'M', u'π'), (0x1D6F2, 'M', u'ρ'), (0x1D6F3, 'M', u'θ'), (0x1D6F4, 'M', u'σ'), (0x1D6F5, 'M', u'τ'), (0x1D6F6, 'M', u'υ'), (0x1D6F7, 'M', u'φ'), (0x1D6F8, 'M', u'χ'), (0x1D6F9, 'M', u'ψ'), (0x1D6FA, 'M', u'ω'), (0x1D6FB, 'M', u'∇'), (0x1D6FC, 'M', u'α'), (0x1D6FD, 'M', u'β'), (0x1D6FE, 'M', u'γ'), (0x1D6FF, 'M', u'δ'), (0x1D700, 'M', u'ε'), ] def _seg_61(): return [ (0x1D701, 'M', u'ζ'), (0x1D702, 'M', u'η'), (0x1D703, 'M', u'θ'), (0x1D704, 
'M', u'ι'), (0x1D705, 'M', u'κ'), (0x1D706, 'M', u'λ'), (0x1D707, 'M', u'μ'), (0x1D708, 'M', u'ν'), (0x1D709, 'M', u'ξ'), (0x1D70A, 'M', u'ο'), (0x1D70B, 'M', u'π'), (0x1D70C, 'M', u'ρ'), (0x1D70D, 'M', u'σ'), (0x1D70F, 'M', u'τ'), (0x1D710, 'M', u'υ'), (0x1D711, 'M', u'φ'), (0x1D712, 'M', u'χ'), (0x1D713, 'M', u'ψ'), (0x1D714, 'M', u'ω'), (0x1D715, 'M', u'∂'), (0x1D716, 'M', u'ε'), (0x1D717, 'M', u'θ'), (0x1D718, 'M', u'κ'), (0x1D719, 'M', u'φ'), (0x1D71A, 'M', u'ρ'), (0x1D71B, 'M', u'π'), (0x1D71C, 'M', u'α'), (0x1D71D, 'M', u'β'), (0x1D71E, 'M', u'γ'), (0x1D71F, 'M', u'δ'), (0x1D720, 'M', u'ε'), (0x1D721, 'M', u'ζ'), (0x1D722, 'M', u'η'), (0x1D723, 'M', u'θ'), (0x1D724, 'M', u'ι'), (0x1D725, 'M', u'κ'), (0x1D726, 'M', u'λ'), (0x1D727, 'M', u'μ'), (0x1D728, 'M', u'ν'), (0x1D729, 'M', u'ξ'), (0x1D72A, 'M', u'ο'), (0x1D72B, 'M', u'π'), (0x1D72C, 'M', u'ρ'), (0x1D72D, 'M', u'θ'), (0x1D72E, 'M', u'σ'), (0x1D72F, 'M', u'τ'), (0x1D730, 'M', u'υ'), (0x1D731, 'M', u'φ'), (0x1D732, 'M', u'χ'), (0x1D733, 'M', u'ψ'), (0x1D734, 'M', u'ω'), (0x1D735, 'M', u'∇'), (0x1D736, 'M', u'α'), (0x1D737, 'M', u'β'), (0x1D738, 'M', u'γ'), (0x1D739, 'M', u'δ'), (0x1D73A, 'M', u'ε'), (0x1D73B, 'M', u'ζ'), (0x1D73C, 'M', u'η'), (0x1D73D, 'M', u'θ'), (0x1D73E, 'M', u'ι'), (0x1D73F, 'M', u'κ'), (0x1D740, 'M', u'λ'), (0x1D741, 'M', u'μ'), (0x1D742, 'M', u'ν'), (0x1D743, 'M', u'ξ'), (0x1D744, 'M', u'ο'), (0x1D745, 'M', u'π'), (0x1D746, 'M', u'ρ'), (0x1D747, 'M', u'σ'), (0x1D749, 'M', u'τ'), (0x1D74A, 'M', u'υ'), (0x1D74B, 'M', u'φ'), (0x1D74C, 'M', u'χ'), (0x1D74D, 'M', u'ψ'), (0x1D74E, 'M', u'ω'), (0x1D74F, 'M', u'∂'), (0x1D750, 'M', u'ε'), (0x1D751, 'M', u'θ'), (0x1D752, 'M', u'κ'), (0x1D753, 'M', u'φ'), (0x1D754, 'M', u'ρ'), (0x1D755, 'M', u'π'), (0x1D756, 'M', u'α'), (0x1D757, 'M', u'β'), (0x1D758, 'M', u'γ'), (0x1D759, 'M', u'δ'), (0x1D75A, 'M', u'ε'), (0x1D75B, 'M', u'ζ'), (0x1D75C, 'M', u'η'), (0x1D75D, 'M', u'θ'), (0x1D75E, 'M', u'ι'), (0x1D75F, 'M', u'κ'), (0x1D760, 'M', u'λ'), (0x1D761, 'M', u'μ'), (0x1D762, 'M', u'ν'), (0x1D763, 'M', u'ξ'), (0x1D764, 'M', u'ο'), (0x1D765, 'M', u'π'), (0x1D766, 'M', u'ρ'), ] def _seg_62(): return [ (0x1D767, 'M', u'θ'), (0x1D768, 'M', u'σ'), (0x1D769, 'M', u'τ'), (0x1D76A, 'M', u'υ'), (0x1D76B, 'M', u'φ'), (0x1D76C, 'M', u'χ'), (0x1D76D, 'M', u'ψ'), (0x1D76E, 'M', u'ω'), (0x1D76F, 'M', u'∇'), (0x1D770, 'M', u'α'), (0x1D771, 'M', u'β'), (0x1D772, 'M', u'γ'), (0x1D773, 'M', u'δ'), (0x1D774, 'M', u'ε'), (0x1D775, 'M', u'ζ'), (0x1D776, 'M', u'η'), (0x1D777, 'M', u'θ'), (0x1D778, 'M', u'ι'), (0x1D779, 'M', u'κ'), (0x1D77A, 'M', u'λ'), (0x1D77B, 'M', u'μ'), (0x1D77C, 'M', u'ν'), (0x1D77D, 'M', u'ξ'), (0x1D77E, 'M', u'ο'), (0x1D77F, 'M', u'π'), (0x1D780, 'M', u'ρ'), (0x1D781, 'M', u'σ'), (0x1D783, 'M', u'τ'), (0x1D784, 'M', u'υ'), (0x1D785, 'M', u'φ'), (0x1D786, 'M', u'χ'), (0x1D787, 'M', u'ψ'), (0x1D788, 'M', u'ω'), (0x1D789, 'M', u'∂'), (0x1D78A, 'M', u'ε'), (0x1D78B, 'M', u'θ'), (0x1D78C, 'M', u'κ'), (0x1D78D, 'M', u'φ'), (0x1D78E, 'M', u'ρ'), (0x1D78F, 'M', u'π'), (0x1D790, 'M', u'α'), (0x1D791, 'M', u'β'), (0x1D792, 'M', u'γ'), (0x1D793, 'M', u'δ'), (0x1D794, 'M', u'ε'), (0x1D795, 'M', u'ζ'), (0x1D796, 'M', u'η'), (0x1D797, 'M', u'θ'), (0x1D798, 'M', u'ι'), (0x1D799, 'M', u'κ'), (0x1D79A, 'M', u'λ'), (0x1D79B, 'M', u'μ'), (0x1D79C, 'M', u'ν'), (0x1D79D, 'M', u'ξ'), (0x1D79E, 'M', u'ο'), (0x1D79F, 'M', u'π'), (0x1D7A0, 'M', u'ρ'), (0x1D7A1, 'M', u'θ'), (0x1D7A2, 'M', u'σ'), (0x1D7A3, 'M', u'τ'), (0x1D7A4, 'M', u'υ'), (0x1D7A5, 'M', u'φ'), (0x1D7A6, 'M', u'χ'), (0x1D7A7, 'M', 
u'ψ'), (0x1D7A8, 'M', u'ω'), (0x1D7A9, 'M', u'∇'), (0x1D7AA, 'M', u'α'), (0x1D7AB, 'M', u'β'), (0x1D7AC, 'M', u'γ'), (0x1D7AD, 'M', u'δ'), (0x1D7AE, 'M', u'ε'), (0x1D7AF, 'M', u'ζ'), (0x1D7B0, 'M', u'η'), (0x1D7B1, 'M', u'θ'), (0x1D7B2, 'M', u'ι'), (0x1D7B3, 'M', u'κ'), (0x1D7B4, 'M', u'λ'), (0x1D7B5, 'M', u'μ'), (0x1D7B6, 'M', u'ν'), (0x1D7B7, 'M', u'ξ'), (0x1D7B8, 'M', u'ο'), (0x1D7B9, 'M', u'π'), (0x1D7BA, 'M', u'ρ'), (0x1D7BB, 'M', u'σ'), (0x1D7BD, 'M', u'τ'), (0x1D7BE, 'M', u'υ'), (0x1D7BF, 'M', u'φ'), (0x1D7C0, 'M', u'χ'), (0x1D7C1, 'M', u'ψ'), (0x1D7C2, 'M', u'ω'), (0x1D7C3, 'M', u'∂'), (0x1D7C4, 'M', u'ε'), (0x1D7C5, 'M', u'θ'), (0x1D7C6, 'M', u'κ'), (0x1D7C7, 'M', u'φ'), (0x1D7C8, 'M', u'ρ'), (0x1D7C9, 'M', u'π'), (0x1D7CA, 'M', u'ϝ'), (0x1D7CC, 'X'), (0x1D7CE, 'M', u'0'), ] def _seg_63(): return [ (0x1D7CF, 'M', u'1'), (0x1D7D0, 'M', u'2'), (0x1D7D1, 'M', u'3'), (0x1D7D2, 'M', u'4'), (0x1D7D3, 'M', u'5'), (0x1D7D4, 'M', u'6'), (0x1D7D5, 'M', u'7'), (0x1D7D6, 'M', u'8'), (0x1D7D7, 'M', u'9'), (0x1D7D8, 'M', u'0'), (0x1D7D9, 'M', u'1'), (0x1D7DA, 'M', u'2'), (0x1D7DB, 'M', u'3'), (0x1D7DC, 'M', u'4'), (0x1D7DD, 'M', u'5'), (0x1D7DE, 'M', u'6'), (0x1D7DF, 'M', u'7'), (0x1D7E0, 'M', u'8'), (0x1D7E1, 'M', u'9'), (0x1D7E2, 'M', u'0'), (0x1D7E3, 'M', u'1'), (0x1D7E4, 'M', u'2'), (0x1D7E5, 'M', u'3'), (0x1D7E6, 'M', u'4'), (0x1D7E7, 'M', u'5'), (0x1D7E8, 'M', u'6'), (0x1D7E9, 'M', u'7'), (0x1D7EA, 'M', u'8'), (0x1D7EB, 'M', u'9'), (0x1D7EC, 'M', u'0'), (0x1D7ED, 'M', u'1'), (0x1D7EE, 'M', u'2'), (0x1D7EF, 'M', u'3'), (0x1D7F0, 'M', u'4'), (0x1D7F1, 'M', u'5'), (0x1D7F2, 'M', u'6'), (0x1D7F3, 'M', u'7'), (0x1D7F4, 'M', u'8'), (0x1D7F5, 'M', u'9'), (0x1D7F6, 'M', u'0'), (0x1D7F7, 'M', u'1'), (0x1D7F8, 'M', u'2'), (0x1D7F9, 'M', u'3'), (0x1D7FA, 'M', u'4'), (0x1D7FB, 'M', u'5'), (0x1D7FC, 'M', u'6'), (0x1D7FD, 'M', u'7'), (0x1D7FE, 'M', u'8'), (0x1D7FF, 'M', u'9'), (0x1D800, 'X'), (0x1EE00, 'M', u'ا'), (0x1EE01, 'M', u'ب'), (0x1EE02, 'M', u'ج'), (0x1EE03, 'M', u'د'), (0x1EE04, 'X'), (0x1EE05, 'M', u'و'), (0x1EE06, 'M', u'ز'), (0x1EE07, 'M', u'ح'), (0x1EE08, 'M', u'ط'), (0x1EE09, 'M', u'ي'), (0x1EE0A, 'M', u'ك'), (0x1EE0B, 'M', u'ل'), (0x1EE0C, 'M', u'م'), (0x1EE0D, 'M', u'ن'), (0x1EE0E, 'M', u'س'), (0x1EE0F, 'M', u'ع'), (0x1EE10, 'M', u'ف'), (0x1EE11, 'M', u'ص'), (0x1EE12, 'M', u'ق'), (0x1EE13, 'M', u'ر'), (0x1EE14, 'M', u'ش'), (0x1EE15, 'M', u'ت'), (0x1EE16, 'M', u'ث'), (0x1EE17, 'M', u'خ'), (0x1EE18, 'M', u'ذ'), (0x1EE19, 'M', u'ض'), (0x1EE1A, 'M', u'ظ'), (0x1EE1B, 'M', u'غ'), (0x1EE1C, 'M', u'ٮ'), (0x1EE1D, 'M', u'ں'), (0x1EE1E, 'M', u'ڡ'), (0x1EE1F, 'M', u'ٯ'), (0x1EE20, 'X'), (0x1EE21, 'M', u'ب'), (0x1EE22, 'M', u'ج'), (0x1EE23, 'X'), (0x1EE24, 'M', u'ه'), (0x1EE25, 'X'), (0x1EE27, 'M', u'ح'), (0x1EE28, 'X'), (0x1EE29, 'M', u'ي'), (0x1EE2A, 'M', u'ك'), (0x1EE2B, 'M', u'ل'), (0x1EE2C, 'M', u'م'), (0x1EE2D, 'M', u'ن'), (0x1EE2E, 'M', u'س'), (0x1EE2F, 'M', u'ع'), (0x1EE30, 'M', u'ف'), (0x1EE31, 'M', u'ص'), (0x1EE32, 'M', u'ق'), ] def _seg_64(): return [ (0x1EE33, 'X'), (0x1EE34, 'M', u'ش'), (0x1EE35, 'M', u'ت'), (0x1EE36, 'M', u'ث'), (0x1EE37, 'M', u'خ'), (0x1EE38, 'X'), (0x1EE39, 'M', u'ض'), (0x1EE3A, 'X'), (0x1EE3B, 'M', u'غ'), (0x1EE3C, 'X'), (0x1EE42, 'M', u'ج'), (0x1EE43, 'X'), (0x1EE47, 'M', u'ح'), (0x1EE48, 'X'), (0x1EE49, 'M', u'ي'), (0x1EE4A, 'X'), (0x1EE4B, 'M', u'ل'), (0x1EE4C, 'X'), (0x1EE4D, 'M', u'ن'), (0x1EE4E, 'M', u'س'), (0x1EE4F, 'M', u'ع'), (0x1EE50, 'X'), (0x1EE51, 'M', u'ص'), (0x1EE52, 'M', u'ق'), (0x1EE53, 'X'), (0x1EE54, 'M', u'ش'), (0x1EE55, 'X'), (0x1EE57, 'M', 
u'خ'), (0x1EE58, 'X'), (0x1EE59, 'M', u'ض'), (0x1EE5A, 'X'), (0x1EE5B, 'M', u'غ'), (0x1EE5C, 'X'), (0x1EE5D, 'M', u'ں'), (0x1EE5E, 'X'), (0x1EE5F, 'M', u'ٯ'), (0x1EE60, 'X'), (0x1EE61, 'M', u'ب'), (0x1EE62, 'M', u'ج'), (0x1EE63, 'X'), (0x1EE64, 'M', u'ه'), (0x1EE65, 'X'), (0x1EE67, 'M', u'ح'), (0x1EE68, 'M', u'ط'), (0x1EE69, 'M', u'ي'), (0x1EE6A, 'M', u'ك'), (0x1EE6B, 'X'), (0x1EE6C, 'M', u'م'), (0x1EE6D, 'M', u'ن'), (0x1EE6E, 'M', u'س'), (0x1EE6F, 'M', u'ع'), (0x1EE70, 'M', u'ف'), (0x1EE71, 'M', u'ص'), (0x1EE72, 'M', u'ق'), (0x1EE73, 'X'), (0x1EE74, 'M', u'ش'), (0x1EE75, 'M', u'ت'), (0x1EE76, 'M', u'ث'), (0x1EE77, 'M', u'خ'), (0x1EE78, 'X'), (0x1EE79, 'M', u'ض'), (0x1EE7A, 'M', u'ظ'), (0x1EE7B, 'M', u'غ'), (0x1EE7C, 'M', u'ٮ'), (0x1EE7D, 'X'), (0x1EE7E, 'M', u'ڡ'), (0x1EE7F, 'X'), (0x1EE80, 'M', u'ا'), (0x1EE81, 'M', u'ب'), (0x1EE82, 'M', u'ج'), (0x1EE83, 'M', u'د'), (0x1EE84, 'M', u'ه'), (0x1EE85, 'M', u'و'), (0x1EE86, 'M', u'ز'), (0x1EE87, 'M', u'ح'), (0x1EE88, 'M', u'ط'), (0x1EE89, 'M', u'ي'), (0x1EE8A, 'X'), (0x1EE8B, 'M', u'ل'), (0x1EE8C, 'M', u'م'), (0x1EE8D, 'M', u'ن'), (0x1EE8E, 'M', u'س'), (0x1EE8F, 'M', u'ع'), (0x1EE90, 'M', u'ف'), (0x1EE91, 'M', u'ص'), (0x1EE92, 'M', u'ق'), (0x1EE93, 'M', u'ر'), (0x1EE94, 'M', u'ش'), (0x1EE95, 'M', u'ت'), (0x1EE96, 'M', u'ث'), (0x1EE97, 'M', u'خ'), (0x1EE98, 'M', u'ذ'), (0x1EE99, 'M', u'ض'), (0x1EE9A, 'M', u'ظ'), (0x1EE9B, 'M', u'غ'), (0x1EE9C, 'X'), (0x1EEA1, 'M', u'ب'), (0x1EEA2, 'M', u'ج'), (0x1EEA3, 'M', u'د'), (0x1EEA4, 'X'), ] def _seg_65(): return [ (0x1EEA5, 'M', u'و'), (0x1EEA6, 'M', u'ز'), (0x1EEA7, 'M', u'ح'), (0x1EEA8, 'M', u'ط'), (0x1EEA9, 'M', u'ي'), (0x1EEAA, 'X'), (0x1EEAB, 'M', u'ل'), (0x1EEAC, 'M', u'م'), (0x1EEAD, 'M', u'ن'), (0x1EEAE, 'M', u'س'), (0x1EEAF, 'M', u'ع'), (0x1EEB0, 'M', u'ف'), (0x1EEB1, 'M', u'ص'), (0x1EEB2, 'M', u'ق'), (0x1EEB3, 'M', u'ر'), (0x1EEB4, 'M', u'ش'), (0x1EEB5, 'M', u'ت'), (0x1EEB6, 'M', u'ث'), (0x1EEB7, 'M', u'خ'), (0x1EEB8, 'M', u'ذ'), (0x1EEB9, 'M', u'ض'), (0x1EEBA, 'M', u'ظ'), (0x1EEBB, 'M', u'غ'), (0x1EEBC, 'X'), (0x1EEF0, 'V'), (0x1EEF2, 'X'), (0x1F000, 'V'), (0x1F02C, 'X'), (0x1F030, 'V'), (0x1F094, 'X'), (0x1F0A0, 'V'), (0x1F0AF, 'X'), (0x1F0B1, 'V'), (0x1F0BF, 'X'), (0x1F0C1, 'V'), (0x1F0D0, 'X'), (0x1F0D1, 'V'), (0x1F0E0, 'X'), (0x1F101, '3', u'0,'), (0x1F102, '3', u'1,'), (0x1F103, '3', u'2,'), (0x1F104, '3', u'3,'), (0x1F105, '3', u'4,'), (0x1F106, '3', u'5,'), (0x1F107, '3', u'6,'), (0x1F108, '3', u'7,'), (0x1F109, '3', u'8,'), (0x1F10A, '3', u'9,'), (0x1F10B, 'X'), (0x1F110, '3', u'(a)'), (0x1F111, '3', u'(b)'), (0x1F112, '3', u'(c)'), (0x1F113, '3', u'(d)'), (0x1F114, '3', u'(e)'), (0x1F115, '3', u'(f)'), (0x1F116, '3', u'(g)'), (0x1F117, '3', u'(h)'), (0x1F118, '3', u'(i)'), (0x1F119, '3', u'(j)'), (0x1F11A, '3', u'(k)'), (0x1F11B, '3', u'(l)'), (0x1F11C, '3', u'(m)'), (0x1F11D, '3', u'(n)'), (0x1F11E, '3', u'(o)'), (0x1F11F, '3', u'(p)'), (0x1F120, '3', u'(q)'), (0x1F121, '3', u'(r)'), (0x1F122, '3', u'(s)'), (0x1F123, '3', u'(t)'), (0x1F124, '3', u'(u)'), (0x1F125, '3', u'(v)'), (0x1F126, '3', u'(w)'), (0x1F127, '3', u'(x)'), (0x1F128, '3', u'(y)'), (0x1F129, '3', u'(z)'), (0x1F12A, 'M', u'〔s〕'), (0x1F12B, 'M', u'c'), (0x1F12C, 'M', u'r'), (0x1F12D, 'M', u'cd'), (0x1F12E, 'M', u'wz'), (0x1F12F, 'X'), (0x1F130, 'M', u'a'), (0x1F131, 'M', u'b'), (0x1F132, 'M', u'c'), (0x1F133, 'M', u'd'), (0x1F134, 'M', u'e'), (0x1F135, 'M', u'f'), (0x1F136, 'M', u'g'), (0x1F137, 'M', u'h'), (0x1F138, 'M', u'i'), (0x1F139, 'M', u'j'), (0x1F13A, 'M', u'k'), (0x1F13B, 'M', u'l'), (0x1F13C, 'M', u'm'), 
(0x1F13D, 'M', u'n'), (0x1F13E, 'M', u'o'), (0x1F13F, 'M', u'p'), (0x1F140, 'M', u'q'), (0x1F141, 'M', u'r'), (0x1F142, 'M', u's'), ] def _seg_66(): return [ (0x1F143, 'M', u't'), (0x1F144, 'M', u'u'), (0x1F145, 'M', u'v'), (0x1F146, 'M', u'w'), (0x1F147, 'M', u'x'), (0x1F148, 'M', u'y'), (0x1F149, 'M', u'z'), (0x1F14A, 'M', u'hv'), (0x1F14B, 'M', u'mv'), (0x1F14C, 'M', u'sd'), (0x1F14D, 'M', u'ss'), (0x1F14E, 'M', u'ppv'), (0x1F14F, 'M', u'wc'), (0x1F150, 'V'), (0x1F16A, 'M', u'mc'), (0x1F16B, 'M', u'md'), (0x1F16C, 'X'), (0x1F170, 'V'), (0x1F190, 'M', u'dj'), (0x1F191, 'V'), (0x1F19B, 'X'), (0x1F1E6, 'V'), (0x1F200, 'M', u'ほか'), (0x1F201, 'M', u'ココ'), (0x1F202, 'M', u'サ'), (0x1F203, 'X'), (0x1F210, 'M', u'手'), (0x1F211, 'M', u'字'), (0x1F212, 'M', u'双'), (0x1F213, 'M', u'デ'), (0x1F214, 'M', u'二'), (0x1F215, 'M', u'多'), (0x1F216, 'M', u'解'), (0x1F217, 'M', u'天'), (0x1F218, 'M', u'交'), (0x1F219, 'M', u'映'), (0x1F21A, 'M', u'無'), (0x1F21B, 'M', u'料'), (0x1F21C, 'M', u'前'), (0x1F21D, 'M', u'後'), (0x1F21E, 'M', u'再'), (0x1F21F, 'M', u'新'), (0x1F220, 'M', u'初'), (0x1F221, 'M', u'終'), (0x1F222, 'M', u'生'), (0x1F223, 'M', u'販'), (0x1F224, 'M', u'声'), (0x1F225, 'M', u'吹'), (0x1F226, 'M', u'演'), (0x1F227, 'M', u'投'), (0x1F228, 'M', u'捕'), (0x1F229, 'M', u'一'), (0x1F22A, 'M', u'三'), (0x1F22B, 'M', u'遊'), (0x1F22C, 'M', u'左'), (0x1F22D, 'M', u'中'), (0x1F22E, 'M', u'右'), (0x1F22F, 'M', u'指'), (0x1F230, 'M', u'走'), (0x1F231, 'M', u'打'), (0x1F232, 'M', u'禁'), (0x1F233, 'M', u'空'), (0x1F234, 'M', u'合'), (0x1F235, 'M', u'満'), (0x1F236, 'M', u'有'), (0x1F237, 'M', u'月'), (0x1F238, 'M', u'申'), (0x1F239, 'M', u'割'), (0x1F23A, 'M', u'営'), (0x1F23B, 'X'), (0x1F240, 'M', u'〔本〕'), (0x1F241, 'M', u'〔三〕'), (0x1F242, 'M', u'〔二〕'), (0x1F243, 'M', u'〔安〕'), (0x1F244, 'M', u'〔点〕'), (0x1F245, 'M', u'〔打〕'), (0x1F246, 'M', u'〔盗〕'), (0x1F247, 'M', u'〔勝〕'), (0x1F248, 'M', u'〔敗〕'), (0x1F249, 'X'), (0x1F250, 'M', u'得'), (0x1F251, 'M', u'可'), (0x1F252, 'X'), (0x1F300, 'V'), (0x1F321, 'X'), (0x1F330, 'V'), (0x1F336, 'X'), (0x1F337, 'V'), (0x1F37D, 'X'), (0x1F380, 'V'), (0x1F394, 'X'), (0x1F3A0, 'V'), (0x1F3C5, 'X'), (0x1F3C6, 'V'), (0x1F3CB, 'X'), (0x1F3E0, 'V'), (0x1F3F1, 'X'), (0x1F400, 'V'), (0x1F43F, 'X'), (0x1F440, 'V'), ] def _seg_67(): return [ (0x1F441, 'X'), (0x1F442, 'V'), (0x1F4F8, 'X'), (0x1F4F9, 'V'), (0x1F4FD, 'X'), (0x1F500, 'V'), (0x1F53E, 'X'), (0x1F540, 'V'), (0x1F544, 'X'), (0x1F550, 'V'), (0x1F568, 'X'), (0x1F5FB, 'V'), (0x1F641, 'X'), (0x1F645, 'V'), (0x1F650, 'X'), (0x1F680, 'V'), (0x1F6C6, 'X'), (0x1F700, 'V'), (0x1F774, 'X'), (0x20000, 'V'), (0x2A6D7, 'X'), (0x2A700, 'V'), (0x2B735, 'X'), (0x2B740, 'V'), (0x2B81E, 'X'), (0x2F800, 'M', u'丽'), (0x2F801, 'M', u'丸'), (0x2F802, 'M', u'乁'), (0x2F803, 'M', u'𠄢'), (0x2F804, 'M', u'你'), (0x2F805, 'M', u'侮'), (0x2F806, 'M', u'侻'), (0x2F807, 'M', u'倂'), (0x2F808, 'M', u'偺'), (0x2F809, 'M', u'備'), (0x2F80A, 'M', u'僧'), (0x2F80B, 'M', u'像'), (0x2F80C, 'M', u'㒞'), (0x2F80D, 'M', u'𠘺'), (0x2F80E, 'M', u'免'), (0x2F80F, 'M', u'兔'), (0x2F810, 'M', u'兤'), (0x2F811, 'M', u'具'), (0x2F812, 'M', u'𠔜'), (0x2F813, 'M', u'㒹'), (0x2F814, 'M', u'內'), (0x2F815, 'M', u'再'), (0x2F816, 'M', u'𠕋'), (0x2F817, 'M', u'冗'), (0x2F818, 'M', u'冤'), (0x2F819, 'M', u'仌'), (0x2F81A, 'M', u'冬'), (0x2F81B, 'M', u'况'), (0x2F81C, 'M', u'𩇟'), (0x2F81D, 'M', u'凵'), (0x2F81E, 'M', u'刃'), (0x2F81F, 'M', u'㓟'), (0x2F820, 'M', u'刻'), (0x2F821, 'M', u'剆'), (0x2F822, 'M', u'割'), (0x2F823, 'M', u'剷'), (0x2F824, 'M', u'㔕'), (0x2F825, 'M', u'勇'), (0x2F826, 'M', u'勉'), (0x2F827, 'M', u'勤'), (0x2F828, 'M', u'勺'), 
(0x2F829, 'M', u'包'), (0x2F82A, 'M', u'匆'), (0x2F82B, 'M', u'北'), (0x2F82C, 'M', u'卉'), (0x2F82D, 'M', u'卑'), (0x2F82E, 'M', u'博'), (0x2F82F, 'M', u'即'), (0x2F830, 'M', u'卽'), (0x2F831, 'M', u'卿'), (0x2F834, 'M', u'𠨬'), (0x2F835, 'M', u'灰'), (0x2F836, 'M', u'及'), (0x2F837, 'M', u'叟'), (0x2F838, 'M', u'𠭣'), (0x2F839, 'M', u'叫'), (0x2F83A, 'M', u'叱'), (0x2F83B, 'M', u'吆'), (0x2F83C, 'M', u'咞'), (0x2F83D, 'M', u'吸'), (0x2F83E, 'M', u'呈'), (0x2F83F, 'M', u'周'), (0x2F840, 'M', u'咢'), (0x2F841, 'M', u'哶'), (0x2F842, 'M', u'唐'), (0x2F843, 'M', u'啓'), (0x2F844, 'M', u'啣'), (0x2F845, 'M', u'善'), (0x2F847, 'M', u'喙'), (0x2F848, 'M', u'喫'), (0x2F849, 'M', u'喳'), (0x2F84A, 'M', u'嗂'), (0x2F84B, 'M', u'圖'), (0x2F84C, 'M', u'嘆'), (0x2F84D, 'M', u'圗'), ] def _seg_68(): return [ (0x2F84E, 'M', u'噑'), (0x2F84F, 'M', u'噴'), (0x2F850, 'M', u'切'), (0x2F851, 'M', u'壮'), (0x2F852, 'M', u'城'), (0x2F853, 'M', u'埴'), (0x2F854, 'M', u'堍'), (0x2F855, 'M', u'型'), (0x2F856, 'M', u'堲'), (0x2F857, 'M', u'報'), (0x2F858, 'M', u'墬'), (0x2F859, 'M', u'𡓤'), (0x2F85A, 'M', u'売'), (0x2F85B, 'M', u'壷'), (0x2F85C, 'M', u'夆'), (0x2F85D, 'M', u'多'), (0x2F85E, 'M', u'夢'), (0x2F85F, 'M', u'奢'), (0x2F860, 'M', u'𡚨'), (0x2F861, 'M', u'𡛪'), (0x2F862, 'M', u'姬'), (0x2F863, 'M', u'娛'), (0x2F864, 'M', u'娧'), (0x2F865, 'M', u'姘'), (0x2F866, 'M', u'婦'), (0x2F867, 'M', u'㛮'), (0x2F868, 'X'), (0x2F869, 'M', u'嬈'), (0x2F86A, 'M', u'嬾'), (0x2F86C, 'M', u'𡧈'), (0x2F86D, 'M', u'寃'), (0x2F86E, 'M', u'寘'), (0x2F86F, 'M', u'寧'), (0x2F870, 'M', u'寳'), (0x2F871, 'M', u'𡬘'), (0x2F872, 'M', u'寿'), (0x2F873, 'M', u'将'), (0x2F874, 'X'), (0x2F875, 'M', u'尢'), (0x2F876, 'M', u'㞁'), (0x2F877, 'M', u'屠'), (0x2F878, 'M', u'屮'), (0x2F879, 'M', u'峀'), (0x2F87A, 'M', u'岍'), (0x2F87B, 'M', u'𡷤'), (0x2F87C, 'M', u'嵃'), (0x2F87D, 'M', u'𡷦'), (0x2F87E, 'M', u'嵮'), (0x2F87F, 'M', u'嵫'), (0x2F880, 'M', u'嵼'), (0x2F881, 'M', u'巡'), (0x2F882, 'M', u'巢'), (0x2F883, 'M', u'㠯'), (0x2F884, 'M', u'巽'), (0x2F885, 'M', u'帨'), (0x2F886, 'M', u'帽'), (0x2F887, 'M', u'幩'), (0x2F888, 'M', u'㡢'), (0x2F889, 'M', u'𢆃'), (0x2F88A, 'M', u'㡼'), (0x2F88B, 'M', u'庰'), (0x2F88C, 'M', u'庳'), (0x2F88D, 'M', u'庶'), (0x2F88E, 'M', u'廊'), (0x2F88F, 'M', u'𪎒'), (0x2F890, 'M', u'廾'), (0x2F891, 'M', u'𢌱'), (0x2F893, 'M', u'舁'), (0x2F894, 'M', u'弢'), (0x2F896, 'M', u'㣇'), (0x2F897, 'M', u'𣊸'), (0x2F898, 'M', u'𦇚'), (0x2F899, 'M', u'形'), (0x2F89A, 'M', u'彫'), (0x2F89B, 'M', u'㣣'), (0x2F89C, 'M', u'徚'), (0x2F89D, 'M', u'忍'), (0x2F89E, 'M', u'志'), (0x2F89F, 'M', u'忹'), (0x2F8A0, 'M', u'悁'), (0x2F8A1, 'M', u'㤺'), (0x2F8A2, 'M', u'㤜'), (0x2F8A3, 'M', u'悔'), (0x2F8A4, 'M', u'𢛔'), (0x2F8A5, 'M', u'惇'), (0x2F8A6, 'M', u'慈'), (0x2F8A7, 'M', u'慌'), (0x2F8A8, 'M', u'慎'), (0x2F8A9, 'M', u'慌'), (0x2F8AA, 'M', u'慺'), (0x2F8AB, 'M', u'憎'), (0x2F8AC, 'M', u'憲'), (0x2F8AD, 'M', u'憤'), (0x2F8AE, 'M', u'憯'), (0x2F8AF, 'M', u'懞'), (0x2F8B0, 'M', u'懲'), (0x2F8B1, 'M', u'懶'), (0x2F8B2, 'M', u'成'), (0x2F8B3, 'M', u'戛'), (0x2F8B4, 'M', u'扝'), ] def _seg_69(): return [ (0x2F8B5, 'M', u'抱'), (0x2F8B6, 'M', u'拔'), (0x2F8B7, 'M', u'捐'), (0x2F8B8, 'M', u'𢬌'), (0x2F8B9, 'M', u'挽'), (0x2F8BA, 'M', u'拼'), (0x2F8BB, 'M', u'捨'), (0x2F8BC, 'M', u'掃'), (0x2F8BD, 'M', u'揤'), (0x2F8BE, 'M', u'𢯱'), (0x2F8BF, 'M', u'搢'), (0x2F8C0, 'M', u'揅'), (0x2F8C1, 'M', u'掩'), (0x2F8C2, 'M', u'㨮'), (0x2F8C3, 'M', u'摩'), (0x2F8C4, 'M', u'摾'), (0x2F8C5, 'M', u'撝'), (0x2F8C6, 'M', u'摷'), (0x2F8C7, 'M', u'㩬'), (0x2F8C8, 'M', u'敏'), (0x2F8C9, 'M', u'敬'), (0x2F8CA, 'M', u'𣀊'), (0x2F8CB, 'M', u'旣'), (0x2F8CC, 'M', u'書'), (0x2F8CD, 'M', u'晉'), (0x2F8CE, 'M', 
u'㬙'), (0x2F8CF, 'M', u'暑'), (0x2F8D0, 'M', u'㬈'), (0x2F8D1, 'M', u'㫤'), (0x2F8D2, 'M', u'冒'), (0x2F8D3, 'M', u'冕'), (0x2F8D4, 'M', u'最'), (0x2F8D5, 'M', u'暜'), (0x2F8D6, 'M', u'肭'), (0x2F8D7, 'M', u'䏙'), (0x2F8D8, 'M', u'朗'), (0x2F8D9, 'M', u'望'), (0x2F8DA, 'M', u'朡'), (0x2F8DB, 'M', u'杞'), (0x2F8DC, 'M', u'杓'), (0x2F8DD, 'M', u'𣏃'), (0x2F8DE, 'M', u'㭉'), (0x2F8DF, 'M', u'柺'), (0x2F8E0, 'M', u'枅'), (0x2F8E1, 'M', u'桒'), (0x2F8E2, 'M', u'梅'), (0x2F8E3, 'M', u'𣑭'), (0x2F8E4, 'M', u'梎'), (0x2F8E5, 'M', u'栟'), (0x2F8E6, 'M', u'椔'), (0x2F8E7, 'M', u'㮝'), (0x2F8E8, 'M', u'楂'), (0x2F8E9, 'M', u'榣'), (0x2F8EA, 'M', u'槪'), (0x2F8EB, 'M', u'檨'), (0x2F8EC, 'M', u'𣚣'), (0x2F8ED, 'M', u'櫛'), (0x2F8EE, 'M', u'㰘'), (0x2F8EF, 'M', u'次'), (0x2F8F0, 'M', u'𣢧'), (0x2F8F1, 'M', u'歔'), (0x2F8F2, 'M', u'㱎'), (0x2F8F3, 'M', u'歲'), (0x2F8F4, 'M', u'殟'), (0x2F8F5, 'M', u'殺'), (0x2F8F6, 'M', u'殻'), (0x2F8F7, 'M', u'𣪍'), (0x2F8F8, 'M', u'𡴋'), (0x2F8F9, 'M', u'𣫺'), (0x2F8FA, 'M', u'汎'), (0x2F8FB, 'M', u'𣲼'), (0x2F8FC, 'M', u'沿'), (0x2F8FD, 'M', u'泍'), (0x2F8FE, 'M', u'汧'), (0x2F8FF, 'M', u'洖'), (0x2F900, 'M', u'派'), (0x2F901, 'M', u'海'), (0x2F902, 'M', u'流'), (0x2F903, 'M', u'浩'), (0x2F904, 'M', u'浸'), (0x2F905, 'M', u'涅'), (0x2F906, 'M', u'𣴞'), (0x2F907, 'M', u'洴'), (0x2F908, 'M', u'港'), (0x2F909, 'M', u'湮'), (0x2F90A, 'M', u'㴳'), (0x2F90B, 'M', u'滋'), (0x2F90C, 'M', u'滇'), (0x2F90D, 'M', u'𣻑'), (0x2F90E, 'M', u'淹'), (0x2F90F, 'M', u'潮'), (0x2F910, 'M', u'𣽞'), (0x2F911, 'M', u'𣾎'), (0x2F912, 'M', u'濆'), (0x2F913, 'M', u'瀹'), (0x2F914, 'M', u'瀞'), (0x2F915, 'M', u'瀛'), (0x2F916, 'M', u'㶖'), (0x2F917, 'M', u'灊'), (0x2F918, 'M', u'災'), ] def _seg_70(): return [ (0x2F919, 'M', u'灷'), (0x2F91A, 'M', u'炭'), (0x2F91B, 'M', u'𠔥'), (0x2F91C, 'M', u'煅'), (0x2F91D, 'M', u'𤉣'), (0x2F91E, 'M', u'熜'), (0x2F91F, 'X'), (0x2F920, 'M', u'爨'), (0x2F921, 'M', u'爵'), (0x2F922, 'M', u'牐'), (0x2F923, 'M', u'𤘈'), (0x2F924, 'M', u'犀'), (0x2F925, 'M', u'犕'), (0x2F926, 'M', u'𤜵'), (0x2F927, 'M', u'𤠔'), (0x2F928, 'M', u'獺'), (0x2F929, 'M', u'王'), (0x2F92A, 'M', u'㺬'), (0x2F92B, 'M', u'玥'), (0x2F92C, 'M', u'㺸'), (0x2F92E, 'M', u'瑇'), (0x2F92F, 'M', u'瑜'), (0x2F930, 'M', u'瑱'), (0x2F931, 'M', u'璅'), (0x2F932, 'M', u'瓊'), (0x2F933, 'M', u'㼛'), (0x2F934, 'M', u'甤'), (0x2F935, 'M', u'𤰶'), (0x2F936, 'M', u'甾'), (0x2F937, 'M', u'𤲒'), (0x2F938, 'M', u'異'), (0x2F939, 'M', u'𢆟'), (0x2F93A, 'M', u'瘐'), (0x2F93B, 'M', u'𤾡'), (0x2F93C, 'M', u'𤾸'), (0x2F93D, 'M', u'𥁄'), (0x2F93E, 'M', u'㿼'), (0x2F93F, 'M', u'䀈'), (0x2F940, 'M', u'直'), (0x2F941, 'M', u'𥃳'), (0x2F942, 'M', u'𥃲'), (0x2F943, 'M', u'𥄙'), (0x2F944, 'M', u'𥄳'), (0x2F945, 'M', u'眞'), (0x2F946, 'M', u'真'), (0x2F948, 'M', u'睊'), (0x2F949, 'M', u'䀹'), (0x2F94A, 'M', u'瞋'), (0x2F94B, 'M', u'䁆'), (0x2F94C, 'M', u'䂖'), (0x2F94D, 'M', u'𥐝'), (0x2F94E, 'M', u'硎'), (0x2F94F, 'M', u'碌'), (0x2F950, 'M', u'磌'), (0x2F951, 'M', u'䃣'), (0x2F952, 'M', u'𥘦'), (0x2F953, 'M', u'祖'), (0x2F954, 'M', u'𥚚'), (0x2F955, 'M', u'𥛅'), (0x2F956, 'M', u'福'), (0x2F957, 'M', u'秫'), (0x2F958, 'M', u'䄯'), (0x2F959, 'M', u'穀'), (0x2F95A, 'M', u'穊'), (0x2F95B, 'M', u'穏'), (0x2F95C, 'M', u'𥥼'), (0x2F95D, 'M', u'𥪧'), (0x2F95F, 'X'), (0x2F960, 'M', u'䈂'), (0x2F961, 'M', u'𥮫'), (0x2F962, 'M', u'篆'), (0x2F963, 'M', u'築'), (0x2F964, 'M', u'䈧'), (0x2F965, 'M', u'𥲀'), (0x2F966, 'M', u'糒'), (0x2F967, 'M', u'䊠'), (0x2F968, 'M', u'糨'), (0x2F969, 'M', u'糣'), (0x2F96A, 'M', u'紀'), (0x2F96B, 'M', u'𥾆'), (0x2F96C, 'M', u'絣'), (0x2F96D, 'M', u'䌁'), (0x2F96E, 'M', u'緇'), (0x2F96F, 'M', u'縂'), (0x2F970, 'M', u'繅'), (0x2F971, 'M', u'䌴'), (0x2F972, 
'M', u'𦈨'), (0x2F973, 'M', u'𦉇'), (0x2F974, 'M', u'䍙'), (0x2F975, 'M', u'𦋙'), (0x2F976, 'M', u'罺'), (0x2F977, 'M', u'𦌾'), (0x2F978, 'M', u'羕'), (0x2F979, 'M', u'翺'), (0x2F97A, 'M', u'者'), (0x2F97B, 'M', u'𦓚'), (0x2F97C, 'M', u'𦔣'), (0x2F97D, 'M', u'聠'), (0x2F97E, 'M', u'𦖨'), (0x2F97F, 'M', u'聰'), ] def _seg_71(): return [ (0x2F980, 'M', u'𣍟'), (0x2F981, 'M', u'䏕'), (0x2F982, 'M', u'育'), (0x2F983, 'M', u'脃'), (0x2F984, 'M', u'䐋'), (0x2F985, 'M', u'脾'), (0x2F986, 'M', u'媵'), (0x2F987, 'M', u'𦞧'), (0x2F988, 'M', u'𦞵'), (0x2F989, 'M', u'𣎓'), (0x2F98A, 'M', u'𣎜'), (0x2F98B, 'M', u'舁'), (0x2F98C, 'M', u'舄'), (0x2F98D, 'M', u'辞'), (0x2F98E, 'M', u'䑫'), (0x2F98F, 'M', u'芑'), (0x2F990, 'M', u'芋'), (0x2F991, 'M', u'芝'), (0x2F992, 'M', u'劳'), (0x2F993, 'M', u'花'), (0x2F994, 'M', u'芳'), (0x2F995, 'M', u'芽'), (0x2F996, 'M', u'苦'), (0x2F997, 'M', u'𦬼'), (0x2F998, 'M', u'若'), (0x2F999, 'M', u'茝'), (0x2F99A, 'M', u'荣'), (0x2F99B, 'M', u'莭'), (0x2F99C, 'M', u'茣'), (0x2F99D, 'M', u'莽'), (0x2F99E, 'M', u'菧'), (0x2F99F, 'M', u'著'), (0x2F9A0, 'M', u'荓'), (0x2F9A1, 'M', u'菊'), (0x2F9A2, 'M', u'菌'), (0x2F9A3, 'M', u'菜'), (0x2F9A4, 'M', u'𦰶'), (0x2F9A5, 'M', u'𦵫'), (0x2F9A6, 'M', u'𦳕'), (0x2F9A7, 'M', u'䔫'), (0x2F9A8, 'M', u'蓱'), (0x2F9A9, 'M', u'蓳'), (0x2F9AA, 'M', u'蔖'), (0x2F9AB, 'M', u'𧏊'), (0x2F9AC, 'M', u'蕤'), (0x2F9AD, 'M', u'𦼬'), (0x2F9AE, 'M', u'䕝'), (0x2F9AF, 'M', u'䕡'), (0x2F9B0, 'M', u'𦾱'), (0x2F9B1, 'M', u'𧃒'), (0x2F9B2, 'M', u'䕫'), (0x2F9B3, 'M', u'虐'), (0x2F9B4, 'M', u'虜'), (0x2F9B5, 'M', u'虧'), (0x2F9B6, 'M', u'虩'), (0x2F9B7, 'M', u'蚩'), (0x2F9B8, 'M', u'蚈'), (0x2F9B9, 'M', u'蜎'), (0x2F9BA, 'M', u'蛢'), (0x2F9BB, 'M', u'蝹'), (0x2F9BC, 'M', u'蜨'), (0x2F9BD, 'M', u'蝫'), (0x2F9BE, 'M', u'螆'), (0x2F9BF, 'X'), (0x2F9C0, 'M', u'蟡'), (0x2F9C1, 'M', u'蠁'), (0x2F9C2, 'M', u'䗹'), (0x2F9C3, 'M', u'衠'), (0x2F9C4, 'M', u'衣'), (0x2F9C5, 'M', u'𧙧'), (0x2F9C6, 'M', u'裗'), (0x2F9C7, 'M', u'裞'), (0x2F9C8, 'M', u'䘵'), (0x2F9C9, 'M', u'裺'), (0x2F9CA, 'M', u'㒻'), (0x2F9CB, 'M', u'𧢮'), (0x2F9CC, 'M', u'𧥦'), (0x2F9CD, 'M', u'䚾'), (0x2F9CE, 'M', u'䛇'), (0x2F9CF, 'M', u'誠'), (0x2F9D0, 'M', u'諭'), (0x2F9D1, 'M', u'變'), (0x2F9D2, 'M', u'豕'), (0x2F9D3, 'M', u'𧲨'), (0x2F9D4, 'M', u'貫'), (0x2F9D5, 'M', u'賁'), (0x2F9D6, 'M', u'贛'), (0x2F9D7, 'M', u'起'), (0x2F9D8, 'M', u'𧼯'), (0x2F9D9, 'M', u'𠠄'), (0x2F9DA, 'M', u'跋'), (0x2F9DB, 'M', u'趼'), (0x2F9DC, 'M', u'跰'), (0x2F9DD, 'M', u'𠣞'), (0x2F9DE, 'M', u'軔'), (0x2F9DF, 'M', u'輸'), (0x2F9E0, 'M', u'𨗒'), (0x2F9E1, 'M', u'𨗭'), (0x2F9E2, 'M', u'邔'), (0x2F9E3, 'M', u'郱'), ] def _seg_72(): return [ (0x2F9E4, 'M', u'鄑'), (0x2F9E5, 'M', u'𨜮'), (0x2F9E6, 'M', u'鄛'), (0x2F9E7, 'M', u'鈸'), (0x2F9E8, 'M', u'鋗'), (0x2F9E9, 'M', u'鋘'), (0x2F9EA, 'M', u'鉼'), (0x2F9EB, 'M', u'鏹'), (0x2F9EC, 'M', u'鐕'), (0x2F9ED, 'M', u'𨯺'), (0x2F9EE, 'M', u'開'), (0x2F9EF, 'M', u'䦕'), (0x2F9F0, 'M', u'閷'), (0x2F9F1, 'M', u'𨵷'), (0x2F9F2, 'M', u'䧦'), (0x2F9F3, 'M', u'雃'), (0x2F9F4, 'M', u'嶲'), (0x2F9F5, 'M', u'霣'), (0x2F9F6, 'M', u'𩅅'), (0x2F9F7, 'M', u'𩈚'), (0x2F9F8, 'M', u'䩮'), (0x2F9F9, 'M', u'䩶'), (0x2F9FA, 'M', u'韠'), (0x2F9FB, 'M', u'𩐊'), (0x2F9FC, 'M', u'䪲'), (0x2F9FD, 'M', u'𩒖'), (0x2F9FE, 'M', u'頋'), (0x2FA00, 'M', u'頩'), (0x2FA01, 'M', u'𩖶'), (0x2FA02, 'M', u'飢'), (0x2FA03, 'M', u'䬳'), (0x2FA04, 'M', u'餩'), (0x2FA05, 'M', u'馧'), (0x2FA06, 'M', u'駂'), (0x2FA07, 'M', u'駾'), (0x2FA08, 'M', u'䯎'), (0x2FA09, 'M', u'𩬰'), (0x2FA0A, 'M', u'鬒'), (0x2FA0B, 'M', u'鱀'), (0x2FA0C, 'M', u'鳽'), (0x2FA0D, 'M', u'䳎'), (0x2FA0E, 'M', u'䳭'), (0x2FA0F, 'M', u'鵧'), (0x2FA10, 'M', u'𪃎'), (0x2FA11, 'M', u'䳸'), (0x2FA12, 'M', 
u'𪄅'), (0x2FA13, 'M', u'𪈎'), (0x2FA14, 'M', u'𪊑'), (0x2FA15, 'M', u'麻'), (0x2FA16, 'M', u'䵖'), (0x2FA17, 'M', u'黹'), (0x2FA18, 'M', u'黾'), (0x2FA19, 'M', u'鼅'), (0x2FA1A, 'M', u'鼏'), (0x2FA1B, 'M', u'鼖'), (0x2FA1C, 'M', u'鼻'), (0x2FA1D, 'M', u'𪘀'), (0x2FA1E, 'X'), (0xE0100, 'I'), (0xE01F0, 'X'), ] uts46data = tuple( _seg_0() + _seg_1() + _seg_2() + _seg_3() + _seg_4() + _seg_5() + _seg_6() + _seg_7() + _seg_8() + _seg_9() + _seg_10() + _seg_11() + _seg_12() + _seg_13() + _seg_14() + _seg_15() + _seg_16() + _seg_17() + _seg_18() + _seg_19() + _seg_20() + _seg_21() + _seg_22() + _seg_23() + _seg_24() + _seg_25() + _seg_26() + _seg_27() + _seg_28() + _seg_29() + _seg_30() + _seg_31() + _seg_32() + _seg_33() + _seg_34() + _seg_35() + _seg_36() + _seg_37() + _seg_38() + _seg_39() + _seg_40() + _seg_41() + _seg_42() + _seg_43() + _seg_44() + _seg_45() + _seg_46() + _seg_47() + _seg_48() + _seg_49() + _seg_50() + _seg_51() + _seg_52() + _seg_53() + _seg_54() + _seg_55() + _seg_56() + _seg_57() + _seg_58() + _seg_59() + _seg_60() + _seg_61() + _seg_62() + _seg_63() + _seg_64() + _seg_65() + _seg_66() + _seg_67() + _seg_68() + _seg_69() + _seg_70() + _seg_71() + _seg_72() )
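# ---------------------------------------------------------------------------
# Editorial sketch (not part of the generated file): each row above is a
# (start_codepoint, status[, replacement]) tuple, sorted by start codepoint,
# and a row applies from its start up to the next row's start. The status
# codes follow UTS #46: 'V' valid, 'M' mapped, 'D' deviation, 'I' ignored,
# 'X' disallowed, '3' disallowed under strict STD3 rules. A consumer can
# locate the row governing any character with a binary search; the helper
# below is a minimal illustration of that lookup (the package's actual
# processing in idna.core applies further UTS #46 logic on top of it).
import bisect

def _lookup_status(char, table=uts46data):
    """Return (status, replacement) for ``char`` via binary search."""
    code_point = ord(char)
    # (code_point, 'Z') sorts after every real row starting at code_point,
    # so bisect_left lands just past the governing row; step back one.
    row = table[bisect.bisect_left(table, (code_point, 'Z')) - 1]
    return row[1], (row[2] if len(row) == 3 else None)

# e.g. _lookup_status(u'A') -> ('M', u'a'); _lookup_status(u'a') -> ('V', None)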
176814
22.158481
57
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/compat.py
from .core import *
from .codec import *


def ToASCII(label):
    return encode(label)


def ToUnicode(label):
    return decode(label)


def nameprep(s):
    raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
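# Editorial usage note (illustrative, not in the original module): these are
# thin compatibility shims exposing the legacy IDNA 2003 names on top of the
# package's encode()/decode() from .core. For example:
#
#     >>> ToASCII(u'ドメイン')
#     b'xn--eckwd4c7c'
#     >>> ToUnicode(b'xn--eckwd4c7c')
#     u'ドメイン'
#
# nameprep() is deliberately left unimplemented because IDNA 2008 removed the
# nameprep step that the IDNA 2003 API required.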
232
16.923077
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/idna/idnadata.py
# This file is automatically generated by tools/idna-data __version__ = "6.3.0" scripts = { 'Greek': ( 0x37000000374, 0x37500000378, 0x37a0000037e, 0x38400000385, 0x38600000387, 0x3880000038b, 0x38c0000038d, 0x38e000003a2, 0x3a3000003e2, 0x3f000000400, 0x1d2600001d2b, 0x1d5d00001d62, 0x1d6600001d6b, 0x1dbf00001dc0, 0x1f0000001f16, 0x1f1800001f1e, 0x1f2000001f46, 0x1f4800001f4e, 0x1f5000001f58, 0x1f5900001f5a, 0x1f5b00001f5c, 0x1f5d00001f5e, 0x1f5f00001f7e, 0x1f8000001fb5, 0x1fb600001fc5, 0x1fc600001fd4, 0x1fd600001fdc, 0x1fdd00001ff0, 0x1ff200001ff5, 0x1ff600001fff, 0x212600002127, 0x101400001018b, 0x1d2000001d246, ), 'Han': ( 0x2e8000002e9a, 0x2e9b00002ef4, 0x2f0000002fd6, 0x300500003006, 0x300700003008, 0x30210000302a, 0x30380000303c, 0x340000004db6, 0x4e0000009fcd, 0xf9000000fa6e, 0xfa700000fada, 0x200000002a6d7, 0x2a7000002b735, 0x2b7400002b81e, 0x2f8000002fa1e, ), 'Hebrew': ( 0x591000005c8, 0x5d0000005eb, 0x5f0000005f5, 0xfb1d0000fb37, 0xfb380000fb3d, 0xfb3e0000fb3f, 0xfb400000fb42, 0xfb430000fb45, 0xfb460000fb50, ), 'Hiragana': ( 0x304100003097, 0x309d000030a0, 0x1b0010001b002, 0x1f2000001f201, ), 'Katakana': ( 0x30a1000030fb, 0x30fd00003100, 0x31f000003200, 0x32d0000032ff, 0x330000003358, 0xff660000ff70, 0xff710000ff9e, 0x1b0000001b001, ), } joining_types = { 0x600: 85, 0x601: 85, 0x602: 85, 0x603: 85, 0x604: 85, 0x608: 85, 0x60b: 85, 0x620: 68, 0x621: 85, 0x622: 82, 0x623: 82, 0x624: 82, 0x625: 82, 0x626: 68, 0x627: 82, 0x628: 68, 0x629: 82, 0x62a: 68, 0x62b: 68, 0x62c: 68, 0x62d: 68, 0x62e: 68, 0x62f: 82, 0x630: 82, 0x631: 82, 0x632: 82, 0x633: 68, 0x634: 68, 0x635: 68, 0x636: 68, 0x637: 68, 0x638: 68, 0x639: 68, 0x63a: 68, 0x63b: 68, 0x63c: 68, 0x63d: 68, 0x63e: 68, 0x63f: 68, 0x640: 67, 0x641: 68, 0x642: 68, 0x643: 68, 0x644: 68, 0x645: 68, 0x646: 68, 0x647: 68, 0x648: 82, 0x649: 68, 0x64a: 68, 0x66e: 68, 0x66f: 68, 0x671: 82, 0x672: 82, 0x673: 82, 0x674: 85, 0x675: 82, 0x676: 82, 0x677: 82, 0x678: 68, 0x679: 68, 0x67a: 68, 0x67b: 68, 0x67c: 68, 0x67d: 68, 0x67e: 68, 0x67f: 68, 0x680: 68, 0x681: 68, 0x682: 68, 0x683: 68, 0x684: 68, 0x685: 68, 0x686: 68, 0x687: 68, 0x688: 82, 0x689: 82, 0x68a: 82, 0x68b: 82, 0x68c: 82, 0x68d: 82, 0x68e: 82, 0x68f: 82, 0x690: 82, 0x691: 82, 0x692: 82, 0x693: 82, 0x694: 82, 0x695: 82, 0x696: 82, 0x697: 82, 0x698: 82, 0x699: 82, 0x69a: 68, 0x69b: 68, 0x69c: 68, 0x69d: 68, 0x69e: 68, 0x69f: 68, 0x6a0: 68, 0x6a1: 68, 0x6a2: 68, 0x6a3: 68, 0x6a4: 68, 0x6a5: 68, 0x6a6: 68, 0x6a7: 68, 0x6a8: 68, 0x6a9: 68, 0x6aa: 68, 0x6ab: 68, 0x6ac: 68, 0x6ad: 68, 0x6ae: 68, 0x6af: 68, 0x6b0: 68, 0x6b1: 68, 0x6b2: 68, 0x6b3: 68, 0x6b4: 68, 0x6b5: 68, 0x6b6: 68, 0x6b7: 68, 0x6b8: 68, 0x6b9: 68, 0x6ba: 68, 0x6bb: 68, 0x6bc: 68, 0x6bd: 68, 0x6be: 68, 0x6bf: 68, 0x6c0: 82, 0x6c1: 68, 0x6c2: 68, 0x6c3: 82, 0x6c4: 82, 0x6c5: 82, 0x6c6: 82, 0x6c7: 82, 0x6c8: 82, 0x6c9: 82, 0x6ca: 82, 0x6cb: 82, 0x6cc: 68, 0x6cd: 82, 0x6ce: 68, 0x6cf: 82, 0x6d0: 68, 0x6d1: 68, 0x6d2: 82, 0x6d3: 82, 0x6d5: 82, 0x6dd: 85, 0x6ee: 82, 0x6ef: 82, 0x6fa: 68, 0x6fb: 68, 0x6fc: 68, 0x6ff: 68, 0x710: 82, 0x712: 68, 0x713: 68, 0x714: 68, 0x715: 82, 0x716: 82, 0x717: 82, 0x718: 82, 0x719: 82, 0x71a: 68, 0x71b: 68, 0x71c: 68, 0x71d: 68, 0x71e: 82, 0x71f: 68, 0x720: 68, 0x721: 68, 0x722: 68, 0x723: 68, 0x724: 68, 0x725: 68, 0x726: 68, 0x727: 68, 0x728: 82, 0x729: 68, 0x72a: 82, 0x72b: 68, 0x72c: 82, 0x72d: 68, 0x72e: 68, 0x72f: 82, 0x74d: 82, 0x74e: 68, 0x74f: 68, 0x750: 68, 0x751: 68, 0x752: 68, 0x753: 68, 0x754: 68, 0x755: 68, 0x756: 68, 0x757: 68, 0x758: 68, 0x759: 82, 0x75a: 82, 0x75b: 82, 0x75c: 68, 
0x75d: 68, 0x75e: 68, 0x75f: 68, 0x760: 68, 0x761: 68, 0x762: 68, 0x763: 68, 0x764: 68, 0x765: 68, 0x766: 68, 0x767: 68, 0x768: 68, 0x769: 68, 0x76a: 68, 0x76b: 82, 0x76c: 82, 0x76d: 68, 0x76e: 68, 0x76f: 68, 0x770: 68, 0x771: 82, 0x772: 68, 0x773: 82, 0x774: 82, 0x775: 68, 0x776: 68, 0x777: 68, 0x778: 82, 0x779: 82, 0x77a: 68, 0x77b: 68, 0x77c: 68, 0x77d: 68, 0x77e: 68, 0x77f: 68, 0x7ca: 68, 0x7cb: 68, 0x7cc: 68, 0x7cd: 68, 0x7ce: 68, 0x7cf: 68, 0x7d0: 68, 0x7d1: 68, 0x7d2: 68, 0x7d3: 68, 0x7d4: 68, 0x7d5: 68, 0x7d6: 68, 0x7d7: 68, 0x7d8: 68, 0x7d9: 68, 0x7da: 68, 0x7db: 68, 0x7dc: 68, 0x7dd: 68, 0x7de: 68, 0x7df: 68, 0x7e0: 68, 0x7e1: 68, 0x7e2: 68, 0x7e3: 68, 0x7e4: 68, 0x7e5: 68, 0x7e6: 68, 0x7e7: 68, 0x7e8: 68, 0x7e9: 68, 0x7ea: 68, 0x7fa: 67, 0x840: 82, 0x841: 68, 0x842: 68, 0x843: 68, 0x844: 68, 0x845: 68, 0x846: 82, 0x847: 68, 0x848: 68, 0x849: 82, 0x84a: 68, 0x84b: 68, 0x84c: 68, 0x84d: 68, 0x84e: 68, 0x84f: 82, 0x850: 68, 0x851: 68, 0x852: 68, 0x853: 68, 0x854: 82, 0x855: 68, 0x856: 85, 0x857: 85, 0x858: 85, 0x8a0: 68, 0x8a2: 68, 0x8a3: 68, 0x8a4: 68, 0x8a5: 68, 0x8a6: 68, 0x8a7: 68, 0x8a8: 68, 0x8a9: 68, 0x8aa: 82, 0x8ab: 82, 0x8ac: 82, 0x1806: 85, 0x1807: 68, 0x180a: 67, 0x180e: 85, 0x1820: 68, 0x1821: 68, 0x1822: 68, 0x1823: 68, 0x1824: 68, 0x1825: 68, 0x1826: 68, 0x1827: 68, 0x1828: 68, 0x1829: 68, 0x182a: 68, 0x182b: 68, 0x182c: 68, 0x182d: 68, 0x182e: 68, 0x182f: 68, 0x1830: 68, 0x1831: 68, 0x1832: 68, 0x1833: 68, 0x1834: 68, 0x1835: 68, 0x1836: 68, 0x1837: 68, 0x1838: 68, 0x1839: 68, 0x183a: 68, 0x183b: 68, 0x183c: 68, 0x183d: 68, 0x183e: 68, 0x183f: 68, 0x1840: 68, 0x1841: 68, 0x1842: 68, 0x1843: 68, 0x1844: 68, 0x1845: 68, 0x1846: 68, 0x1847: 68, 0x1848: 68, 0x1849: 68, 0x184a: 68, 0x184b: 68, 0x184c: 68, 0x184d: 68, 0x184e: 68, 0x184f: 68, 0x1850: 68, 0x1851: 68, 0x1852: 68, 0x1853: 68, 0x1854: 68, 0x1855: 68, 0x1856: 68, 0x1857: 68, 0x1858: 68, 0x1859: 68, 0x185a: 68, 0x185b: 68, 0x185c: 68, 0x185d: 68, 0x185e: 68, 0x185f: 68, 0x1860: 68, 0x1861: 68, 0x1862: 68, 0x1863: 68, 0x1864: 68, 0x1865: 68, 0x1866: 68, 0x1867: 68, 0x1868: 68, 0x1869: 68, 0x186a: 68, 0x186b: 68, 0x186c: 68, 0x186d: 68, 0x186e: 68, 0x186f: 68, 0x1870: 68, 0x1871: 68, 0x1872: 68, 0x1873: 68, 0x1874: 68, 0x1875: 68, 0x1876: 68, 0x1877: 68, 0x1880: 85, 0x1881: 85, 0x1882: 85, 0x1883: 85, 0x1884: 85, 0x1885: 85, 0x1886: 85, 0x1887: 68, 0x1888: 68, 0x1889: 68, 0x188a: 68, 0x188b: 68, 0x188c: 68, 0x188d: 68, 0x188e: 68, 0x188f: 68, 0x1890: 68, 0x1891: 68, 0x1892: 68, 0x1893: 68, 0x1894: 68, 0x1895: 68, 0x1896: 68, 0x1897: 68, 0x1898: 68, 0x1899: 68, 0x189a: 68, 0x189b: 68, 0x189c: 68, 0x189d: 68, 0x189e: 68, 0x189f: 68, 0x18a0: 68, 0x18a1: 68, 0x18a2: 68, 0x18a3: 68, 0x18a4: 68, 0x18a5: 68, 0x18a6: 68, 0x18a7: 68, 0x18a8: 68, 0x18aa: 68, 0x200c: 85, 0x200d: 67, 0x2066: 85, 0x2067: 85, 0x2068: 85, 0x2069: 85, 0xa840: 68, 0xa841: 68, 0xa842: 68, 0xa843: 68, 0xa844: 68, 0xa845: 68, 0xa846: 68, 0xa847: 68, 0xa848: 68, 0xa849: 68, 0xa84a: 68, 0xa84b: 68, 0xa84c: 68, 0xa84d: 68, 0xa84e: 68, 0xa84f: 68, 0xa850: 68, 0xa851: 68, 0xa852: 68, 0xa853: 68, 0xa854: 68, 0xa855: 68, 0xa856: 68, 0xa857: 68, 0xa858: 68, 0xa859: 68, 0xa85a: 68, 0xa85b: 68, 0xa85c: 68, 0xa85d: 68, 0xa85e: 68, 0xa85f: 68, 0xa860: 68, 0xa861: 68, 0xa862: 68, 0xa863: 68, 0xa864: 68, 0xa865: 68, 0xa866: 68, 0xa867: 68, 0xa868: 68, 0xa869: 68, 0xa86a: 68, 0xa86b: 68, 0xa86c: 68, 0xa86d: 68, 0xa86e: 68, 0xa86f: 68, 0xa870: 68, 0xa871: 68, 0xa872: 76, 0xa873: 85, } codepoint_classes = { 'PVALID': ( 0x2d0000002e, 0x300000003a, 0x610000007b, 
0xdf000000f7, 0xf800000100, 0x10100000102, 0x10300000104, 0x10500000106, 0x10700000108, 0x1090000010a, 0x10b0000010c, 0x10d0000010e, 0x10f00000110, 0x11100000112, 0x11300000114, 0x11500000116, 0x11700000118, 0x1190000011a, 0x11b0000011c, 0x11d0000011e, 0x11f00000120, 0x12100000122, 0x12300000124, 0x12500000126, 0x12700000128, 0x1290000012a, 0x12b0000012c, 0x12d0000012e, 0x12f00000130, 0x13100000132, 0x13500000136, 0x13700000139, 0x13a0000013b, 0x13c0000013d, 0x13e0000013f, 0x14200000143, 0x14400000145, 0x14600000147, 0x14800000149, 0x14b0000014c, 0x14d0000014e, 0x14f00000150, 0x15100000152, 0x15300000154, 0x15500000156, 0x15700000158, 0x1590000015a, 0x15b0000015c, 0x15d0000015e, 0x15f00000160, 0x16100000162, 0x16300000164, 0x16500000166, 0x16700000168, 0x1690000016a, 0x16b0000016c, 0x16d0000016e, 0x16f00000170, 0x17100000172, 0x17300000174, 0x17500000176, 0x17700000178, 0x17a0000017b, 0x17c0000017d, 0x17e0000017f, 0x18000000181, 0x18300000184, 0x18500000186, 0x18800000189, 0x18c0000018e, 0x19200000193, 0x19500000196, 0x1990000019c, 0x19e0000019f, 0x1a1000001a2, 0x1a3000001a4, 0x1a5000001a6, 0x1a8000001a9, 0x1aa000001ac, 0x1ad000001ae, 0x1b0000001b1, 0x1b4000001b5, 0x1b6000001b7, 0x1b9000001bc, 0x1bd000001c4, 0x1ce000001cf, 0x1d0000001d1, 0x1d2000001d3, 0x1d4000001d5, 0x1d6000001d7, 0x1d8000001d9, 0x1da000001db, 0x1dc000001de, 0x1df000001e0, 0x1e1000001e2, 0x1e3000001e4, 0x1e5000001e6, 0x1e7000001e8, 0x1e9000001ea, 0x1eb000001ec, 0x1ed000001ee, 0x1ef000001f1, 0x1f5000001f6, 0x1f9000001fa, 0x1fb000001fc, 0x1fd000001fe, 0x1ff00000200, 0x20100000202, 0x20300000204, 0x20500000206, 0x20700000208, 0x2090000020a, 0x20b0000020c, 0x20d0000020e, 0x20f00000210, 0x21100000212, 0x21300000214, 0x21500000216, 0x21700000218, 0x2190000021a, 0x21b0000021c, 0x21d0000021e, 0x21f00000220, 0x22100000222, 0x22300000224, 0x22500000226, 0x22700000228, 0x2290000022a, 0x22b0000022c, 0x22d0000022e, 0x22f00000230, 0x23100000232, 0x2330000023a, 0x23c0000023d, 0x23f00000241, 0x24200000243, 0x24700000248, 0x2490000024a, 0x24b0000024c, 0x24d0000024e, 0x24f000002b0, 0x2b9000002c2, 0x2c6000002d2, 0x2ec000002ed, 0x2ee000002ef, 0x30000000340, 0x34200000343, 0x3460000034f, 0x35000000370, 0x37100000372, 0x37300000374, 0x37700000378, 0x37b0000037e, 0x39000000391, 0x3ac000003cf, 0x3d7000003d8, 0x3d9000003da, 0x3db000003dc, 0x3dd000003de, 0x3df000003e0, 0x3e1000003e2, 0x3e3000003e4, 0x3e5000003e6, 0x3e7000003e8, 0x3e9000003ea, 0x3eb000003ec, 0x3ed000003ee, 0x3ef000003f0, 0x3f3000003f4, 0x3f8000003f9, 0x3fb000003fd, 0x43000000460, 0x46100000462, 0x46300000464, 0x46500000466, 0x46700000468, 0x4690000046a, 0x46b0000046c, 0x46d0000046e, 0x46f00000470, 0x47100000472, 0x47300000474, 0x47500000476, 0x47700000478, 0x4790000047a, 0x47b0000047c, 0x47d0000047e, 0x47f00000480, 0x48100000482, 0x48300000488, 0x48b0000048c, 0x48d0000048e, 0x48f00000490, 0x49100000492, 0x49300000494, 0x49500000496, 0x49700000498, 0x4990000049a, 0x49b0000049c, 0x49d0000049e, 0x49f000004a0, 0x4a1000004a2, 0x4a3000004a4, 0x4a5000004a6, 0x4a7000004a8, 0x4a9000004aa, 0x4ab000004ac, 0x4ad000004ae, 0x4af000004b0, 0x4b1000004b2, 0x4b3000004b4, 0x4b5000004b6, 0x4b7000004b8, 0x4b9000004ba, 0x4bb000004bc, 0x4bd000004be, 0x4bf000004c0, 0x4c2000004c3, 0x4c4000004c5, 0x4c6000004c7, 0x4c8000004c9, 0x4ca000004cb, 0x4cc000004cd, 0x4ce000004d0, 0x4d1000004d2, 0x4d3000004d4, 0x4d5000004d6, 0x4d7000004d8, 0x4d9000004da, 0x4db000004dc, 0x4dd000004de, 0x4df000004e0, 0x4e1000004e2, 0x4e3000004e4, 0x4e5000004e6, 0x4e7000004e8, 0x4e9000004ea, 0x4eb000004ec, 0x4ed000004ee, 0x4ef000004f0, 
0x4f1000004f2, 0x4f3000004f4, 0x4f5000004f6, 0x4f7000004f8, 0x4f9000004fa, 0x4fb000004fc, 0x4fd000004fe, 0x4ff00000500, 0x50100000502, 0x50300000504, 0x50500000506, 0x50700000508, 0x5090000050a, 0x50b0000050c, 0x50d0000050e, 0x50f00000510, 0x51100000512, 0x51300000514, 0x51500000516, 0x51700000518, 0x5190000051a, 0x51b0000051c, 0x51d0000051e, 0x51f00000520, 0x52100000522, 0x52300000524, 0x52500000526, 0x52700000528, 0x5590000055a, 0x56100000587, 0x591000005be, 0x5bf000005c0, 0x5c1000005c3, 0x5c4000005c6, 0x5c7000005c8, 0x5d0000005eb, 0x5f0000005f3, 0x6100000061b, 0x62000000640, 0x64100000660, 0x66e00000675, 0x679000006d4, 0x6d5000006dd, 0x6df000006e9, 0x6ea000006f0, 0x6fa00000700, 0x7100000074b, 0x74d000007b2, 0x7c0000007f6, 0x8000000082e, 0x8400000085c, 0x8a0000008a1, 0x8a2000008ad, 0x8e4000008ff, 0x90000000958, 0x96000000964, 0x96600000970, 0x97100000978, 0x97900000980, 0x98100000984, 0x9850000098d, 0x98f00000991, 0x993000009a9, 0x9aa000009b1, 0x9b2000009b3, 0x9b6000009ba, 0x9bc000009c5, 0x9c7000009c9, 0x9cb000009cf, 0x9d7000009d8, 0x9e0000009e4, 0x9e6000009f2, 0xa0100000a04, 0xa0500000a0b, 0xa0f00000a11, 0xa1300000a29, 0xa2a00000a31, 0xa3200000a33, 0xa3500000a36, 0xa3800000a3a, 0xa3c00000a3d, 0xa3e00000a43, 0xa4700000a49, 0xa4b00000a4e, 0xa5100000a52, 0xa5c00000a5d, 0xa6600000a76, 0xa8100000a84, 0xa8500000a8e, 0xa8f00000a92, 0xa9300000aa9, 0xaaa00000ab1, 0xab200000ab4, 0xab500000aba, 0xabc00000ac6, 0xac700000aca, 0xacb00000ace, 0xad000000ad1, 0xae000000ae4, 0xae600000af0, 0xb0100000b04, 0xb0500000b0d, 0xb0f00000b11, 0xb1300000b29, 0xb2a00000b31, 0xb3200000b34, 0xb3500000b3a, 0xb3c00000b45, 0xb4700000b49, 0xb4b00000b4e, 0xb5600000b58, 0xb5f00000b64, 0xb6600000b70, 0xb7100000b72, 0xb8200000b84, 0xb8500000b8b, 0xb8e00000b91, 0xb9200000b96, 0xb9900000b9b, 0xb9c00000b9d, 0xb9e00000ba0, 0xba300000ba5, 0xba800000bab, 0xbae00000bba, 0xbbe00000bc3, 0xbc600000bc9, 0xbca00000bce, 0xbd000000bd1, 0xbd700000bd8, 0xbe600000bf0, 0xc0100000c04, 0xc0500000c0d, 0xc0e00000c11, 0xc1200000c29, 0xc2a00000c34, 0xc3500000c3a, 0xc3d00000c45, 0xc4600000c49, 0xc4a00000c4e, 0xc5500000c57, 0xc5800000c5a, 0xc6000000c64, 0xc6600000c70, 0xc8200000c84, 0xc8500000c8d, 0xc8e00000c91, 0xc9200000ca9, 0xcaa00000cb4, 0xcb500000cba, 0xcbc00000cc5, 0xcc600000cc9, 0xcca00000cce, 0xcd500000cd7, 0xcde00000cdf, 0xce000000ce4, 0xce600000cf0, 0xcf100000cf3, 0xd0200000d04, 0xd0500000d0d, 0xd0e00000d11, 0xd1200000d3b, 0xd3d00000d45, 0xd4600000d49, 0xd4a00000d4f, 0xd5700000d58, 0xd6000000d64, 0xd6600000d70, 0xd7a00000d80, 0xd8200000d84, 0xd8500000d97, 0xd9a00000db2, 0xdb300000dbc, 0xdbd00000dbe, 0xdc000000dc7, 0xdca00000dcb, 0xdcf00000dd5, 0xdd600000dd7, 0xdd800000de0, 0xdf200000df4, 0xe0100000e33, 0xe3400000e3b, 0xe4000000e4f, 0xe5000000e5a, 0xe8100000e83, 0xe8400000e85, 0xe8700000e89, 0xe8a00000e8b, 0xe8d00000e8e, 0xe9400000e98, 0xe9900000ea0, 0xea100000ea4, 0xea500000ea6, 0xea700000ea8, 0xeaa00000eac, 0xead00000eb3, 0xeb400000eba, 0xebb00000ebe, 0xec000000ec5, 0xec600000ec7, 0xec800000ece, 0xed000000eda, 0xede00000ee0, 0xf0000000f01, 0xf0b00000f0c, 0xf1800000f1a, 0xf2000000f2a, 0xf3500000f36, 0xf3700000f38, 0xf3900000f3a, 0xf3e00000f43, 0xf4400000f48, 0xf4900000f4d, 0xf4e00000f52, 0xf5300000f57, 0xf5800000f5c, 0xf5d00000f69, 0xf6a00000f6d, 0xf7100000f73, 0xf7400000f75, 0xf7a00000f81, 0xf8200000f85, 0xf8600000f93, 0xf9400000f98, 0xf9900000f9d, 0xf9e00000fa2, 0xfa300000fa7, 0xfa800000fac, 0xfad00000fb9, 0xfba00000fbd, 0xfc600000fc7, 0x10000000104a, 0x10500000109e, 0x10d0000010fb, 0x10fd00001100, 0x120000001249, 0x124a0000124e, 
0x125000001257, 0x125800001259, 0x125a0000125e, 0x126000001289, 0x128a0000128e, 0x1290000012b1, 0x12b2000012b6, 0x12b8000012bf, 0x12c0000012c1, 0x12c2000012c6, 0x12c8000012d7, 0x12d800001311, 0x131200001316, 0x13180000135b, 0x135d00001360, 0x138000001390, 0x13a0000013f5, 0x14010000166d, 0x166f00001680, 0x16810000169b, 0x16a0000016eb, 0x17000000170d, 0x170e00001715, 0x172000001735, 0x174000001754, 0x17600000176d, 0x176e00001771, 0x177200001774, 0x1780000017b4, 0x17b6000017d4, 0x17d7000017d8, 0x17dc000017de, 0x17e0000017ea, 0x18100000181a, 0x182000001878, 0x1880000018ab, 0x18b0000018f6, 0x19000000191d, 0x19200000192c, 0x19300000193c, 0x19460000196e, 0x197000001975, 0x1980000019ac, 0x19b0000019ca, 0x19d0000019da, 0x1a0000001a1c, 0x1a2000001a5f, 0x1a6000001a7d, 0x1a7f00001a8a, 0x1a9000001a9a, 0x1aa700001aa8, 0x1b0000001b4c, 0x1b5000001b5a, 0x1b6b00001b74, 0x1b8000001bf4, 0x1c0000001c38, 0x1c4000001c4a, 0x1c4d00001c7e, 0x1cd000001cd3, 0x1cd400001cf7, 0x1d0000001d2c, 0x1d2f00001d30, 0x1d3b00001d3c, 0x1d4e00001d4f, 0x1d6b00001d78, 0x1d7900001d9b, 0x1dc000001de7, 0x1dfc00001e00, 0x1e0100001e02, 0x1e0300001e04, 0x1e0500001e06, 0x1e0700001e08, 0x1e0900001e0a, 0x1e0b00001e0c, 0x1e0d00001e0e, 0x1e0f00001e10, 0x1e1100001e12, 0x1e1300001e14, 0x1e1500001e16, 0x1e1700001e18, 0x1e1900001e1a, 0x1e1b00001e1c, 0x1e1d00001e1e, 0x1e1f00001e20, 0x1e2100001e22, 0x1e2300001e24, 0x1e2500001e26, 0x1e2700001e28, 0x1e2900001e2a, 0x1e2b00001e2c, 0x1e2d00001e2e, 0x1e2f00001e30, 0x1e3100001e32, 0x1e3300001e34, 0x1e3500001e36, 0x1e3700001e38, 0x1e3900001e3a, 0x1e3b00001e3c, 0x1e3d00001e3e, 0x1e3f00001e40, 0x1e4100001e42, 0x1e4300001e44, 0x1e4500001e46, 0x1e4700001e48, 0x1e4900001e4a, 0x1e4b00001e4c, 0x1e4d00001e4e, 0x1e4f00001e50, 0x1e5100001e52, 0x1e5300001e54, 0x1e5500001e56, 0x1e5700001e58, 0x1e5900001e5a, 0x1e5b00001e5c, 0x1e5d00001e5e, 0x1e5f00001e60, 0x1e6100001e62, 0x1e6300001e64, 0x1e6500001e66, 0x1e6700001e68, 0x1e6900001e6a, 0x1e6b00001e6c, 0x1e6d00001e6e, 0x1e6f00001e70, 0x1e7100001e72, 0x1e7300001e74, 0x1e7500001e76, 0x1e7700001e78, 0x1e7900001e7a, 0x1e7b00001e7c, 0x1e7d00001e7e, 0x1e7f00001e80, 0x1e8100001e82, 0x1e8300001e84, 0x1e8500001e86, 0x1e8700001e88, 0x1e8900001e8a, 0x1e8b00001e8c, 0x1e8d00001e8e, 0x1e8f00001e90, 0x1e9100001e92, 0x1e9300001e94, 0x1e9500001e9a, 0x1e9c00001e9e, 0x1e9f00001ea0, 0x1ea100001ea2, 0x1ea300001ea4, 0x1ea500001ea6, 0x1ea700001ea8, 0x1ea900001eaa, 0x1eab00001eac, 0x1ead00001eae, 0x1eaf00001eb0, 0x1eb100001eb2, 0x1eb300001eb4, 0x1eb500001eb6, 0x1eb700001eb8, 0x1eb900001eba, 0x1ebb00001ebc, 0x1ebd00001ebe, 0x1ebf00001ec0, 0x1ec100001ec2, 0x1ec300001ec4, 0x1ec500001ec6, 0x1ec700001ec8, 0x1ec900001eca, 0x1ecb00001ecc, 0x1ecd00001ece, 0x1ecf00001ed0, 0x1ed100001ed2, 0x1ed300001ed4, 0x1ed500001ed6, 0x1ed700001ed8, 0x1ed900001eda, 0x1edb00001edc, 0x1edd00001ede, 0x1edf00001ee0, 0x1ee100001ee2, 0x1ee300001ee4, 0x1ee500001ee6, 0x1ee700001ee8, 0x1ee900001eea, 0x1eeb00001eec, 0x1eed00001eee, 0x1eef00001ef0, 0x1ef100001ef2, 0x1ef300001ef4, 0x1ef500001ef6, 0x1ef700001ef8, 0x1ef900001efa, 0x1efb00001efc, 0x1efd00001efe, 0x1eff00001f08, 0x1f1000001f16, 0x1f2000001f28, 0x1f3000001f38, 0x1f4000001f46, 0x1f5000001f58, 0x1f6000001f68, 0x1f7000001f71, 0x1f7200001f73, 0x1f7400001f75, 0x1f7600001f77, 0x1f7800001f79, 0x1f7a00001f7b, 0x1f7c00001f7d, 0x1fb000001fb2, 0x1fb600001fb7, 0x1fc600001fc7, 0x1fd000001fd3, 0x1fd600001fd8, 0x1fe000001fe3, 0x1fe400001fe8, 0x1ff600001ff7, 0x214e0000214f, 0x218400002185, 0x2c3000002c5f, 0x2c6100002c62, 0x2c6500002c67, 0x2c6800002c69, 0x2c6a00002c6b, 0x2c6c00002c6d, 
0x2c7100002c72, 0x2c7300002c75, 0x2c7600002c7c, 0x2c8100002c82, 0x2c8300002c84, 0x2c8500002c86, 0x2c8700002c88, 0x2c8900002c8a, 0x2c8b00002c8c, 0x2c8d00002c8e, 0x2c8f00002c90, 0x2c9100002c92, 0x2c9300002c94, 0x2c9500002c96, 0x2c9700002c98, 0x2c9900002c9a, 0x2c9b00002c9c, 0x2c9d00002c9e, 0x2c9f00002ca0, 0x2ca100002ca2, 0x2ca300002ca4, 0x2ca500002ca6, 0x2ca700002ca8, 0x2ca900002caa, 0x2cab00002cac, 0x2cad00002cae, 0x2caf00002cb0, 0x2cb100002cb2, 0x2cb300002cb4, 0x2cb500002cb6, 0x2cb700002cb8, 0x2cb900002cba, 0x2cbb00002cbc, 0x2cbd00002cbe, 0x2cbf00002cc0, 0x2cc100002cc2, 0x2cc300002cc4, 0x2cc500002cc6, 0x2cc700002cc8, 0x2cc900002cca, 0x2ccb00002ccc, 0x2ccd00002cce, 0x2ccf00002cd0, 0x2cd100002cd2, 0x2cd300002cd4, 0x2cd500002cd6, 0x2cd700002cd8, 0x2cd900002cda, 0x2cdb00002cdc, 0x2cdd00002cde, 0x2cdf00002ce0, 0x2ce100002ce2, 0x2ce300002ce5, 0x2cec00002ced, 0x2cee00002cf2, 0x2cf300002cf4, 0x2d0000002d26, 0x2d2700002d28, 0x2d2d00002d2e, 0x2d3000002d68, 0x2d7f00002d97, 0x2da000002da7, 0x2da800002daf, 0x2db000002db7, 0x2db800002dbf, 0x2dc000002dc7, 0x2dc800002dcf, 0x2dd000002dd7, 0x2dd800002ddf, 0x2de000002e00, 0x2e2f00002e30, 0x300500003008, 0x302a0000302e, 0x303c0000303d, 0x304100003097, 0x30990000309b, 0x309d0000309f, 0x30a1000030fb, 0x30fc000030ff, 0x31050000312e, 0x31a0000031bb, 0x31f000003200, 0x340000004db6, 0x4e0000009fcd, 0xa0000000a48d, 0xa4d00000a4fe, 0xa5000000a60d, 0xa6100000a62c, 0xa6410000a642, 0xa6430000a644, 0xa6450000a646, 0xa6470000a648, 0xa6490000a64a, 0xa64b0000a64c, 0xa64d0000a64e, 0xa64f0000a650, 0xa6510000a652, 0xa6530000a654, 0xa6550000a656, 0xa6570000a658, 0xa6590000a65a, 0xa65b0000a65c, 0xa65d0000a65e, 0xa65f0000a660, 0xa6610000a662, 0xa6630000a664, 0xa6650000a666, 0xa6670000a668, 0xa6690000a66a, 0xa66b0000a66c, 0xa66d0000a670, 0xa6740000a67e, 0xa67f0000a680, 0xa6810000a682, 0xa6830000a684, 0xa6850000a686, 0xa6870000a688, 0xa6890000a68a, 0xa68b0000a68c, 0xa68d0000a68e, 0xa68f0000a690, 0xa6910000a692, 0xa6930000a694, 0xa6950000a696, 0xa6970000a698, 0xa69f0000a6e6, 0xa6f00000a6f2, 0xa7170000a720, 0xa7230000a724, 0xa7250000a726, 0xa7270000a728, 0xa7290000a72a, 0xa72b0000a72c, 0xa72d0000a72e, 0xa72f0000a732, 0xa7330000a734, 0xa7350000a736, 0xa7370000a738, 0xa7390000a73a, 0xa73b0000a73c, 0xa73d0000a73e, 0xa73f0000a740, 0xa7410000a742, 0xa7430000a744, 0xa7450000a746, 0xa7470000a748, 0xa7490000a74a, 0xa74b0000a74c, 0xa74d0000a74e, 0xa74f0000a750, 0xa7510000a752, 0xa7530000a754, 0xa7550000a756, 0xa7570000a758, 0xa7590000a75a, 0xa75b0000a75c, 0xa75d0000a75e, 0xa75f0000a760, 0xa7610000a762, 0xa7630000a764, 0xa7650000a766, 0xa7670000a768, 0xa7690000a76a, 0xa76b0000a76c, 0xa76d0000a76e, 0xa76f0000a770, 0xa7710000a779, 0xa77a0000a77b, 0xa77c0000a77d, 0xa77f0000a780, 0xa7810000a782, 0xa7830000a784, 0xa7850000a786, 0xa7870000a789, 0xa78c0000a78d, 0xa78e0000a78f, 0xa7910000a792, 0xa7930000a794, 0xa7a10000a7a2, 0xa7a30000a7a4, 0xa7a50000a7a6, 0xa7a70000a7a8, 0xa7a90000a7aa, 0xa7fa0000a828, 0xa8400000a874, 0xa8800000a8c5, 0xa8d00000a8da, 0xa8e00000a8f8, 0xa8fb0000a8fc, 0xa9000000a92e, 0xa9300000a954, 0xa9800000a9c1, 0xa9cf0000a9da, 0xaa000000aa37, 0xaa400000aa4e, 0xaa500000aa5a, 0xaa600000aa77, 0xaa7a0000aa7c, 0xaa800000aac3, 0xaadb0000aade, 0xaae00000aaf0, 0xaaf20000aaf7, 0xab010000ab07, 0xab090000ab0f, 0xab110000ab17, 0xab200000ab27, 0xab280000ab2f, 0xabc00000abeb, 0xabec0000abee, 0xabf00000abfa, 0xac000000d7a4, 0xfa0e0000fa10, 0xfa110000fa12, 0xfa130000fa15, 0xfa1f0000fa20, 0xfa210000fa22, 0xfa230000fa25, 0xfa270000fa2a, 0xfb1e0000fb1f, 0xfe200000fe27, 0xfe730000fe74, 0x100000001000c, 
0x1000d00010027, 0x100280001003b, 0x1003c0001003e, 0x1003f0001004e, 0x100500001005e, 0x10080000100fb, 0x101fd000101fe, 0x102800001029d, 0x102a0000102d1, 0x103000001031f, 0x1033000010341, 0x103420001034a, 0x103800001039e, 0x103a0000103c4, 0x103c8000103d0, 0x104280001049e, 0x104a0000104aa, 0x1080000010806, 0x1080800010809, 0x1080a00010836, 0x1083700010839, 0x1083c0001083d, 0x1083f00010856, 0x1090000010916, 0x109200001093a, 0x10980000109b8, 0x109be000109c0, 0x10a0000010a04, 0x10a0500010a07, 0x10a0c00010a14, 0x10a1500010a18, 0x10a1900010a34, 0x10a3800010a3b, 0x10a3f00010a40, 0x10a6000010a7d, 0x10b0000010b36, 0x10b4000010b56, 0x10b6000010b73, 0x10c0000010c49, 0x1100000011047, 0x1106600011070, 0x11080000110bb, 0x110d0000110e9, 0x110f0000110fa, 0x1110000011135, 0x1113600011140, 0x11180000111c5, 0x111d0000111da, 0x11680000116b8, 0x116c0000116ca, 0x120000001236f, 0x130000001342f, 0x1680000016a39, 0x16f0000016f45, 0x16f5000016f7f, 0x16f8f00016fa0, 0x1b0000001b002, 0x200000002a6d7, 0x2a7000002b735, 0x2b7400002b81e, ), 'CONTEXTJ': ( 0x200c0000200e, ), 'CONTEXTO': ( 0xb7000000b8, 0x37500000376, 0x5f3000005f5, 0x6600000066a, 0x6f0000006fa, 0x30fb000030fc, ), }
32999
19.807062
57
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/pkg_resources/__init__.py
# coding: utf-8 """ Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import operator import platform import collections import plistlib import email.parser import errno import tempfile import textwrap import itertools import inspect from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp from pip._vendor import six from pip._vendor.six.moves import urllib, map, filter # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split try: import importlib.machinery as importlib_machinery # access attribute to force import under delayed import mechanisms. importlib_machinery.__name__ except ImportError: importlib_machinery = None from . import py31compat from pip._vendor import appdirs from pip._vendor import packaging __import__('pip._vendor.packaging.version') __import__('pip._vendor.packaging.specifiers') __import__('pip._vendor.packaging.requirements') __import__('pip._vendor.packaging.markers') if (3, 0) < sys.version_info < (3, 3): raise RuntimeError("Python 3.3 or later is required") if six.PY2: # Those builtin exceptions are only defined in Python 3 PermissionError = None NotADirectoryError = None # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None add_activation_listener = None resources_stream = None cleanup_resources = None resource_dir = None resource_stream = None set_extraction_path = None resource_isdir = None resource_string = None iter_entry_points = None resource_listdir = None resource_filename = None resource_exists = None _distribution_finders = None _namespace_handlers = None _namespace_packages = None class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. 
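For illustration, ``parse_version`` (defined just below) falls back to a legacy, non-PEP 440 ordering for such values; a minimal sketch::

    parse_version('1.0.2')          # packaging.version.Version
    parse_version('not-a-version')  # packaging.version.LegacyVersion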
""" def parse_version(v): try: return packaging.version.Version(v) except packaging.version.InvalidVersion: return packaging.version.LegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_' + v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_' + _state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: # not Mac OS X pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__ + repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. 
Should be initialized with the installed Distribution and the requested Requirement. """ _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. """ _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ("The '{self.req}' distribution was not found " "and is required by {self.requirers_str}") @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. """ _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. 
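Example (output is platform-dependent; the values shown are illustrative)::

    get_build_platform()   # e.g. 'linux-x86_64' or 'macosx-10.12-x86_64'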
""" try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % ( int(version[0]), int(version[1]), _macosx_arch(machine), ) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. """ if provided is None or required is None or provided == required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? 
if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, six.string_types): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? (like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet(object): """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) @classmethod def _build_master(cls): """ Prepare the master working set. 
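If ``__main__.__requires__`` is declared and conflicts with what is already on ``sys.path``, the working set is rebuilt from those requirements instead (see ``_build_from_requirements`` below). A hypothetical sketch (assumes ``somedist`` is actually installed)::

    import __main__
    __main__.__requires__ = ['somedist>=1.0']   # hypothetical requirement
    ws = WorkingSet._build_master()             # honors __requires__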
""" ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws @classmethod def _build_from_requirements(cls, req_spec): """ Build a working set from a requirement spec. Rewrites sys.path. """ # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. """ dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ for dist in self: entries = dist.get_entry_map(group) if name is None: for ep in entries.values(): yield ep elif name in entries: yield entries[name] def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key] = 1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). 
`dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry, replace=replace) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry, []) keys2 = self.entry_keys.setdefault(dist.location, []) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False, extras=None): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. `extras` is a list of the extras to be used with these requirements. This is important because extra requirements may look like `my_req; extra = "my_extra"`, which would otherwise be interpreted as a purely optional requirement. Instead, we want to be able to assert that these requirements are truly required. """ # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] req_extras = _ReqExtras() # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. 
required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue if not req_extras.markers_pass(req, extras): continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and working set to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match( req, ws, installer, replace_conflicting=replace_conflicting ) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency dependent_req = required_by[req] raise VersionConflict(dist, req).with_context(dependent_req) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) req_extras[new_requirement] = req.extras processed[req] = True # return list of distros to activate return to_activate def find_plugins( self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` that contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance.
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback, existing=True): """Invoke `callback` for all distributions If `existing=True` (default), call on all existing ones, as well. """ if callback in self.callbacks: return self.callbacks.append(callback) if not existing: return for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class _ReqExtras(dict): """ Map each requirement to the extras that demanded it. """ def markers_pass(self, req, extras=None): """ Evaluate markers for req against each extra that demanded it. Return False if the req has a marker and fails evaluation. Otherwise, return True. """ extra_evals = ( req.marker.evaluate({'extra': extra}) for extra in self.get(req, ()) + (extras or (None,)) ) return not req.marker or any(extra_evals) class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__( self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. 
""" self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. """ py_compat = ( self.python is None or dist.py_version is None or dist.py_version == self.python ) return py_compat and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match( self, req, working_set, installer=None, replace_conflicting=False): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ try: dist = working_set.find(req) except VersionConflict: if not replace_conflicting: raise dist = None if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() tmpl = textwrap.dedent(""" Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: {old_exc} The Python egg cache directory is currently set to: {cache_path} Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """).lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. 
`archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name + '-tmp', *names) try: _bypass_ensure_directory(target_path) except Exception: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ( "%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." % path ) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) 
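A sketch of pairing a custom extraction path with explicit cleanup (the path shown is hypothetical)::

    import atexit
    manager = ResourceManager()
    manager.set_extraction_path('/var/cache/my-app-eggs')  # hypothetical
    atexit.register(manager.cleanup_resources)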
""" if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """ Return the ``PYTHON_EGG_CACHE`` environment variable or a platform-relevant user cache dir for an app named "Python-Eggs". """ return ( os.environ.get('PYTHON_EGG_CACHE') or appdirs.user_cache_dir(appname='Python-Eggs') ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ', '.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-', '_') def invalid_marker(text): """ Validate text as a PEP 508 environment marker; return an exception if invalid or False otherwise. """ try: evaluate_marker(text) except SyntaxError as e: e.filename = None e.lineno = None return e return False def evaluate_marker(text, extra=None): """ Evaluate a PEP 508 environment marker. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'pyparsing' module. 
""" try: marker = packaging.markers.Marker(text) return marker.evaluate() except packaging.markers.InvalidMarker as e: raise SyntaxError(e) class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info, name)) def get_metadata(self, name): if not self.egg_info: return "" value = self._get(self._fn(self.egg_info, name)) return value.decode('utf-8') if six.PY3 else value def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError( "Script {script!r} not found in metadata at {self.egg_info!r}" .format(**locals()), ) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): source = open(script_filename).read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): if resource_name: return os.path.join(base, *resource_name.split('/')) return base def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): # we assume here that our metadata may be nested inside a "basket" # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None while path != old: if _is_egg_path(path): self.egg_name = 
os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path break old = path path, base = os.path.split(path) class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() @classmethod def _register(cls): loader_cls = getattr( importlib_machinery, 'SourceFileLoader', type(None), ) register_loader_type(loader_cls, cls) DefaultProvider._register() class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" module_path = None _isdir = _has = lambda self, path: False def _get(self, path): return '' def _listdir(self, path): return [] def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with zipfile.ZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. """ path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): EggProvider.__init__(self, module) self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive fspath = fspath.rstrip(os.sep) if fspath == self.loader.archive: return '' if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. 
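        # e.g. (illustrative, with egg_root equal to the archive
        # '/srv/app/pkg.egg'):
        #   'EGG-INFO/top_level.txt' -> ['EGG-INFO', 'top_level.txt']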
# pseudo-fs path fspath = self.zip_pre + zip_path if fspath.startswith(self.egg_root + os.sep): return fspath[len(self.egg_root) + 1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp( ".$extract", dir=os.path.dirname(real_path), ) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name == 'nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size != size or stat.st_mtime != timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. 
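The single metadata name it serves can then be queried, e.g.::

    metadata.has_metadata('PKG-INFO')   # True if the file exists
    metadata.get_metadata('PKG-INFO')   # the file's text content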
""" def __init__(self, path): self.path = path def has_metadata(self, name): return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): if name != 'PKG-INFO': raise KeyError("No metadata except PKG-INFO is available") with io.open(self.path, encoding='utf-8', errors="replace") as f: metadata = f.read() self._warn_on_replacement(metadata) return metadata def _warn_on_replacement(self, metadata): # Python 2.7 compat for: replacement_char = '�' replacement_char = b'\xef\xbf\xbd'.decode('utf-8') if replacement_char in metadata: tmpl = "{self.path} could not be properly decoded in UTF-8" msg = tmpl.format(**locals()) warnings.warn(msg) def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive + os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders={}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
""" if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir('/'): if _is_egg_path(subitem): subpath = os.path.join(path_item, subitem) dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) for dist in dists: yield dist elif subitem.lower().endswith('.dist-info'): subpath = os.path.join(path_item, subitem) submeta = EggMetadata(zipimport.zipimporter(subpath)) submeta.egg_info = subpath yield Distribution.from_location(path_item, subitem, submeta) register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def _by_version_descending(names): """ Given a list of filenames, return them in descending order by version number. >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' >>> _by_version_descending(names) ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] """ def _by_version(name): """ Parse each component of the filename """ name, ext = os.path.splitext(name) parts = itertools.chain(name.split('-'), [ext]) return [packaging.version.parse(part) for part in parts] return sorted(names, key=_by_version, reverse=True) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if _is_unpacked_egg(path_item): yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item, 'EGG-INFO') ) ) return entries = safe_listdir(path_item) # for performance, before sorting by version, # screen entries for only those that will yield # distributions filtered = ( entry for entry in entries if dist_factory(path_item, entry, only) ) # scan for .egg and .egg-info in directory path_item_entries = _by_version_descending(filtered) for entry in path_item_entries: fullpath = os.path.join(path_item, entry) factory = dist_factory(path_item, entry, only) for dist in factory(fullpath): yield dist def dist_factory(path_item, entry, only): """ Return a dist_factory for a path_item and entry """ lower = entry.lower() is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info'))) return ( distributions_from_metadata if is_meta else find_distributions if not only and _is_egg_path(entry) else resolve_egg_link if not only and lower.endswith('.egg-link') else NoDists() ) class NoDists: """ >>> bool(NoDists()) False >>> list(NoDists()('anything')) [] """ def __bool__(self): return False if six.PY2: __nonzero__ = __bool__ def __call__(self, fullpath): return iter(()) def safe_listdir(path): """ Attempt to list contents of path, but suppress some exceptions. 
""" try: return os.listdir(path) except (PermissionError, NotADirectoryError): pass except OSError as e: # Ignore the directory if does not exist, not a directory or # permission denied ignorable = ( e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) # Python 2 on Windows needs to be handled this way :( or getattr(e, "winerror", None) == 267 ) if not ignorable: raise return () def distributions_from_metadata(path): root = os.path.dirname(path) if os.path.isdir(path): if len(os.listdir(path)) == 0: # empty metadata dir; skip return metadata = PathMetadata(root, path) else: metadata = FileMetadata(path) entry = os.path.basename(path) yield Distribution.from_location( root, entry, metadata, precedence=DEVELOP_DIST, ) def non_empty_lines(path): """ Yield non-empty lines from file at path """ with open(path) as f: for line in f: line = line.strip() if line: yield line def resolve_egg_link(path): """ Given a path to an .egg-link, resolve distributions present in the referenced path. """ referenced_paths = non_empty_lines(path) resolved_paths = ( os.path.join(os.path.dirname(path), ref) for ref in referenced_paths ) dist_groups = map(find_distributions, resolved_paths) return next(dist_groups, ()) register_finder(pkgutil.ImpImporter, find_on_path) if hasattr(importlib_machinery, 'FileFinder'): register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. """ _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module, '__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) _rebuild_mod_path(path, packageName, module) return subpath def _rebuild_mod_path(orig_path, package_name, module): """ Rebuild module.__path__ ensuring that all entries are ordered corresponding to their sys.path order """ sys_path = [_normalize_cached(p) for p in sys.path] def safe_sys_path_index(entry): """ Workaround for #520 and #513. 
""" try: return sys_path.index(entry) except ValueError: return float('inf') def position_in_sys_path(path): """ Return the ordinal of the path based on its position in sys.path """ path_parts = path.split(os.sep) module_parts = package_name.count('.') + 1 parts = path_parts[:-module_parts] return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) if not isinstance(orig_path, list): # Is this behavior useful when module.__path__ is not a list? return orig_path.sort(key=position_in_sys_path) module.__path__[:] = [_normalize_cached(p) for p in orig_path] def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent, []).append(packageName) _namespace_packages.setdefault(packageName, []) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent, ()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item) == normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) if hasattr(importlib_machinery, 'FileFinder'): register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return result def _is_egg_path(path): """ Determine if given path appears to be an egg. """ return path.lower().endswith('.egg') def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. 
""" return ( _is_egg_path(path) and os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) ) def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, six.string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = tuple(extras) self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", DeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) # Get the requirements for this entry point with all its extras and # then resolve them. We have to pass `extras` along when resolving so # that the working set knows what extras we want. Otherwise, for # dist-info distributions, the working set will assume that the # requirements for that extra are purely optional and skip over them. 
reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer, extras=self.extras) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name] = ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urllib.parse.urlparse(location) if parsed[-1].startswith('md5='): return urllib.parse.urlunparse(parsed[:-1] + ('',)) return location def _version_from_file(lines): """ Given an iterable of lines from a Metadata file, return the value of the Version field, if present, or None otherwise. 
""" def is_version_line(line): return line.lower().startswith('version:') version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') _, _, value = line.partition(':') return safe_version(value.strip()) or None class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__( self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None, **kw): project_name, version, py_version, platform = [None] * 4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: cls = _distributionImpl[ext.lower()] match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name', 'ver', 'pyver', 'plat' ) return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw )._reload_version() def _reload_version(self): return self @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError: version = _version_from_file(self._get_metadata(self.PKG_INFO)) if version is None: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self) return version @property def _dep_map(self): """ A map of extra to its list of (direct) requirements for this distribution, including the null extra. """ try: return self.__dep_map except AttributeError: self.__dep_map = self._filter_extras(self._build_dep_map()) return self.__dep_map @staticmethod def _filter_extras(dm): """ Given a mapping of extras to dependencies, strip off environment markers and filter out any dependencies not matching the markers. """ for extra in list(filter(None, dm)): new_extra = extra reqs = dm.pop(extra) new_extra, _, marker = extra.partition(':') fails_marker = marker and ( invalid_marker(marker) or not evaluate_marker(marker) ) if fails_marker: reqs = [] new_extra = safe_extra(new_extra) or None dm.setdefault(new_extra, []).extend(reqs) return dm def _build_dep_map(self): dm = {} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): dm.setdefault(extra, []).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self, path=None, replace=False): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path, replace=replace) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not 
found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group, {}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) def insert_on(self, path, loc=None, replace=False): """Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. """ loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self, **kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class EggInfoDistribution(Distribution): def _reload_version(self): """ Packages installed by distutils (e.g. numpy or scipy), which uses an old safe_version, and so their version numbers can get mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae to 1.11.0.dev0_2329eae). 
These distributions will not be parsed properly downstream by Distribution and safe_version, so take an extra step and try to get the version number from the metadata file itself instead of the filename. """ md_version = _version_from_file(self._get_metadata(self.PKG_INFO)) if md_version: self._version = md_version return self class DistInfoDistribution(Distribution): """ Wrap an actual or potential sys.path entry w/metadata, .dist-info style. """ PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _compute_dependencies(self): """Recompute this distribution's dependencies.""" dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: reqs.extend(parse_requirements(req)) def reqs_for_extra(extra): for req in reqs: if not req.marker or req.marker.evaluate({'extra': extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: s_extra = safe_extra(extra.strip()) dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': EggInfoDistribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args, **kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) class RequirementParseError(ValueError): def __str__(self): return ' '.join(self.args) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) for line in lines: # Drop comments -- a hash without a space may be in a URL. if ' #' in line: line = line[:line.find(' #')] # If there is a line continuation, drop it, and append the next line. 
if line.endswith('\\'): line = line[:-2].strip() try: line += next(lines) except StopIteration: return yield Requirement(line) class Requirement(packaging.requirements.Requirement): def __init__(self, requirement_string): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" try: super(Requirement, self).__init__(requirement_string) except packaging.requirements.InvalidRequirement as e: raise RequirementParseError(str(e)) self.unsafe_name = self.name project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() self.specs = [ (spec.operator, spec.version) for spec in self.specifier] self.extras = tuple(map(safe_extra, self.extras)) self.hashCmp = ( self.key, self.specifier, frozenset(self.extras), str(self.marker) if self.marker else None, ) self.__hash = hash(self.hashCmp) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): req, = parse_requirements(s) return req def _always_object(classes): """ Ensure object appears in the mro even for old-style classes. """ if object not in classes: return classes + (object,) return classes def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) for t in types: if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) py31compat.makedirs(dirname, exist_ok=True) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, 0o755) def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args, **kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args, **kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. 
warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager g.update( (name, getattr(manager, name)) for name in dir(manager) if not name.startswith('_') ) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. Invocation by other packages is unsupported and done at their own risk. """ working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path with replace=False and # ensure that all distributions added to the working set in the future # (e.g. by calling ``require()``) will get activated as well, # with higher priority (replace=True). tuple( dist.activate(replace=False) for dist in working_set ) add_activation_listener( lambda dist: dist.activate(replace=True), existing=False, ) working_set.entries = [] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals())
103,477
32.102367
79
py
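A hedged usage sketch for two of the public parsing helpers defined in the pkg_resources module above (EntryPoint.parse and Requirement.parse). The entry-point string and requirement specifier are invented for illustration, and the vendored import path is an assumption; the top-level pkg_resources distribution exposes the same names.

# Hypothetical entry-point and requirement strings; only the parse APIs
# shown in the module above are exercised.
from pip._vendor.pkg_resources import EntryPoint, Requirement

ep = EntryPoint.parse('main = mypkg.cli:run [dev]')   # name = module:attrs [extras]
assert ep.name == 'main'
assert ep.module_name == 'mypkg.cli'
assert ep.attrs == ('run',)
assert ep.extras == ('dev',)

req = Requirement.parse('SomePackage[extra1]>=1.0,<2.0')
assert req.key == 'somepackage'        # project name is lower-cased into .key
assert req.extras == ('extra1',)
assert ('>=', '1.0') in req.specs      # (operator, version) pairs from the specifier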
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/pkg_resources/py31compat.py
import os
import errno
import sys


def _makedirs_31(path, exist_ok=False):
    try:
        os.makedirs(path)
    except OSError as exc:
        if not exist_ok or exc.errno != errno.EEXIST:
            raise


# rely on compatibility behavior until mode considerations
# and exist_ok considerations are disentangled.
# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
needs_makedirs = (
    sys.version_info < (3, 2, 5) or
    (3, 3) <= sys.version_info < (3, 3, 6) or
    (3, 4) <= sys.version_info < (3, 4, 1)
)
makedirs = _makedirs_31 if needs_makedirs else os.makedirs
600
25.130435
73
py
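A minimal sketch of the makedirs backport just defined: on the affected interpreter versions the module substitutes _makedirs_31, which suppresses only EEXIST when exist_ok is true. The import path and the directory name are assumptions for illustration.

from pip._vendor.pkg_resources.py31compat import makedirs  # import path assumed

makedirs('/tmp/demo/nested', exist_ok=True)  # creates intermediate directories
makedirs('/tmp/demo/nested', exist_ok=True)  # second call is a no-op, not an error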
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/webencodings/labels.py
""" webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { 'unicode-1-1-utf-8': 'utf-8', 'utf-8': 'utf-8', 'utf8': 'utf-8', '866': 'ibm866', 'cp866': 'ibm866', 'csibm866': 'ibm866', 'ibm866': 'ibm866', 'csisolatin2': 'iso-8859-2', 'iso-8859-2': 'iso-8859-2', 'iso-ir-101': 'iso-8859-2', 'iso8859-2': 'iso-8859-2', 'iso88592': 'iso-8859-2', 'iso_8859-2': 'iso-8859-2', 'iso_8859-2:1987': 'iso-8859-2', 'l2': 'iso-8859-2', 'latin2': 'iso-8859-2', 'csisolatin3': 'iso-8859-3', 'iso-8859-3': 'iso-8859-3', 'iso-ir-109': 'iso-8859-3', 'iso8859-3': 'iso-8859-3', 'iso88593': 'iso-8859-3', 'iso_8859-3': 'iso-8859-3', 'iso_8859-3:1988': 'iso-8859-3', 'l3': 'iso-8859-3', 'latin3': 'iso-8859-3', 'csisolatin4': 'iso-8859-4', 'iso-8859-4': 'iso-8859-4', 'iso-ir-110': 'iso-8859-4', 'iso8859-4': 'iso-8859-4', 'iso88594': 'iso-8859-4', 'iso_8859-4': 'iso-8859-4', 'iso_8859-4:1988': 'iso-8859-4', 'l4': 'iso-8859-4', 'latin4': 'iso-8859-4', 'csisolatincyrillic': 'iso-8859-5', 'cyrillic': 'iso-8859-5', 'iso-8859-5': 'iso-8859-5', 'iso-ir-144': 'iso-8859-5', 'iso8859-5': 'iso-8859-5', 'iso88595': 'iso-8859-5', 'iso_8859-5': 'iso-8859-5', 'iso_8859-5:1988': 'iso-8859-5', 'arabic': 'iso-8859-6', 'asmo-708': 'iso-8859-6', 'csiso88596e': 'iso-8859-6', 'csiso88596i': 'iso-8859-6', 'csisolatinarabic': 'iso-8859-6', 'ecma-114': 'iso-8859-6', 'iso-8859-6': 'iso-8859-6', 'iso-8859-6-e': 'iso-8859-6', 'iso-8859-6-i': 'iso-8859-6', 'iso-ir-127': 'iso-8859-6', 'iso8859-6': 'iso-8859-6', 'iso88596': 'iso-8859-6', 'iso_8859-6': 'iso-8859-6', 'iso_8859-6:1987': 'iso-8859-6', 'csisolatingreek': 'iso-8859-7', 'ecma-118': 'iso-8859-7', 'elot_928': 'iso-8859-7', 'greek': 'iso-8859-7', 'greek8': 'iso-8859-7', 'iso-8859-7': 'iso-8859-7', 'iso-ir-126': 'iso-8859-7', 'iso8859-7': 'iso-8859-7', 'iso88597': 'iso-8859-7', 'iso_8859-7': 'iso-8859-7', 'iso_8859-7:1987': 'iso-8859-7', 'sun_eu_greek': 'iso-8859-7', 'csiso88598e': 'iso-8859-8', 'csisolatinhebrew': 'iso-8859-8', 'hebrew': 'iso-8859-8', 'iso-8859-8': 'iso-8859-8', 'iso-8859-8-e': 'iso-8859-8', 'iso-ir-138': 'iso-8859-8', 'iso8859-8': 'iso-8859-8', 'iso88598': 'iso-8859-8', 'iso_8859-8': 'iso-8859-8', 'iso_8859-8:1988': 'iso-8859-8', 'visual': 'iso-8859-8', 'csiso88598i': 'iso-8859-8-i', 'iso-8859-8-i': 'iso-8859-8-i', 'logical': 'iso-8859-8-i', 'csisolatin6': 'iso-8859-10', 'iso-8859-10': 'iso-8859-10', 'iso-ir-157': 'iso-8859-10', 'iso8859-10': 'iso-8859-10', 'iso885910': 'iso-8859-10', 'l6': 'iso-8859-10', 'latin6': 'iso-8859-10', 'iso-8859-13': 'iso-8859-13', 'iso8859-13': 'iso-8859-13', 'iso885913': 'iso-8859-13', 'iso-8859-14': 'iso-8859-14', 'iso8859-14': 'iso-8859-14', 'iso885914': 'iso-8859-14', 'csisolatin9': 'iso-8859-15', 'iso-8859-15': 'iso-8859-15', 'iso8859-15': 'iso-8859-15', 'iso885915': 'iso-8859-15', 'iso_8859-15': 'iso-8859-15', 'l9': 'iso-8859-15', 'iso-8859-16': 'iso-8859-16', 'cskoi8r': 'koi8-r', 'koi': 'koi8-r', 'koi8': 'koi8-r', 'koi8-r': 'koi8-r', 'koi8_r': 'koi8-r', 'koi8-u': 'koi8-u', 'csmacintosh': 'macintosh', 'mac': 'macintosh', 'macintosh': 'macintosh', 'x-mac-roman': 'macintosh', 'dos-874': 'windows-874', 'iso-8859-11': 'windows-874', 'iso8859-11': 'windows-874', 'iso885911': 'windows-874', 'tis-620': 'windows-874', 'windows-874': 'windows-874', 'cp1250': 'windows-1250', 'windows-1250': 'windows-1250', 'x-cp1250': 'windows-1250', 'cp1251': 'windows-1251', 
'windows-1251': 'windows-1251', 'x-cp1251': 'windows-1251', 'ansi_x3.4-1968': 'windows-1252', 'ascii': 'windows-1252', 'cp1252': 'windows-1252', 'cp819': 'windows-1252', 'csisolatin1': 'windows-1252', 'ibm819': 'windows-1252', 'iso-8859-1': 'windows-1252', 'iso-ir-100': 'windows-1252', 'iso8859-1': 'windows-1252', 'iso88591': 'windows-1252', 'iso_8859-1': 'windows-1252', 'iso_8859-1:1987': 'windows-1252', 'l1': 'windows-1252', 'latin1': 'windows-1252', 'us-ascii': 'windows-1252', 'windows-1252': 'windows-1252', 'x-cp1252': 'windows-1252', 'cp1253': 'windows-1253', 'windows-1253': 'windows-1253', 'x-cp1253': 'windows-1253', 'cp1254': 'windows-1254', 'csisolatin5': 'windows-1254', 'iso-8859-9': 'windows-1254', 'iso-ir-148': 'windows-1254', 'iso8859-9': 'windows-1254', 'iso88599': 'windows-1254', 'iso_8859-9': 'windows-1254', 'iso_8859-9:1989': 'windows-1254', 'l5': 'windows-1254', 'latin5': 'windows-1254', 'windows-1254': 'windows-1254', 'x-cp1254': 'windows-1254', 'cp1255': 'windows-1255', 'windows-1255': 'windows-1255', 'x-cp1255': 'windows-1255', 'cp1256': 'windows-1256', 'windows-1256': 'windows-1256', 'x-cp1256': 'windows-1256', 'cp1257': 'windows-1257', 'windows-1257': 'windows-1257', 'x-cp1257': 'windows-1257', 'cp1258': 'windows-1258', 'windows-1258': 'windows-1258', 'x-cp1258': 'windows-1258', 'x-mac-cyrillic': 'x-mac-cyrillic', 'x-mac-ukrainian': 'x-mac-cyrillic', 'chinese': 'gbk', 'csgb2312': 'gbk', 'csiso58gb231280': 'gbk', 'gb2312': 'gbk', 'gb_2312': 'gbk', 'gb_2312-80': 'gbk', 'gbk': 'gbk', 'iso-ir-58': 'gbk', 'x-gbk': 'gbk', 'gb18030': 'gb18030', 'hz-gb-2312': 'hz-gb-2312', 'big5': 'big5', 'big5-hkscs': 'big5', 'cn-big5': 'big5', 'csbig5': 'big5', 'x-x-big5': 'big5', 'cseucpkdfmtjapanese': 'euc-jp', 'euc-jp': 'euc-jp', 'x-euc-jp': 'euc-jp', 'csiso2022jp': 'iso-2022-jp', 'iso-2022-jp': 'iso-2022-jp', 'csshiftjis': 'shift_jis', 'ms_kanji': 'shift_jis', 'shift-jis': 'shift_jis', 'shift_jis': 'shift_jis', 'sjis': 'shift_jis', 'windows-31j': 'shift_jis', 'x-sjis': 'shift_jis', 'cseuckr': 'euc-kr', 'csksc56011987': 'euc-kr', 'euc-kr': 'euc-kr', 'iso-ir-149': 'euc-kr', 'korean': 'euc-kr', 'ks_c_5601-1987': 'euc-kr', 'ks_c_5601-1989': 'euc-kr', 'ksc5601': 'euc-kr', 'ksc_5601': 'euc-kr', 'windows-949': 'euc-kr', 'csiso2022kr': 'iso-2022-kr', 'iso-2022-kr': 'iso-2022-kr', 'utf-16be': 'utf-16be', 'utf-16': 'utf-16le', 'utf-16le': 'utf-16le', 'x-user-defined': 'x-user-defined', }
8,979
37.706897
53
py
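A quick sketch of how the generated LABELS table above is consulted. Note that the lookup() helper in webencodings/__init__.py (later in this dump) strips ASCII whitespace and lower-cases a label before this dictionary is queried; the raw table only knows the canonical lower-case forms. The import path is an assumption.

from pip._vendor.webencodings.labels import LABELS

assert LABELS['latin1'] == 'windows-1252'   # legacy Latin-1 aliases map to windows-1252
assert LABELS['utf8'] == 'utf-8'
assert LABELS.get('no-such-label') is None  # unknown labels simply miss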
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/webencodings/tests.py
# coding: utf-8 """ webencodings.tests ~~~~~~~~~~~~~~~~~~ A basic test suite for Encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode, IncrementalDecoder, IncrementalEncoder, UTF8) def assert_raises(exception, function, *args, **kwargs): try: function(*args, **kwargs) except exception: return else: # pragma: no cover raise AssertionError('Did not raise %s.' % exception) def test_labels(): assert lookup('utf-8').name == 'utf-8' assert lookup('Utf-8').name == 'utf-8' assert lookup('UTF-8').name == 'utf-8' assert lookup('utf8').name == 'utf-8' assert lookup('utf8').name == 'utf-8' assert lookup('utf8 ').name == 'utf-8' assert lookup(' \r\nutf8\t').name == 'utf-8' assert lookup('u8') is None # Python label. assert lookup('utf-8 ') is None # Non-ASCII white space. assert lookup('US-ASCII').name == 'windows-1252' assert lookup('iso-8859-1').name == 'windows-1252' assert lookup('latin1').name == 'windows-1252' assert lookup('LATIN1').name == 'windows-1252' assert lookup('latin-1') is None assert lookup('LATİN1') is None # ASCII-only case insensitivity. def test_all_labels(): for label in LABELS: assert decode(b'', label) == ('', lookup(label)) assert encode('', label) == b'' for repeat in [0, 1, 12]: output, _ = iter_decode([b''] * repeat, label) assert list(output) == [] assert list(iter_encode([''] * repeat, label)) == [] decoder = IncrementalDecoder(label) assert decoder.decode(b'') == '' assert decoder.decode(b'', final=True) == '' encoder = IncrementalEncoder(label) assert encoder.encode('') == b'' assert encoder.encode('', final=True) == b'' # All encoding names are valid labels too: for name in set(LABELS.values()): assert lookup(name).name == name def test_invalid_label(): assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid') assert_raises(LookupError, encode, 'é', 'invalid') assert_raises(LookupError, iter_decode, [], 'invalid') assert_raises(LookupError, iter_encode, [], 'invalid') assert_raises(LookupError, IncrementalDecoder, 'invalid') assert_raises(LookupError, IncrementalEncoder, 'invalid') def test_decode(): assert decode(b'\x80', 'latin1') == ('€', lookup('latin1')) assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1')) assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8')) assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8')) assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii')) assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be')) assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le')) assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be')) assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le')) assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le')) assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be')) assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le')) assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le')) def test_encode(): assert encode('é', 'latin1') == b'\xe9' assert encode('é', 'utf8') == b'\xc3\xa9' assert encode('é', 'utf8') == b'\xc3\xa9' assert encode('é', 
'utf-16') == b'\xe9\x00' assert encode('é', 'utf-16le') == b'\xe9\x00' assert encode('é', 'utf-16be') == b'\x00\xe9' def test_iter_decode(): def iter_decode_to_string(input, fallback_encoding): output, _encoding = iter_decode(input, fallback_encoding) return ''.join(output) assert iter_decode_to_string([], 'latin1') == '' assert iter_decode_to_string([b''], 'latin1') == '' assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é' assert iter_decode_to_string([b'hello'], 'latin1') == 'hello' assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello' assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello' assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é' assert iter_decode_to_string([ b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é' assert iter_decode_to_string([ b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD' assert iter_decode_to_string([ b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == '' assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»' assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é' assert iter_decode_to_string([ b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é' assert iter_decode_to_string([ b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo' def test_iter_encode(): assert b''.join(iter_encode([], 'latin1')) == b'' assert b''.join(iter_encode([''], 'latin1')) == b'' assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9' assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9' assert b''.join(iter_encode([ '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo' def test_x_user_defined(): encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca' decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca' encoded = b'aa' decoded = 'aa' assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) assert encode(decoded, 'x-user-defined') == encoded
6,524
41.37013
98
py
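The suite above consists of plain assert-based functions with no test-runner dependency; a hedged sketch of driving it directly follows (pytest would also collect these functions). The import path is an assumption.

from pip._vendor.webencodings import tests  # import path assumed

for name in dir(tests):
    if name.startswith('test_'):
        getattr(tests, name)()   # each raises AssertionError on failure
print('all webencodings tests passed')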
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/webencodings/mklabels.py
""" webencodings.mklabels ~~~~~~~~~~~~~~~~~~~~~ Regenarate the webencodings.labels module. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ import json try: from urllib import urlopen except ImportError: from urllib.request import urlopen def assert_lower(string): assert string == string.lower() return string def generate(url): parts = ['''\ """ webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { '''] labels = [ (repr(assert_lower(label)).lstrip('u'), repr(encoding['name']).lstrip('u')) for category in json.loads(urlopen(url).read().decode('ascii')) for encoding in category['encodings'] for label in encoding['labels']] max_len = max(len(label) for label, name in labels) parts.extend( ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name) for label, name in labels) parts.append('}') return ''.join(parts) if __name__ == '__main__': print(generate('http://encoding.spec.whatwg.org/encodings.json'))
1,305
20.766667
71
py
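mklabels is a one-off code generator rather than a runtime module; a hedged sketch of the intended invocation follows. generate() performs a live network fetch of the WHATWG encodings.json, so this only runs with connectivity; the import path is an assumption.

from pip._vendor.webencodings import mklabels  # import path assumed

source = mklabels.generate('http://encoding.spec.whatwg.org/encodings.json')
assert source.startswith('"""')  # rendered module begins with its docstring
# Writing the result over labels.py is left to the maintainer.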
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/webencodings/x_user_defined.py
# coding: utf-8 """ webencodings.x_user_defined ~~~~~~~~~~~~~~~~~~~~~~~~~~~ An implementation of the x-user-defined encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self, input, errors='strict'): return codecs.charmap_encode(input, errors, encoding_table) def decode(self, input, errors='strict'): return codecs.charmap_decode(input, errors, decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input, self.errors, encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input, self.errors, decoding_table)[0] class StreamWriter(Codec, codecs.StreamWriter): pass class StreamReader(Codec, codecs.StreamReader): pass ### encodings module API codec_info = codecs.CodecInfo( name='x-user-defined', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table # Python 3: # for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700)) decoding_table = ( '\x00' '\x01' '\x02' '\x03' '\x04' '\x05' '\x06' '\x07' '\x08' '\t' '\n' '\x0b' '\x0c' '\r' '\x0e' '\x0f' '\x10' '\x11' '\x12' '\x13' '\x14' '\x15' '\x16' '\x17' '\x18' '\x19' '\x1a' '\x1b' '\x1c' '\x1d' '\x1e' '\x1f' ' ' '!' '"' '#' '$' '%' '&' "'" '(' ')' '*' '+' ',' '-' '.' '/' '0' '1' '2' '3' '4' '5' '6' '7' '8' '9' ':' ';' '<' '=' '>' '?' '@' 'A' 'B' 'C' 'D' 'E' 'F' 'G' 'H' 'I' 'J' 'K' 'L' 'M' 'N' 'O' 'P' 'Q' 'R' 'S' 'T' 'U' 'V' 'W' 'X' 'Y' 'Z' '[' '\\' ']' '^' '_' '`' 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 'u' 'v' 'w' 'x' 'y' 'z' '{' '|' '}' '~' '\x7f' '\uf780' '\uf781' '\uf782' '\uf783' '\uf784' '\uf785' '\uf786' '\uf787' '\uf788' '\uf789' '\uf78a' '\uf78b' '\uf78c' '\uf78d' '\uf78e' '\uf78f' '\uf790' '\uf791' '\uf792' '\uf793' '\uf794' '\uf795' '\uf796' '\uf797' '\uf798' '\uf799' '\uf79a' '\uf79b' '\uf79c' '\uf79d' '\uf79e' '\uf79f' '\uf7a0' '\uf7a1' '\uf7a2' '\uf7a3' '\uf7a4' '\uf7a5' '\uf7a6' '\uf7a7' '\uf7a8' '\uf7a9' '\uf7aa' '\uf7ab' '\uf7ac' '\uf7ad' '\uf7ae' '\uf7af' '\uf7b0' '\uf7b1' '\uf7b2' '\uf7b3' '\uf7b4' '\uf7b5' '\uf7b6' '\uf7b7' '\uf7b8' '\uf7b9' '\uf7ba' '\uf7bb' '\uf7bc' '\uf7bd' '\uf7be' '\uf7bf' '\uf7c0' '\uf7c1' '\uf7c2' '\uf7c3' '\uf7c4' '\uf7c5' '\uf7c6' '\uf7c7' '\uf7c8' '\uf7c9' '\uf7ca' '\uf7cb' '\uf7cc' '\uf7cd' '\uf7ce' '\uf7cf' '\uf7d0' '\uf7d1' '\uf7d2' '\uf7d3' '\uf7d4' '\uf7d5' '\uf7d6' '\uf7d7' '\uf7d8' '\uf7d9' '\uf7da' '\uf7db' '\uf7dc' '\uf7dd' '\uf7de' '\uf7df' '\uf7e0' '\uf7e1' '\uf7e2' '\uf7e3' '\uf7e4' '\uf7e5' '\uf7e6' '\uf7e7' '\uf7e8' '\uf7e9' '\uf7ea' '\uf7eb' '\uf7ec' '\uf7ed' '\uf7ee' '\uf7ef' '\uf7f0' '\uf7f1' '\uf7f2' '\uf7f3' '\uf7f4' '\uf7f5' '\uf7f6' '\uf7f7' '\uf7f8' '\uf7f9' '\uf7fa' '\uf7fb' '\uf7fc' '\uf7fd' '\uf7fe' '\uf7ff' ) ### Encoding table encoding_table = codecs.charmap_build(decoding_table)
4,307
12.214724
75
py
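A minimal round-trip sketch for the codec defined above: bytes 0x00-0x7F pass through as ASCII, while 0x80-0xFF map into the U+F780-U+F7FF private-use range and back. The import path is an assumption.

from pip._vendor.webencodings.x_user_defined import codec_info  # path assumed

decoded, _consumed = codec_info.decode(b'a\x80\xff')
assert decoded == 'a\uf780\uf7ff'
encoded, _consumed = codec_info.encode(decoded)
assert encoded == b'a\x80\xff'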
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/webencodings/__init__.py
# coding: utf-8
"""

    webencodings
    ~~~~~~~~~~~~

    This is a Python implementation of the `WHATWG Encoding standard
    <http://encoding.spec.whatwg.org/>`. See README for details.

    :copyright: Copyright 2012 by Simon Sapin
    :license: BSD, see LICENSE for details.

"""

from __future__ import unicode_literals

import codecs

from .labels import LABELS


VERSION = '0.5.1'


# Some names in Encoding are not valid Python aliases. Remap these.
PYTHON_NAMES = {
    'iso-8859-8-i': 'iso-8859-8',
    'x-mac-cyrillic': 'mac-cyrillic',
    'macintosh': 'mac-roman',
    'windows-874': 'cp874'}

CACHE = {}


def ascii_lower(string):
    r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.

    :param string: A Unicode string.
    :returns: A new Unicode string.

    This is used for `ASCII case-insensitive
    <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_
    matching of encoding labels.
    The same matching is also used, among other things,
    for `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_.

    This is different from the :meth:`~py:str.lower` method of Unicode strings
    which also affects non-ASCII characters,
    sometimes mapping them into the ASCII range:

        >>> keyword = u'Bac\N{KELVIN SIGN}ground'
        >>> assert keyword.lower() == u'background'
        >>> assert ascii_lower(keyword) != keyword.lower()
        >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'

    """
    # This turns out to be faster than unicode.translate()
    return string.encode('utf8').lower().decode('utf8')


def lookup(label):
    """
    Look for an encoding by its label.
    This is the spec’s `get an encoding
    <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm.
    Supported labels are listed there.

    :param label: A string.
    :returns:
        An :class:`Encoding` object, or :obj:`None` for an unknown label.

    """
    # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020.
    label = ascii_lower(label.strip('\t\n\f\r '))
    name = LABELS.get(label)
    if name is None:
        return None
    encoding = CACHE.get(name)
    if encoding is None:
        if name == 'x-user-defined':
            from .x_user_defined import codec_info
        else:
            python_name = PYTHON_NAMES.get(name, name)
            # Any python_name value that gets to here should be valid.
            codec_info = codecs.lookup(python_name)
        encoding = Encoding(name, codec_info)
        CACHE[name] = encoding
    return encoding


def _get_encoding(encoding_or_label):
    """
    Accept either an encoding object or label.

    :param encoding: An :class:`Encoding` object or a label string.
    :returns: An :class:`Encoding` object.
    :raises: :exc:`~exceptions.LookupError` for an unknown label.

    """
    if hasattr(encoding_or_label, 'codec_info'):
        return encoding_or_label

    encoding = lookup(encoding_or_label)
    if encoding is None:
        raise LookupError('Unknown encoding label: %r' % encoding_or_label)
    return encoding


class Encoding(object):
    """Represents a character encoding such as UTF-8,
    that can be used for decoding or encoding.

    .. attribute:: name

        Canonical name of the encoding

    .. attribute:: codec_info

        The actual implementation of the encoding,
        a stdlib :class:`~codecs.CodecInfo` object.
        See :func:`codecs.register`.

    """
    def __init__(self, name, codec_info):
        self.name = name
        self.codec_info = codec_info

    def __repr__(self):
        return '<Encoding %s>' % self.name


#: The UTF-8 encoding. Should be used for new content and formats.
UTF8 = lookup('utf-8')

_UTF16LE = lookup('utf-16le')
_UTF16BE = lookup('utf-16be')


def decode(input, fallback_encoding, errors='replace'):
    """
    Decode a single string.

    :param input: A byte string
    :param fallback_encoding:
        An :class:`Encoding` object or a label string.
        The encoding to use if :obj:`input` does not have a BOM.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :return:
        A ``(output, encoding)`` tuple of a Unicode string
        and an :obj:`Encoding`.

    """
    # Fail early if `encoding` is an invalid label.
    fallback_encoding = _get_encoding(fallback_encoding)
    bom_encoding, input = _detect_bom(input)
    encoding = bom_encoding or fallback_encoding
    return encoding.codec_info.decode(input, errors)[0], encoding


def _detect_bom(input):
    """Return (bom_encoding, input), with any BOM removed from the input."""
    if input.startswith(b'\xFF\xFE'):
        return _UTF16LE, input[2:]
    if input.startswith(b'\xFE\xFF'):
        return _UTF16BE, input[2:]
    if input.startswith(b'\xEF\xBB\xBF'):
        return UTF8, input[3:]
    return None, input


def encode(input, encoding=UTF8, errors='strict'):
    """
    Encode a single string.

    :param input: A Unicode string.
    :param encoding: An :class:`Encoding` object or a label string.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :return: A byte string.

    """
    return _get_encoding(encoding).codec_info.encode(input, errors)[0]


def iter_decode(input, fallback_encoding, errors='replace'):
    """
    "Pull"-based decoder.

    :param input:
        An iterable of byte strings.

        The input is first consumed just enough to determine the encoding
        based on the presence of a BOM,
        then consumed on demand when the return value is.
    :param fallback_encoding:
        An :class:`Encoding` object or a label string.
        The encoding to use if :obj:`input` does not have a BOM.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :returns:
        An ``(output, encoding)`` tuple.
        :obj:`output` is an iterable of Unicode strings,
        :obj:`encoding` is the :obj:`Encoding` that is being used.

    """
    decoder = IncrementalDecoder(fallback_encoding, errors)
    generator = _iter_decode_generator(input, decoder)
    encoding = next(generator)
    return generator, encoding


def _iter_decode_generator(input, decoder):
    """Return a generator that first yields the :obj:`Encoding`,
    then yields output chunks as Unicode strings.

    """
    decode = decoder.decode
    input = iter(input)
    for chunk in input:
        output = decode(chunk)
        if output:
            assert decoder.encoding is not None
            yield decoder.encoding
            yield output
            break
    else:
        # Input exhausted without determining the encoding
        output = decode(b'', final=True)
        assert decoder.encoding is not None
        yield decoder.encoding
        if output:
            yield output
        return

    for chunk in input:
        output = decode(chunk)
        if output:
            yield output
    output = decode(b'', final=True)
    if output:
        yield output


def iter_encode(input, encoding=UTF8, errors='strict'):
    """
    “Pull”-based encoder.

    :param input: An iterable of Unicode strings.
    :param encoding: An :class:`Encoding` object or a label string.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :returns: An iterable of byte strings.

    """
    # Fail early if `encoding` is an invalid label.
    encode = IncrementalEncoder(encoding, errors).encode
    return _iter_encode_generator(input, encode)


def _iter_encode_generator(input, encode):
    for chunk in input:
        output = encode(chunk)
        if output:
            yield output
    output = encode('', final=True)
    if output:
        yield output


class IncrementalDecoder(object):
    """
    “Push”-based decoder.

    :param fallback_encoding:
        An :class:`Encoding` object or a label string.
        The encoding to use if :obj:`input` does not have a BOM.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.

    """
    def __init__(self, fallback_encoding, errors='replace'):
        # Fail early if `encoding` is an invalid label.
        self._fallback_encoding = _get_encoding(fallback_encoding)
        self._errors = errors
        self._buffer = b''
        self._decoder = None
        #: The actual :class:`Encoding` that is being used,
        #: or :obj:`None` if that is not determined yet.
        #: (Ie. if there is not enough input yet to determine
        #: if there is a BOM.)
        self.encoding = None  # Not known yet.

    def decode(self, input, final=False):
        """Decode one chunk of the input.

        :param input: A byte string.
        :param final:
            Indicate that no more input is available.
            Must be :obj:`True` if this is the last call.
        :returns: A Unicode string.

        """
        decoder = self._decoder
        if decoder is not None:
            return decoder(input, final)

        input = self._buffer + input
        encoding, input = _detect_bom(input)
        if encoding is None:
            if len(input) < 3 and not final:  # Not enough data yet.
                self._buffer = input
                return ''
            else:  # No BOM
                encoding = self._fallback_encoding
        decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
        self._decoder = decoder
        self.encoding = encoding
        return decoder(input, final)


class IncrementalEncoder(object):
    """
    “Push”-based encoder.

    :param encoding: An :class:`Encoding` object or a label string.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.

    .. method:: encode(input, final=False)

        :param input: A Unicode string.
        :param final:
            Indicate that no more input is available.
            Must be :obj:`True` if this is the last call.
        :returns: A byte string.

    """
    def __init__(self, encoding=UTF8, errors='strict'):
        encoding = _get_encoding(encoding)
        self.encode = encoding.codec_info.incrementalencoder(errors).encode
10,565
29.804665
78
py
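A minimal usage sketch of the module above, assuming the package is importable under its distribution name `webencodings`; the byte strings and labels are illustrative.

import webencodings

# lookup() resolves a WHATWG label to an Encoding object (or None).
assert webencodings.lookup('latin1').name == 'windows-1252'

# decode() strips a recognized BOM and reports the encoding actually used.
text, enc = webencodings.decode(b'\xef\xbb\xbfhello', 'latin1')
assert text == 'hello' and enc.name == 'utf-8'

# encode() defaults to UTF-8 regardless of any decoding label.
assert webencodings.encode('caf\u00e9') == b'caf\xc3\xa9'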
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/pytoml/writer.py
from __future__ import unicode_literals import io, datetime, math, sys if sys.version_info[0] == 3: long = int unicode = str def dumps(obj, sort_keys=False): fout = io.StringIO() dump(obj, fout, sort_keys=sort_keys) return fout.getvalue() _escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'} def _escape_string(s): res = [] start = 0 def flush(): if start != i: res.append(s[start:i]) return i + 1 i = 0 while i < len(s): c = s[i] if c in '"\\\n\r\t\b\f': start = flush() res.append('\\' + _escapes[c]) elif ord(c) < 0x20: start = flush() res.append('\\u%04x' % ord(c)) i += 1 flush() return '"' + ''.join(res) + '"' def _escape_id(s): if any(not c.isalnum() and c not in '-_' for c in s): return _escape_string(s) return s def _format_list(v): return '[{0}]'.format(', '.join(_format_value(obj) for obj in v)) # Formula from: # https://docs.python.org/2/library/datetime.html#datetime.timedelta.total_seconds # Once support for py26 is dropped, this can be replaced by td.total_seconds() def _total_seconds(td): return ((td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.0**6) def _format_value(v): if isinstance(v, bool): return 'true' if v else 'false' if isinstance(v, int) or isinstance(v, long): return unicode(v) if isinstance(v, float): if math.isnan(v) or math.isinf(v): raise ValueError("{0} is not a valid TOML value".format(v)) else: return repr(v) elif isinstance(v, unicode) or isinstance(v, bytes): return _escape_string(v) elif isinstance(v, datetime.datetime): offs = v.utcoffset() offs = _total_seconds(offs) // 60 if offs is not None else 0 if offs == 0: suffix = 'Z' else: if offs > 0: suffix = '+' else: suffix = '-' offs = -offs suffix = '{0}{1:.02}{2:.02}'.format(suffix, offs // 60, offs % 60) if v.microsecond: return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix else: return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix elif isinstance(v, list): return _format_list(v) else: raise RuntimeError(v) def dump(obj, fout, sort_keys=False): tables = [((), obj, False)] while tables: name, table, is_array = tables.pop() if name: section_name = '.'.join(_escape_id(c) for c in name) if is_array: fout.write('[[{0}]]\n'.format(section_name)) else: fout.write('[{0}]\n'.format(section_name)) table_keys = sorted(table.keys()) if sort_keys else table.keys() new_tables = [] has_kv = False for k in table_keys: v = table[k] if isinstance(v, dict): new_tables.append((name + (k,), v, False)) elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v): new_tables.extend((name + (k,), d, True) for d in v) elif v is None: # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344 fout.write( '#{} = null # To use: uncomment and replace null with value\n'.format(_escape_id(k))) has_kv = True else: fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v))) has_kv = True tables.extend(reversed(new_tables)) if (name or has_kv) and tables: fout.write('\n')
3,815
28.8125
112
py
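A short sketch of dumps() from the writer above; the import path assumes the vendored copy is reachable as pip._vendor.pytoml.

from pip._vendor import pytoml

doc = {'server': {'host': 'localhost', 'ports': [8001, 8002]}}
# sort_keys=True makes the key order in the output deterministic.
print(pytoml.dumps(doc, sort_keys=True))
# [server]
# host = "localhost"
# ports = [8001, 8002]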
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/pytoml/parser.py
import string, re, sys, datetime
from .core import TomlError

if sys.version_info[0] == 2:
    _chr = unichr
else:
    _chr = chr

def load(fin, translate=lambda t, x, v: v):
    return loads(fin.read(), translate=translate, filename=getattr(fin, 'name', repr(fin)))

def loads(s, filename='<string>', translate=lambda t, x, v: v):
    if isinstance(s, bytes):
        s = s.decode('utf-8')

    s = s.replace('\r\n', '\n')

    root = {}
    tables = {}
    scope = root

    src = _Source(s, filename=filename)
    ast = _p_toml(src)

    def error(msg):
        raise TomlError(msg, pos[0], pos[1], filename)

    def process_value(v):
        kind, text, value, pos = v
        if kind == 'str' and value.startswith('\n'):
            value = value[1:]
        if kind == 'array':
            if value and any(k != value[0][0] for k, t, v, p in value[1:]):
                error('array-type-mismatch')
            value = [process_value(item) for item in value]
        elif kind == 'table':
            value = dict([(k, process_value(value[k])) for k in value])
        return translate(kind, text, value)

    for kind, value, pos in ast:
        if kind == 'kv':
            k, v = value
            if k in scope:
                error('duplicate_keys. Key "{0}" was used more than once.'.format(k))
            scope[k] = process_value(v)
        else:
            is_table_array = (kind == 'table_array')
            cur = tables
            for name in value[:-1]:
                if isinstance(cur.get(name), list):
                    d, cur = cur[name][-1]
                else:
                    d, cur = cur.setdefault(name, (None, {}))

            scope = {}
            name = value[-1]
            if name not in cur:
                if is_table_array:
                    cur[name] = [(scope, {})]
                else:
                    cur[name] = (scope, {})
            elif isinstance(cur[name], list):
                if not is_table_array:
                    error('table_type_mismatch')
                cur[name].append((scope, {}))
            else:
                if is_table_array:
                    error('table_type_mismatch')
                old_scope, next_table = cur[name]
                if old_scope is not None:
                    error('duplicate_tables')
                cur[name] = (scope, next_table)

    def merge_tables(scope, tables):
        if scope is None:
            scope = {}
        for k in tables:
            if k in scope:
                error('key_table_conflict')
            v = tables[k]
            if isinstance(v, list):
                scope[k] = [merge_tables(sc, tbl) for sc, tbl in v]
            else:
                scope[k] = merge_tables(v[0], v[1])
        return scope

    return merge_tables(root, tables)

class _Source:
    def __init__(self, s, filename=None):
        self.s = s
        self._pos = (1, 1)
        self._last = None
        self._filename = filename
        self.backtrack_stack = []

    def last(self):
        return self._last

    def pos(self):
        return self._pos

    def fail(self):
        return self._expect(None)

    def consume_dot(self):
        if self.s:
            self._last = self.s[0]
            self.s = self.s[1:]
            self._advance(self._last)
            return self._last
        return None

    def expect_dot(self):
        return self._expect(self.consume_dot())

    def consume_eof(self):
        if not self.s:
            self._last = ''
            return True
        return False

    def expect_eof(self):
        return self._expect(self.consume_eof())

    def consume(self, s):
        if self.s.startswith(s):
            self.s = self.s[len(s):]
            self._last = s
            self._advance(s)
            return True
        return False

    def expect(self, s):
        return self._expect(self.consume(s))

    def consume_re(self, re):
        m = re.match(self.s)
        if m:
            self.s = self.s[len(m.group(0)):]
            self._last = m
            self._advance(m.group(0))
            return m
        return None

    def expect_re(self, re):
        return self._expect(self.consume_re(re))

    def __enter__(self):
        self.backtrack_stack.append((self.s, self._pos))

    def __exit__(self, type, value, traceback):
        if type is None:
            self.backtrack_stack.pop()
        else:
            self.s, self._pos = self.backtrack_stack.pop()
        return type == TomlError

    def commit(self):
        self.backtrack_stack[-1] = (self.s, self._pos)

    def _expect(self, r):
        if not r:
            raise TomlError('msg', self._pos[0], self._pos[1], self._filename)
        return r

    def _advance(self, s):
        suffix_pos = s.rfind('\n')
        if suffix_pos == -1:
            self._pos = (self._pos[0], self._pos[1] + len(s))
        else:
            self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos)

_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*')
def _p_ews(s):
    s.expect_re(_ews_re)

_ws_re = re.compile(r'[ \t]*')
def _p_ws(s):
    s.expect_re(_ws_re)

_escapes = { 'b': '\b', 'n': '\n', 'r': '\r', 't': '\t', '"': '"', '\'': '\'', '\\': '\\', '/': '/', 'f': '\f' }

_basicstr_re = re.compile(r'[^"\\\000-\037]*')
_short_uni_re = re.compile(r'u([0-9a-fA-F]{4})')
_long_uni_re = re.compile(r'U([0-9a-fA-F]{8})')
_escapes_re = re.compile('[bnrt"\'\\\\/f]')
_newline_esc_re = re.compile('\n[ \t\n]*')

def _p_basicstr_content(s, content=_basicstr_re):
    res = []
    while True:
        res.append(s.expect_re(content).group(0))
        if not s.consume('\\'):
            break
        if s.consume_re(_newline_esc_re):
            pass
        elif s.consume_re(_short_uni_re) or s.consume_re(_long_uni_re):
            res.append(_chr(int(s.last().group(1), 16)))
        else:
            s.expect_re(_escapes_re)
            res.append(_escapes[s.last().group(0)])
    return ''.join(res)

_key_re = re.compile(r'[0-9a-zA-Z-_]+')
def _p_key(s):
    with s:
        s.expect('"')
        r = _p_basicstr_content(s, _basicstr_re)
        s.expect('"')
        return r
    if s.consume('\''):
        if s.consume('\'\''):
            r = s.expect_re(_litstr_ml_re).group(0)
            s.expect('\'\'\'')
        else:
            r = s.expect_re(_litstr_re).group(0)
            s.expect('\'')
        return r
    return s.expect_re(_key_re).group(0)

_float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]?(?:\d(?:_?\d)*))?')
_datetime_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))')

_basicstr_ml_re = re.compile(r'(?:(?:|"|"")[^"\\\000-\011\013-\037])*')
_litstr_re = re.compile(r"[^'\000-\037]*")
_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\011\013-\037]))*")
def _p_value(s):
    pos = s.pos()

    if s.consume('true'):
        return 'bool', s.last(), True, pos
    if s.consume('false'):
        return 'bool', s.last(), False, pos

    if s.consume('"'):
        if s.consume('""'):
            r = _p_basicstr_content(s, _basicstr_ml_re)
            s.expect('"""')
        else:
            r = _p_basicstr_content(s, _basicstr_re)
            s.expect('"')
        return 'str', r, r, pos

    if s.consume('\''):
        if s.consume('\'\''):
            r = s.expect_re(_litstr_ml_re).group(0)
            s.expect('\'\'\'')
        else:
            r = s.expect_re(_litstr_re).group(0)
            s.expect('\'')
        return 'str', r, r, pos

    if s.consume_re(_datetime_re):
        m = s.last()
        s0 = m.group(0)
        r = map(int, m.groups()[:6])
        if m.group(7):
            micro = float(m.group(7))
        else:
            micro = 0

        if m.group(8):
            g = int(m.group(8), 10) * 60 + int(m.group(9), 10)
            tz = _TimeZone(datetime.timedelta(0, g * 60))
        else:
            tz = _TimeZone(datetime.timedelta(0, 0))

        y, m, d, H, M, S = r
        dt = datetime.datetime(y, m, d, H, M, S, int(micro * 1000000), tz)
        return 'datetime', s0, dt, pos

    if s.consume_re(_float_re):
        m = s.last().group(0)
        r = m.replace('_','')
        if '.' in m or 'e' in m or 'E' in m:
            return 'float', m, float(r), pos
        else:
            return 'int', m, int(r, 10), pos

    if s.consume('['):
        items = []
        with s:
            while True:
                _p_ews(s)
                items.append(_p_value(s))
                s.commit()
                _p_ews(s)
                s.expect(',')
                s.commit()
        _p_ews(s)
        s.expect(']')
        return 'array', None, items, pos

    if s.consume('{'):
        _p_ws(s)
        items = {}
        if not s.consume('}'):
            k = _p_key(s)
            _p_ws(s)
            s.expect('=')
            _p_ws(s)
            items[k] = _p_value(s)
            _p_ws(s)
            while s.consume(','):
                _p_ws(s)
                k = _p_key(s)
                _p_ws(s)
                s.expect('=')
                _p_ws(s)
                items[k] = _p_value(s)
                _p_ws(s)
        s.expect('}')
        return 'table', None, items, pos

    s.fail()

def _p_stmt(s):
    pos = s.pos()
    if s.consume('['):
        is_array = s.consume('[')
        _p_ws(s)
        keys = [_p_key(s)]
        _p_ws(s)
        while s.consume('.'):
            _p_ws(s)
            keys.append(_p_key(s))
            _p_ws(s)
        s.expect(']')
        if is_array:
            s.expect(']')
        return 'table_array' if is_array else 'table', keys, pos

    key = _p_key(s)
    _p_ws(s)
    s.expect('=')
    _p_ws(s)
    value = _p_value(s)
    return 'kv', (key, value), pos

_stmtsep_re = re.compile(r'(?:[ \t]*(?:#[^\n]*)?\n)+[ \t]*')
def _p_toml(s):
    stmts = []
    _p_ews(s)
    with s:
        stmts.append(_p_stmt(s))
        while True:
            s.commit()
            s.expect_re(_stmtsep_re)
            stmts.append(_p_stmt(s))
    _p_ews(s)
    s.expect_eof()
    return stmts

class _TimeZone(datetime.tzinfo):
    def __init__(self, offset):
        self._offset = offset

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        return None

    def tzname(self, dt):
        m = self._offset.total_seconds() // 60
        if m < 0:
            res = '-'
            m = -m
        else:
            res = '+'
        h = m // 60
        m = m - h * 60
        return '{}{:.02}{:.02}'.format(res, h, m)
10,542
27.114667
110
py
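A round-trip sketch combining loads() above with the writer; the TOML snippet is illustrative.

from pip._vendor import pytoml

src = 'title = "example"\n\n[owner]\nname = "tom"\n'
data = pytoml.loads(src)
assert data == {'title': 'example', 'owner': {'name': 'tom'}}

# dumps() followed by loads() should reproduce the same structure.
assert pytoml.loads(pytoml.dumps(data)) == data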
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/pytoml/core.py
class TomlError(RuntimeError): def __init__(self, message, line, col, filename): RuntimeError.__init__(self, message, line, col, filename) self.message = message self.line = line self.col = col self.filename = filename def __str__(self): return '{}({}, {}): {}'.format(self.filename, self.line, self.col, self.message) def __repr__(self): return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(self.message, self.line, self.col, self.filename)
509
35.428571
107
py
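TomlError carries the filename, line, and column, so callers can report malformed input precisely; the bad document below is hypothetical.

from pip._vendor import pytoml

try:
    pytoml.loads('key = @bad')
except pytoml.TomlError as exc:
    print(exc)  # e.g. <string>(1, 7): msg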
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/pytoml/__init__.py
from .core import TomlError from .parser import load, loads from .writer import dump, dumps
92
22.25
31
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/filewrapper.py
from io import BytesIO


class CallbackFileWrapper(object):
    """
    Small wrapper around a fp object which will tee everything read into a
    buffer, and when that file is closed it will execute a callback with the
    contents of that buffer.

    All attributes are proxied to the underlying file object.

    This class uses members with a double underscore (__) leading prefix so as
    not to accidentally shadow an attribute.
    """

    def __init__(self, fp, callback):
        self.__buf = BytesIO()
        self.__fp = fp
        self.__callback = callback

    def __getattr__(self, name):
        # The vagaries of garbage collection means that self.__fp is
        # not always set. By using __getattribute__ and the private
        # name[0] allows looking up the attribute value and raising an
        # AttributeError when it doesn't exist. This stops things from
        # infinitely recursing calls to getattr in the case where
        # self.__fp hasn't been set.
        #
        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
        fp = self.__getattribute__('_CallbackFileWrapper__fp')
        return getattr(fp, name)

    def __is_fp_closed(self):
        try:
            return self.__fp.fp is None
        except AttributeError:
            pass

        try:
            return self.__fp.closed
        except AttributeError:
            pass

        # We just don't cache it then.
        # TODO: Add some logging here...
        return False

    def _close(self):
        if self.__callback:
            self.__callback(self.__buf.getvalue())

        # We assign this to None here, because otherwise we can get into
        # really tricky problems where the CPython interpreter deadlocks
        # because the callback is holding a reference to something which
        # has a __del__ method. Setting this to None breaks the cycle
        # and allows the garbage collector to do its thing normally.
        self.__callback = None

    def read(self, amt=None):
        data = self.__fp.read(amt)
        self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data

    def _safe_read(self, amt):
        data = self.__fp._safe_read(amt)
        if amt == 2 and data == b'\r\n':
            # urllib executes this read to toss the CRLF at the end
            # of the chunk.
            return data

        self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data
2,531
31.050633
83
py
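A self-contained sketch of CallbackFileWrapper using an in-memory stream; real callers wrap a urllib3 response body. Setting a `fp` attribute on the BytesIO mimics the httplib attribute that __is_fp_closed() probes first.

from io import BytesIO

from pip._vendor.cachecontrol.filewrapper import CallbackFileWrapper

captured = []
raw = BytesIO(b'payload')
raw.fp = None  # mimic an exhausted httplib response
wrapped = CallbackFileWrapper(raw, captured.append)

wrapped.read()  # reads are teed into the internal buffer
assert captured == [b'payload']  # callback fired with the buffered bytes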
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/adapter.py
import types import functools import zlib from pip._vendor.requests.adapters import HTTPAdapter from .controller import CacheController from .cache import DictCache from .filewrapper import CallbackFileWrapper class CacheControlAdapter(HTTPAdapter): invalidating_methods = set(['PUT', 'DELETE']) def __init__(self, cache=None, cache_etags=True, controller_class=None, serializer=None, heuristic=None, cacheable_methods=None, *args, **kw): super(CacheControlAdapter, self).__init__(*args, **kw) self.cache = cache or DictCache() self.heuristic = heuristic self.cacheable_methods = cacheable_methods or ('GET',) controller_factory = controller_class or CacheController self.controller = controller_factory( self.cache, cache_etags=cache_etags, serializer=serializer, ) def send(self, request, cacheable_methods=None, **kw): """ Send a request. Use the request information to see if it exists in the cache and cache the response if we need to and can. """ cacheable = cacheable_methods or self.cacheable_methods if request.method in cacheable: try: cached_response = self.controller.cached_request(request) except zlib.error: cached_response = None if cached_response: return self.build_response(request, cached_response, from_cache=True) # check for etags and add headers if appropriate request.headers.update( self.controller.conditional_headers(request) ) resp = super(CacheControlAdapter, self).send(request, **kw) return resp def build_response(self, request, response, from_cache=False, cacheable_methods=None): """ Build a response by making a request or using the cache. This will end up calling send and returning a potentially cached response """ cacheable = cacheable_methods or self.cacheable_methods if not from_cache and request.method in cacheable: # Check for any heuristics that might update headers # before trying to cache. if self.heuristic: response = self.heuristic.apply(response) # apply any expiration heuristics if response.status == 304: # We must have sent an ETag request. This could mean # that we've been expired already or that we simply # have an etag. In either case, we want to try and # update the cache if that is the case. cached_response = self.controller.update_cached_response( request, response ) if cached_response is not response: from_cache = True # We are done with the server response, read a # possible response body (compliant servers will # not return one, but we cannot be 100% sure) and # release the connection back to the pool. response.read(decode_content=False) response.release_conn() response = cached_response # We always cache the 301 responses elif response.status == 301: self.controller.cache_response(request, response) else: # Wrap the response file with a wrapper that will cache the # response when the stream has been consumed. response._fp = CallbackFileWrapper( response._fp, functools.partial( self.controller.cache_response, request, response, ) ) if response.chunked: super_update_chunk_length = response._update_chunk_length def _update_chunk_length(self): super_update_chunk_length() if self.chunk_left == 0: self._fp._close() response._update_chunk_length = types.MethodType(_update_chunk_length, response) resp = super(CacheControlAdapter, self).build_response( request, response ) # See if we should invalidate the cache. 
if request.method in self.invalidating_methods and resp.ok: cache_url = self.controller.cache_url(request.url) self.cache.delete(cache_url) # Give the request a from_cache attr to let people use it resp.from_cache = from_cache return resp def close(self): self.cache.close() super(CacheControlAdapter, self).close()
5,022
36.207407
100
py
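A construction-only sketch of the adapter above with an explicit shared cache and method whitelist; mounting it onto a session is shown after wrapper.py below.

from pip._vendor.cachecontrol.adapter import CacheControlAdapter
from pip._vendor.cachecontrol.cache import DictCache

shared_cache = DictCache()
adapter = CacheControlAdapter(
    cache=shared_cache,          # storage that can be shared across sessions
    cache_etags=True,            # revalidate with conditional requests
    cacheable_methods=('GET',),  # only cache idempotent GETs
)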
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/wrapper.py
from .adapter import CacheControlAdapter from .cache import DictCache def CacheControl(sess, cache=None, cache_etags=True, serializer=None, heuristic=None, controller_class=None, adapter_class=None, cacheable_methods=None): cache = cache or DictCache() adapter_class = adapter_class or CacheControlAdapter adapter = adapter_class( cache, cache_etags=cache_etags, serializer=serializer, heuristic=heuristic, controller_class=controller_class, cacheable_methods=cacheable_methods ) sess.mount('http://', adapter) sess.mount('https://', adapter) return sess
754
25.964286
56
py
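The convenience wrapper in use; requests must be importable and the URL is illustrative.

from pip._vendor import requests
from pip._vendor.cachecontrol import CacheControl

sess = CacheControl(requests.Session())
resp = sess.get('http://example.com/')  # first hit goes to the network
resp = sess.get('http://example.com/')  # repeat may be served from cache
print(resp.from_cache)  # attribute set by the adapter's build_response()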
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/controller.py
""" The httplib2 algorithms ported for use with requests. """ import logging import re import calendar import time from email.utils import parsedate_tz from pip._vendor.requests.structures import CaseInsensitiveDict from .cache import DictCache from .serialize import Serializer logger = logging.getLogger(__name__) URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") def parse_uri(uri): """Parses a URI using the regex given in Appendix B of RFC 3986. (scheme, authority, path, query, fragment) = parse_uri(uri) """ groups = URI.match(uri).groups() return (groups[1], groups[3], groups[4], groups[6], groups[8]) class CacheController(object): """An interface to see if request should cached or not. """ def __init__(self, cache=None, cache_etags=True, serializer=None, status_codes=None): self.cache = cache or DictCache() self.cache_etags = cache_etags self.serializer = serializer or Serializer() self.cacheable_status_codes = status_codes or (200, 203, 300, 301) @classmethod def _urlnorm(cls, uri): """Normalize the URL to create a safe key for the cache""" (scheme, authority, path, query, fragment) = parse_uri(uri) if not scheme or not authority: raise Exception("Only absolute URIs are allowed. uri = %s" % uri) scheme = scheme.lower() authority = authority.lower() if not path: path = "/" # Could do syntax based normalization of the URI before # computing the digest. See Section 6.2.2 of Std 66. request_uri = query and "?".join([path, query]) or path defrag_uri = scheme + "://" + authority + request_uri return defrag_uri @classmethod def cache_url(cls, uri): return cls._urlnorm(uri) def parse_cache_control(self, headers): known_directives = { # https://tools.ietf.org/html/rfc7234#section-5.2 'max-age': (int, True,), 'max-stale': (int, False,), 'min-fresh': (int, True,), 'no-cache': (None, False,), 'no-store': (None, False,), 'no-transform': (None, False,), 'only-if-cached' : (None, False,), 'must-revalidate': (None, False,), 'public': (None, False,), 'private': (None, False,), 'proxy-revalidate': (None, False,), 's-maxage': (int, True,) } cc_headers = headers.get('cache-control', headers.get('Cache-Control', '')) retval = {} for cc_directive in cc_headers.split(','): parts = cc_directive.split('=', 1) directive = parts[0].strip() try: typ, required = known_directives[directive] except KeyError: logger.debug('Ignoring unknown cache-control directive: %s', directive) continue if not typ or not required: retval[directive] = None if typ: try: retval[directive] = typ(parts[1].strip()) except IndexError: if required: logger.debug('Missing value for cache-control ' 'directive: %s', directive) except ValueError: logger.debug('Invalid value for cache-control directive ' '%s, must be %s', directive, typ.__name__) return retval def cached_request(self, request): """ Return a cached response if it exists in the cache, otherwise return False. 
""" cache_url = self.cache_url(request.url) logger.debug('Looking up "%s" in the cache', cache_url) cc = self.parse_cache_control(request.headers) # Bail out if the request insists on fresh data if 'no-cache' in cc: logger.debug('Request header has "no-cache", cache bypassed') return False if 'max-age' in cc and cc['max-age'] == 0: logger.debug('Request header has "max_age" as 0, cache bypassed') return False # Request allows serving from the cache, let's see if we find something cache_data = self.cache.get(cache_url) if cache_data is None: logger.debug('No cache entry available') return False # Check whether it can be deserialized resp = self.serializer.loads(request, cache_data) if not resp: logger.warning('Cache entry deserialization failed, entry ignored') return False # If we have a cached 301, return it immediately. We don't # need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). if resp.status == 301: msg = ('Returning cached "301 Moved Permanently" response ' '(ignoring date and etag information)') logger.debug(msg) return resp headers = CaseInsensitiveDict(resp.headers) if not headers or 'date' not in headers: if 'etag' not in headers: # Without date or etag, the cached response can never be used # and should be deleted. logger.debug('Purging cached response: no date or etag') self.cache.delete(cache_url) logger.debug('Ignoring cached response: no date') return False now = time.time() date = calendar.timegm( parsedate_tz(headers['date']) ) current_age = max(0, now - date) logger.debug('Current age based on date: %i', current_age) # TODO: There is an assumption that the result will be a # urllib3 response object. This may not be best since we # could probably avoid instantiating or constructing the # response until we know we need it. resp_cc = self.parse_cache_control(headers) # determine freshness freshness_lifetime = 0 # Check the max-age pragma in the cache control header if 'max-age' in resp_cc: freshness_lifetime = resp_cc['max-age'] logger.debug('Freshness lifetime from max-age: %i', freshness_lifetime) # If there isn't a max-age, check for an expires header elif 'expires' in headers: expires = parsedate_tz(headers['expires']) if expires is not None: expire_time = calendar.timegm(expires) - date freshness_lifetime = max(0, expire_time) logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) # Determine if we are setting freshness limit in the # request. Note, this overrides what was in the response. if 'max-age' in cc: freshness_lifetime = cc['max-age'] logger.debug('Freshness lifetime from request max-age: %i', freshness_lifetime) if 'min-fresh' in cc: min_fresh = cc['min-fresh'] # adjust our current age by our min fresh current_age += min_fresh logger.debug('Adjusted current age from min-fresh: %i', current_age) # Return entry if it is fresh enough if freshness_lifetime > current_age: logger.debug('The response is "fresh", returning cached response') logger.debug('%i > %i', freshness_lifetime, current_age) return resp # we're not fresh. 
        # If we don't have an Etag, clear it out
        if 'etag' not in headers:
            logger.debug(
                'The cached response is "stale" with no etag, purging'
            )
            self.cache.delete(cache_url)

        # return the original handler
        return False

    def conditional_headers(self, request):
        cache_url = self.cache_url(request.url)
        resp = self.serializer.loads(request, self.cache.get(cache_url))
        new_headers = {}

        if resp:
            headers = CaseInsensitiveDict(resp.headers)

            if 'etag' in headers:
                new_headers['If-None-Match'] = headers['ETag']

            if 'last-modified' in headers:
                new_headers['If-Modified-Since'] = headers['Last-Modified']

        return new_headers

    def cache_response(self, request, response, body=None,
                       status_codes=None):
        """
        Algorithm for caching requests.

        This assumes a requests Response object.
        """
        # From httplib2: Don't cache 206's since we aren't going to
        #                handle byte range requests
        cacheable_status_codes = status_codes or self.cacheable_status_codes
        if response.status not in cacheable_status_codes:
            logger.debug(
                'Status code %s not in %s',
                response.status,
                cacheable_status_codes
            )
            return

        response_headers = CaseInsensitiveDict(response.headers)

        # If we've been given a body, our response has a Content-Length, that
        # Content-Length is valid then we can check to see if the body we've
        # been given matches the expected size, and if it doesn't we'll just
        # skip trying to cache it.
        if (body is not None and
                "content-length" in response_headers and
                response_headers["content-length"].isdigit() and
                int(response_headers["content-length"]) != len(body)):
            return

        cc_req = self.parse_cache_control(request.headers)
        cc = self.parse_cache_control(response_headers)

        cache_url = self.cache_url(request.url)
        logger.debug('Updating cache with response from "%s"', cache_url)

        # Delete it from the cache if we happen to have it stored there
        no_store = False
        if 'no-store' in cc:
            no_store = True
            logger.debug('Response header has "no-store"')
        if 'no-store' in cc_req:
            no_store = True
            logger.debug('Request header has "no-store"')
        if no_store and self.cache.get(cache_url):
            logger.debug('Purging existing cache entry to honor "no-store"')
            self.cache.delete(cache_url)

        # If we've been given an etag, then keep the response
        if self.cache_etags and 'etag' in response_headers:
            logger.debug('Caching due to etag')
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response, body=body),
            )

        # Add to the cache any 301s. We do this before looking at the
        # Date headers.
        elif response.status == 301:
            logger.debug('Caching permanent redirect')
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response)
            )

        # Add to the cache if the response headers demand it. If there
        # is no date header then we can't do anything about expiring
        # the cache.
        elif 'date' in response_headers:
            # cache when there is a max-age > 0
            if 'max-age' in cc and cc['max-age'] > 0:
                logger.debug('Caching b/c date exists and max-age > 0')
                self.cache.set(
                    cache_url,
                    self.serializer.dumps(request, response, body=body),
                )

            # If the request can expire, it means we should cache it
            # in the meantime.
            elif 'expires' in response_headers:
                if response_headers['expires']:
                    logger.debug('Caching b/c of expires header')
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

    def update_cached_response(self, request, response):
        """On a 304 we will get a new set of headers that we want to
        update our cached value with, assuming we have one.

        This should only ever be called when we've sent an ETag and
        gotten a 304 as the response.
""" cache_url = self.cache_url(request.url) cached_response = self.serializer.loads( request, self.cache.get(cache_url) ) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = [ "content-length", ] cached_response.headers.update( dict((k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self.cache.set( cache_url, self.serializer.dumps(request, cached_response), ) return cached_response
13,859
36.058824
85
py
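parse_cache_control() and cache_url() can be exercised without any HTTP traffic; a minimal sketch.

from pip._vendor.cachecontrol.controller import CacheController

cc = CacheController()
directives = cc.parse_cache_control(
    {'cache-control': 'max-age=600, no-store'})
assert directives == {'max-age': 600, 'no-store': None}

# _urlnorm() lower-cases the scheme/authority and keeps the query string.
assert (CacheController.cache_url('HTTP://Example.COM/a?b=1')
        == 'http://example.com/a?b=1')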
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/cache.py
""" The cache object API for implementing caches. The default is a thread safe in-memory dictionary. """ from threading import Lock class BaseCache(object): def get(self, key): raise NotImplemented() def set(self, key, value): raise NotImplemented() def delete(self, key): raise NotImplemented() def close(self): pass class DictCache(BaseCache): def __init__(self, init_dict=None): self.lock = Lock() self.data = init_dict or {} def get(self, key): return self.data.get(key, None) def set(self, key, value): with self.lock: self.data.update({key: value}) def delete(self, key): with self.lock: if key in self.data: self.data.pop(key)
790
18.775
69
py
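A quick sanity sketch of DictCache, the default backend.

from pip._vendor.cachecontrol.cache import DictCache

cache = DictCache()
cache.set('key', b'value')
assert cache.get('key') == b'value'
cache.delete('key')
assert cache.get('key') is None  # missing keys return None, never raise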
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/heuristics.py
import calendar
import time

from email.utils import formatdate, parsedate, parsedate_tz

from datetime import datetime, timedelta

TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"


def expire_after(delta, date=None):
    date = date or datetime.utcnow()
    return date + delta


def datetime_to_header(dt):
    return formatdate(calendar.timegm(dt.timetuple()))


class BaseHeuristic(object):

    def warning(self, response):
        """
        Return a valid 1xx warning header value describing the cache
        adjustments.

        The response is provided to allow warnings like 113
        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
        to explicitly say the response is over 24 hours old.
        """
        return '110 - "Response is Stale"'

    def update_headers(self, response):
        """Update the response headers with any new headers.

        NOTE: This SHOULD always include some Warning header to
              signify that the response was cached by the client, not
              by way of the provided headers.
        """
        return {}

    def apply(self, response):
        updated_headers = self.update_headers(response)

        if updated_headers:
            response.headers.update(updated_headers)
            warning_header_value = self.warning(response)
            if warning_header_value is not None:
                response.headers.update({'Warning': warning_header_value})

        return response


class OneDayCache(BaseHeuristic):
    """
    Cache the response by providing an expires 1 day in the
    future.
    """
    def update_headers(self, response):
        headers = {}

        if 'expires' not in response.headers:
            date = parsedate(response.headers['date'])
            expires = expire_after(timedelta(days=1),
                                   date=datetime(*date[:6]))
            headers['expires'] = datetime_to_header(expires)
            headers['cache-control'] = 'public'
        return headers


class ExpiresAfter(BaseHeuristic):
    """
    Cache **all** requests for a defined time period.
    """

    def __init__(self, **kw):
        self.delta = timedelta(**kw)

    def update_headers(self, response):
        expires = expire_after(self.delta)
        return {
            'expires': datetime_to_header(expires),
            'cache-control': 'public',
        }

    def warning(self, response):
        tmpl = '110 - Automatically cached for %s. Response might be stale'
        return tmpl % self.delta


class LastModified(BaseHeuristic):
    """
    If there is no Expires header already, fall back on Last-Modified
    using the heuristic from
    http://tools.ietf.org/html/rfc7234#section-4.2.2
    to calculate a reasonable value.

    Firefox also does something like this per
    https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
    http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
    Unlike mozilla we limit this to 24-hr.
    """
    cacheable_by_default_statuses = set([
        200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
    ])

    def update_headers(self, resp):
        headers = resp.headers

        if 'expires' in headers:
            return {}

        if 'cache-control' in headers and headers['cache-control'] != 'public':
            return {}

        if resp.status not in self.cacheable_by_default_statuses:
            return {}

        if 'date' not in headers or 'last-modified' not in headers:
            return {}

        date = calendar.timegm(parsedate_tz(headers['date']))
        last_modified = parsedate(headers['last-modified'])
        if date is None or last_modified is None:
            return {}

        now = time.time()
        current_age = max(0, now - date)
        delta = date - calendar.timegm(last_modified)
        freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
        if freshness_lifetime <= current_age:
            return {}

        expires = date + freshness_lifetime
        return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))}

    def warning(self, resp):
        return None
4,144
28.820144
98
py
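Attaching ExpiresAfter so every response is treated as cacheable for an hour; requests is required, and the URL and printed header value are illustrative.

from pip._vendor import requests
from pip._vendor.cachecontrol import CacheControl
from pip._vendor.cachecontrol.heuristics import ExpiresAfter

sess = CacheControl(requests.Session(),
                    heuristic=ExpiresAfter(hours=1))
resp = sess.get('http://example.com/')
# e.g. '110 - Automatically cached for 1:00:00. Response might be stale'
print(resp.headers.get('Warning'))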
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/__init__.py
"""CacheControl import Interface. Make it easy to import from cachecontrol without long namespaces. """ __author__ = 'Eric Larson' __email__ = '[email protected]' __version__ = '0.12.4' from .wrapper import CacheControl from .adapter import CacheControlAdapter from .controller import CacheController
302
24.25
65
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/compat.py
try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin try: import cPickle as pickle except ImportError: import pickle # Handle the case where the requests module has been patched to not have # urllib3 bundled as part of its source. try: from pip._vendor.requests.packages.urllib3.response import HTTPResponse except ImportError: from pip._vendor.urllib3.response import HTTPResponse try: from pip._vendor.requests.packages.urllib3.util import is_fp_closed except ImportError: from pip._vendor.urllib3.util import is_fp_closed # Replicate some six behaviour try: text_type = unicode except NameError: text_type = str
695
22.2
75
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/_cmd.py
import logging from pip._vendor import requests from pip._vendor.cachecontrol.adapter import CacheControlAdapter from pip._vendor.cachecontrol.cache import DictCache from pip._vendor.cachecontrol.controller import logger from argparse import ArgumentParser def setup_logging(): logger.setLevel(logging.DEBUG) handler = logging.StreamHandler() logger.addHandler(handler) def get_session(): adapter = CacheControlAdapter( DictCache(), cache_etags=True, serializer=None, heuristic=None, ) sess = requests.Session() sess.mount('http://', adapter) sess.mount('https://', adapter) sess.cache_controller = adapter.controller return sess def get_args(): parser = ArgumentParser() parser.add_argument('url', help='The URL to try and cache') return parser.parse_args() def main(args=None): args = get_args() sess = get_session() # Make a request to get a response resp = sess.get(args.url) # Turn on logging setup_logging() # try setting the cache sess.cache_controller.cache_response(resp.request, resp.raw) # Now try to get it if sess.cache_controller.cached_request(resp.request): print('Cached!') else: print('Not cached :(') if __name__ == '__main__': main()
1,320
20.655738
64
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/serialize.py
import base64 import io import json import zlib from pip._vendor import msgpack from pip._vendor.requests.structures import CaseInsensitiveDict from .compat import HTTPResponse, pickle, text_type def _b64_decode_bytes(b): return base64.b64decode(b.encode("ascii")) def _b64_decode_str(s): return _b64_decode_bytes(s).decode("utf8") class Serializer(object): def dumps(self, request, response, body=None): response_headers = CaseInsensitiveDict(response.headers) if body is None: body = response.read(decode_content=False) # NOTE: 99% sure this is dead code. I'm only leaving it # here b/c I don't have a test yet to prove # it. Basically, before using # `cachecontrol.filewrapper.CallbackFileWrapper`, # this made an effort to reset the file handle. The # `CallbackFileWrapper` short circuits this code by # setting the body as the content is consumed, the # result being a `body` argument is *always* passed # into cache_response, and in turn, # `Serializer.dump`. response._fp = io.BytesIO(body) # NOTE: This is all a bit weird, but it's really important that on # Python 2.x these objects are unicode and not str, even when # they contain only ascii. The problem here is that msgpack # understands the difference between unicode and bytes and we # have it set to differentiate between them, however Python 2 # doesn't know the difference. Forcing these to unicode will be # enough to have msgpack know the difference. data = { u"response": { u"body": body, u"headers": dict( (text_type(k), text_type(v)) for k, v in response.headers.items() ), u"status": response.status, u"version": response.version, u"reason": text_type(response.reason), u"strict": response.strict, u"decode_content": response.decode_content, }, } # Construct our vary headers data[u"vary"] = {} if u"vary" in response_headers: varied_headers = response_headers[u'vary'].split(',') for header in varied_headers: header = header.strip() header_value = request.headers.get(header, None) if header_value is not None: header_value = text_type(header_value) data[u"vary"][header] = header_value return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)]) def loads(self, request, data): # Short circuit if we've been given an empty set of data if not data: return # Determine what version of the serializer the data was serialized # with try: ver, data = data.split(b",", 1) except ValueError: ver = b"cc=0" # Make sure that our "ver" is actually a version and isn't a false # positive from a , being in the data stream. if ver[:3] != b"cc=": data = ver + data ver = b"cc=0" # Get the version number out of the cc=N ver = ver.split(b"=", 1)[-1].decode("ascii") # Dispatch to the actual load method for the given version try: return getattr(self, "_loads_v{0}".format(ver))(request, data) except AttributeError: # This is a version we don't have a loads function for, so we'll # just treat it as a miss and return None return def prepare_response(self, request, cached): """Verify our vary headers match and construct a real urllib3 HTTPResponse object. """ # Special case the '*' Vary value as it means we cannot actually # determine if the cached response is suitable for this request. 
if "*" in cached.get("vary", {}): return # Ensure that the Vary headers for the cached response match our # request for header, value in cached.get("vary", {}).items(): if request.headers.get(header, None) != value: return body_raw = cached["response"].pop("body") headers = CaseInsensitiveDict(data=cached['response']['headers']) if headers.get('transfer-encoding', '') == 'chunked': headers.pop('transfer-encoding') cached['response']['headers'] = headers try: body = io.BytesIO(body_raw) except TypeError: # This can happen if cachecontrol serialized to v1 format (pickle) # using Python 2. A Python 2 str(byte string) will be unpickled as # a Python 3 str (unicode string), which will cause the above to # fail with: # # TypeError: 'str' does not support the buffer interface body = io.BytesIO(body_raw.encode('utf8')) return HTTPResponse( body=body, preload_content=False, **cached["response"] ) def _loads_v0(self, request, data): # The original legacy cache data. This doesn't contain enough # information to construct everything we need, so we'll treat this as # a miss. return def _loads_v1(self, request, data): try: cached = pickle.loads(data) except ValueError: return return self.prepare_response(request, cached) def _loads_v2(self, request, data): try: cached = json.loads(zlib.decompress(data).decode("utf8")) except (ValueError, zlib.error): return # We need to decode the items that we've base64 encoded cached["response"]["body"] = _b64_decode_bytes( cached["response"]["body"] ) cached["response"]["headers"] = dict( (_b64_decode_str(k), _b64_decode_str(v)) for k, v in cached["response"]["headers"].items() ) cached["response"]["reason"] = _b64_decode_str( cached["response"]["reason"], ) cached["vary"] = dict( (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v) for k, v in cached["vary"].items() ) return self.prepare_response(request, cached) def _loads_v3(self, request, data): # Due to Python 2 encoding issues, it's impossible to know for sure # exactly how to load v3 entries, thus we'll treat these as a miss so # that they get rewritten out as v4 entries. return def _loads_v4(self, request, data): try: cached = msgpack.loads(data, encoding='utf-8') except ValueError: return return self.prepare_response(request, cached)
7,055
35.184615
78
py
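The serialized format is a b"cc=4," version prefix followed by a msgpack payload, and loads() dispatches on that prefix; a sketch of the dispatch behaviour only.

from pip._vendor.cachecontrol.serialize import Serializer

s = Serializer()
assert s.loads(None, b'') is None           # empty entry -> cache miss
assert s.loads(None, b'cc=9,junk') is None  # unknown version -> cache miss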
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/caches/redis_cache.py
from __future__ import division

from datetime import datetime
from pip._vendor.cachecontrol.cache import BaseCache


def total_seconds(td):
    """Python 2.6 compatibility"""
    if hasattr(td, 'total_seconds'):
        return int(td.total_seconds())

    ms = td.microseconds
    secs = (td.seconds + td.days * 24 * 3600)
    return int((ms + secs * 10**6) / 10**6)


class RedisCache(BaseCache):

    def __init__(self, conn):
        self.conn = conn

    def get(self, key):
        return self.conn.get(key)

    def set(self, key, value, expires=None):
        if not expires:
            self.conn.set(key, value)
        else:
            expires = expires - datetime.utcnow()
            self.conn.setex(key, total_seconds(expires), value)

    def delete(self, key):
        self.conn.delete(key)

    def clear(self):
        """Helper for clearing all the keys in a database. Use with
        caution!"""
        for key in self.conn.keys():
            self.conn.delete(key)

    def close(self):
        """Redis uses connection pooling, no need to close the connection."""
        pass
1,102
24.068182
77
py
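RedisCache against a live server; the redis client package and a local Redis instance are assumptions here, neither ships with pip.

import datetime

import redis  # third-party client, not vendored
from pip._vendor.cachecontrol.caches.redis_cache import RedisCache

cache = RedisCache(redis.Redis(host='localhost', port=6379))
expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)
cache.set('k', b'v', expires=expiry)  # stored via SETEX with a TTL
print(cache.get('k'))  # b'v'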
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_vendor/cachecontrol/caches/__init__.py
from .file_cache import FileCache # noqa from .redis_cache import RedisCache # noqa
86
28
43
py