import warnings from copy import deepcopy import numpy as np import scipy from scipy import integrate from scipy.special import roots_hermitenorm from sklearn import clone from sklearn.exceptions import NotFittedError from sklearn.metrics import pairwise_kernels from sklearn.utils import column_or_1d from sklearn.utils.validation import check_array, check_consistent_length from ..base import ( SkactivemlClassifier, ProbabilisticRegressor, SkactivemlRegressor, ) from ..classifier import ParzenWindowClassifier from ..utils import ( MISSING_LABEL, is_labeled, is_unlabeled, check_missing_label, check_equal_missing_label, check_type, check_indices, check_random_state, check_scalar, ) __all__ = ["IndexClassifierWrapper"] from ..utils._validation import _check_callable class IndexClassifierWrapper: """ Classifier to simplify retraining classifiers in an active learning scenario. The idea is to pass all instances at once and use their indices to access them. Thereby, optimization is possible e.g. by pre-computing kernel-matrices. Moreover, this wrapper implements partial fit for all classifiers and includes a base classifier that can be used to simulate adding different instance-label pairs to the same classifier. Parameters ---------- clf : skactiveml.base.SkactivemlClassifier The base classifier implementing the methods `fit` and `predict_proba`. X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples) Labels of the training data set (possibly including unlabeled ones indicated by self.missing_label). sample_weight : array-like of shape (n_samples), optional (default=None) Weights of training samples in `X`. set_base_clf : bool, default=False If True, the base classifier will be set to the newly fitted classifier ignore_partial_fit : bool, optional (default: True) Specifies if the `partial_fit` function of `self.clf` should be used (if implemented). enforce_unique_samples : bool, optional (default: False) If True, `partial_fit` will not simply append additional samples but replace the current labels by the new one. If False, instances might appear multiple times if their indices are repeated. use_speed_up : bool, optional (default: True) Specifies if potentially available speed ups should be used. Currently implemented for Parzen Window Classifier. missing_label : scalar or string or np.nan or None, default=np.nan Value to represent a missing label. """ def __init__( self, clf, X, y, sample_weight=None, set_base_clf=False, ignore_partial_fit=False, enforce_unique_samples=False, use_speed_up=False, missing_label=MISSING_LABEL, ): self.clf = clf self.X = X self.y = y self.sample_weight = sample_weight self.ignore_partial_fit = ignore_partial_fit self.enforce_unique_samples = enforce_unique_samples self.use_speed_up = use_speed_up self.missing_label = missing_label # Validate classifier type. 
check_type(self.clf, "clf", SkactivemlClassifier) # Check X, y, sample_weight: will be done by base clf self.X = check_array(self.X, allow_nd="True") self.y = check_array( self.y, ensure_2d=False, force_all_finite=False, dtype=None, ) check_consistent_length(self.X, self.y) if self.sample_weight is not None: check_consistent_length(self.X, self.sample_weight) check_type(set_base_clf, "set_base_clf", bool) # deep copy classifier as it might be fitted already if hasattr(self.clf, "classes_"): self.clf_ = deepcopy(self.clf) if set_base_clf: self.base_clf_ = deepcopy(self.clf_) else: if set_base_clf: raise NotFittedError( "Classifier is not yet fitted but `set_base_clf=True` " "in `__init__` is set to True." ) # Check and use partial fit if applicable check_type(self.ignore_partial_fit, "ignore_partial_fit", bool) self.use_partial_fit = ( hasattr(self.clf, "partial_fit") and not self.ignore_partial_fit ) check_type(self.enforce_unique_samples, "enforce_unique_samples", bool) self.enforce_unique_samples = ( "check_unique" if enforce_unique_samples else False ) # TODO better change check_indices function if self.use_partial_fit and self.enforce_unique_samples: warnings.warn( "The `partial_fit` function by sklearn might not " "ensure that every sample is used only once in the " "fitting process." ) # Check use_speed_up check_type(self.use_speed_up, "use_speed_up", bool) # Check missing label check_missing_label(self.missing_label) self.missing_label_ = self.missing_label if not np.issubdtype(type(self.missing_label), self.y.dtype): raise TypeError( f"`missing_label` has type {type(missing_label)}, " f"which is not compatible with {self.y.dtype} as the " f"type of `y`." ) check_equal_missing_label(self.clf.missing_label, self.missing_label_) # prepare ParzenWindowClassifier if isinstance(self.clf, ParzenWindowClassifier) and self.use_speed_up: self.pwc_metric_ = self.clf.metric self.pwc_metric_dict_ = ( {} if self.clf.metric_dict is None else self.clf.metric_dict ) self.pwc_K_ = np.full([len(self.X), len(self.X)], np.nan) self.clf_ = clone(self.clf) self.clf_.metric = "precomputed" self.clf_.metric_dict = {} def precompute( self, idx_fit, idx_pred, fit_params="all", pred_params="all" ): """ Function to describe for which samples we should precompute something. Will be internally handled differently for different classifiers. The function consists of pairs of `idx_fit` and `idx_predict` to describe which sequences of fitting and predicting are to be expected. Parameters ---------- idx_fit : array-like of shape (n_fit_samples) Indices of samples in `X` that will be used to fit the classifier. idx_pred : array-like of shape (n_predict_samples) Indices of samples in `X` that the classifier will predict for. fit_params : string, optional (default='all') Parameter to specify if only a subset of the `idx_fit` indices will be used later. Can be of value 'all', 'labeled', or 'unlabeled'. pred_params : string, optional (default='all') Parameter to specify if only a subset of the `idx_predict` indices will be used later. Can be of value 'all', 'labeled', or 'unlabeled'. 
""" idx_fit = check_array( idx_fit, ensure_2d=False, dtype=int, input_name="`idx_fit`" ) idx_fit = check_indices(idx_fit, self.X, dim=0) idx_pred = check_array( idx_pred, ensure_2d=False, dtype=int, input_name="`idx_pred`" ) idx_pred = check_indices(idx_pred, self.X, dim=0) # precompute ParzenWindowClassifier if isinstance(self.clf, ParzenWindowClassifier) and self.use_speed_up: if fit_params == "all": idx_fit_ = idx_fit elif fit_params == "labeled": idx_fit_ = idx_fit[ is_labeled( self.y[idx_fit], missing_label=self.missing_label_ ) ] elif fit_params == "unlabeled": idx_fit_ = idx_fit[ is_unlabeled( self.y[idx_fit], missing_label=self.missing_label_ ) ] else: raise ValueError(f"`fit_params`== {fit_params} not defined") if pred_params == "all": idx_pred_ = idx_pred elif pred_params == "labeled": idx_pred_ = idx_pred[ is_labeled( self.y[idx_pred], missing_label=self.missing_label_ ) ] elif pred_params == "unlabeled": idx_pred_ = idx_pred[ is_unlabeled( self.y[idx_pred], missing_label=self.missing_label_ ) ] else: raise ValueError(f"`pred_params`== {pred_params} not defined") if len(idx_fit_) > 0 and len(idx_pred_) > 0: self.pwc_K_[np.ix_(idx_fit_, idx_pred_)] = pairwise_kernels( self.X[idx_fit_], self.X[idx_pred_], self.pwc_metric_, **self.pwc_metric_dict_, ) def fit(self, idx, y=None, sample_weight=None, set_base_clf=False): """Fit the model using `self.X[idx]` as training data and `self.y[idx]` as class labels. Parameters ---------- idx : array-like of shape (n_sub_samples) Indices of samples in `X` that will be used to fit the classifier. y : array-like of shape (n_sub_samples), optional (default=None) Class labels of the training samples corresponding to `X[idx]`. Missing labels are represented the attribute 'missing_label'. If None, labels passed in the `init` will be used. sample_weight: array-like of shape (n_sub_samples), optional (default=None) Weights of training samples in `X[idx]`. If None, weights passed in the `init` will be used. set_base_clf : bool, default=False If True, the base classifier will be set to the newly fitted classifier Returns ------- self: IndexClassifierWrapper, The fitted IndexClassifierWrapper. 
""" # check idx idx = check_array(idx, ensure_2d=False, dtype=int, input_name="`idx`") idx = check_indices( idx, self.X, dim=0, unique=self.enforce_unique_samples ) # check set_base_clf check_type(set_base_clf, "set_base_clf", bool) # check y if y is None: y = self.y[idx] if is_unlabeled(y, missing_label=self.missing_label_).all(): warnings.warn("All labels are of `missing_label` in `fit`.") else: y = check_array( y, ensure_2d=False, force_all_finite=False, dtype=self.y.dtype, input_name="`y`", ) check_consistent_length(idx, y) # check sample_weight if sample_weight is None: sample_weight = self._copy_sw( self._get_sw(self.sample_weight, idx=idx) ) # TODO deepcopy else: sample_weight = check_array( sample_weight, ensure_2d=False, input_name="`sample_weight`" ) check_consistent_length(sample_weight, y) # check if a clf_ exists if "clf_" not in self.__dict__: self.clf_ = clone(self.clf) # fit classifier self.clf_.fit(self.X[idx], y, sample_weight) # store data for further processing if not self.use_partial_fit: self.idx_ = idx self.y_ = y self.sample_weight_ = sample_weight # set base clf if necessary if set_base_clf: self.base_clf_ = deepcopy(self.clf_) if not self.use_partial_fit: self.base_idx_ = self.idx_.copy() self.base_y_ = self.y_.copy() self.base_sample_weight_ = self._copy_sw(self.sample_weight_) return self def partial_fit( self, idx, y=None, sample_weight=None, use_base_clf=False, set_base_clf=False, ): """Update the fitted model using additional samples in `self.X[idx]` and y as class labels. Parameters ---------- idx : array-like of shape (n_sub_samples) Indices of samples in `X` that will be used to fit the classifier. y : array-like of shape (n_sub_samples), optional (default=None) Class labels of the training samples corresponding to `X[idx]`. Missing labels are represented the attribute 'missing_label'. sample_weight: array-like of shape (n_sub_samples), optional (default=None) Weights of training samples in `X[idx]`. use_base_clf : bool, default=False If True, the base classifier will be used to update the fit instead of the current classifier. Here, it is necessary that the base classifier has been set once. set_base_clf : bool, default=False If True, the base classifier will be set to the newly fitted classifier. Returns ------- self: IndexClassifierWrapper, The fitted IndexClassifierWrapper. """ # check idx add_idx = check_array( idx, ensure_2d=False, dtype=int, input_name="`add_idx`" ) add_idx = check_indices( add_idx, self.X, dim=0, unique=self.enforce_unique_samples ) # check use_base_clf check_type(use_base_clf, "use_base_clf", bool) if use_base_clf: if not self.is_fitted(base_clf=True): raise NotFittedError( "Base classifier is not set. Please use " "`set_base_clf=True` in `__init__`, `fit`, or " "`partial_fit`." ) else: if not self.is_fitted(base_clf=False): raise NotFittedError( "Classifier is not fitted. Please `fit` before using " "`partial_fit`." ) # check set_base_clf check_type(set_base_clf, "set_base_clf", bool) # check y if y is None: add_y = self.y[add_idx] if is_unlabeled(add_y, missing_label=self.missing_label_).all(): warnings.warn( "All labels are of `missing_label` in " "`partial_fit`." 
) else: add_y = check_array( y, ensure_2d=False, force_all_finite=False, dtype=self.y.dtype, input_name="`y`", ) check_consistent_length(add_idx, add_y) # check sample_weight if sample_weight is None: add_sample_weight = self._copy_sw( self._get_sw(self.sample_weight, idx=add_idx) ) else: add_sample_weight = check_array( sample_weight, ensure_2d=False, input_name="`sample_weight`" ) check_consistent_length(add_idx, add_sample_weight) # handle case when partial fit of clf is used if self.use_partial_fit: if use_base_clf: self.clf_ = deepcopy(self.base_clf_) # partial fit clf self.clf_.partial_fit(self.X[add_idx], add_y, add_sample_weight) if set_base_clf: self.base_clf_ = deepcopy(self.clf_) # handle case using regular fit from clf else: if not hasattr(self, "idx_"): raise NotFittedError( "Fitted classifier from `init` cannot be " "used for `partial_fit` as it is unknown " "where it has been fitted on." ) if use_base_clf: self.clf_ = clone(self.base_clf_) self.idx_ = self.base_idx_.copy() self.y_ = self.base_y_.copy() self.sample_weight_ = self._copy_sw(self.base_sample_weight_) if self.enforce_unique_samples: cur_idx = np.array([i not in add_idx for i in self.idx_]) else: cur_idx = np.arange(len(self.idx_)) self.idx_ = np.concatenate([self.idx_[cur_idx], add_idx], axis=0) self.y_ = np.concatenate([self.y_[cur_idx], add_y], axis=0) self.sample_weight_ = self._concat_sw( self._get_sw(self.sample_weight_, cur_idx), add_sample_weight ) self.fit( self.idx_, y=self.y_, sample_weight=self.sample_weight_, set_base_clf=set_base_clf, ) return self def predict(self, idx): """Return class label predictions for the input data `X[idx]`. Parameters ---------- idx : array-like of shape (n_sub_samples) Indices of samples in `X` that are to be predicted. Returns ------- y : array-like, shape (n_sub_samples) Predicted class labels of the input samples. """ if isinstance(self.clf, ParzenWindowClassifier) and self.use_speed_up: if hasattr(self, "idx_"): P = self.pwc_K_[self.idx_, :][:, idx].T else: warnings.warn("Speed-up not possible when prefitted") return self.clf.predict_proba(self.X[idx]) # check if results contain NAN if np.isnan(P).any(): raise ValueError( "Error in defining what should be " "pre-computed in ParzenWindowClassifier. " "Not all necessary " "information is available which results in " "NaNs in `predict_proba`." ) return self.clf_.predict(P) else: return self.clf_.predict(self.X[idx]) def predict_proba(self, idx): """Return probability estimates for the input data `X[idx]`. Parameters ---------- idx : array-like of shape (n_sub_samples) Indices of samples in `X` that are to be predicted. Returns ------- P : array-like, shape (n_sub_samples, classes) The class probabilities of the input samples. Classes are ordered by lexicographic order. """ if isinstance(self.clf, ParzenWindowClassifier) and self.use_speed_up: if hasattr(self, "idx_"): P = self.pwc_K_[self.idx_, :][:, idx].T else: warnings.warn("Speed-up not possible when prefitted") return self.clf.predict_proba(self.X[idx]) # check if results contain NAN if np.isnan(P).any(): raise ValueError( "Error in defining what should be " "pre-computed in ParzenWindowClassifier. " "Not all necessary " "information is available which results in " "NaNs in `predict_proba`." ) return self.clf_.predict_proba(P) else: return self.clf_.predict_proba(self.X[idx]) def predict_freq(self, idx): """Return class frequency estimates for the input samples 'X[idx]'. 
Parameters ---------- idx : array-like of shape (n_sub_samples) Indices of samples in `X` that are to be predicted. Returns ------- F: array-like of shape (n_sub_samples, classes) The class frequency estimates of the input samples. Classes are ordered according to `classes_`. """ if isinstance(self.clf, ParzenWindowClassifier) and self.use_speed_up: if hasattr(self, "idx_"): P = self.pwc_K_[self.idx_, :][:, idx].T else: warnings.warn("Speed-up not possible when prefitted") return self.clf.predict_proba(self.X[idx]) # check if results contain NAN if np.isnan(P).any(): raise ValueError( "Error in defining what should be " "pre-computed in ParzenWindowClassifier. " "Not all necessary " "information is available which results in " "NaNs in `predict_proba`." ) return self.clf_.predict_freq(P) else: return self.clf_.predict_freq(self.X[idx]) def is_fitted(self, base_clf=False): """Returns if the classifier (resp. the base classifier) is fitted. Parameters ---------- base_clf : bool, default=False If True, the result will describe if the base classifier is fitted. Returns ------- is_fitted : boolean Boolean describing if the classifier is fitted. """ clf = "base_clf_" if base_clf else "clf_" if clf in self.__dict__: return hasattr(getattr(self, clf), "classes_") else: return False def __getattr__(self, item): if "clf_" in self.__dict__ and hasattr(self.clf_, item): return getattr(self.clf_, item) else: return getattr(self.clf, item) def _get_sw(self, sample_weight, idx=None): if sample_weight is None: return None else: return sample_weight[idx] def _copy_sw(self, sample_weight): if sample_weight is None: return None else: return sample_weight.copy() def _concat_sw(self, sample_weight, sample_weight_add): if sample_weight is None and sample_weight_add is None: return None if sample_weight is not None and sample_weight_add is not None: return np.concatenate([sample_weight, sample_weight_add], axis=0) else: raise ValueError( "All `sample_weight` must be either None or " "given." ) def _cross_entropy( X_eval, true_reg, other_reg, integration_dict=None, random_state=None ): """Calculates the cross entropy. Parameters ---------- X_eval : array-like of shape (n_samples, n_features) The samples where the cross entropy should be evaluated. true_reg: ProbabilisticRegressor True distribution of the cross entropy. other_reg: ProbabilisticRegressor Evaluated distribution of the cross entropy. integration_dict: dict, optional default = None Dictionary for integration arguments, i.e. `integration method` etc.. For details see method `conditional_expect`. random_state : int | np.random.RandomState, optional Random state for cross entropy calculation. Returns ------- cross_ent : numpy.ndarray of shape (n_samples) The cross entropy. """ if integration_dict is None: integration_dict = {} check_type(integration_dict, "integration_dict", dict) check_type(true_reg, "true_reg", ProbabilisticRegressor) check_type(other_reg, "other_reg", ProbabilisticRegressor) random_state = check_random_state(random_state) dist = _reshape_scipy_dist( other_reg.predict_target_distribution(X_eval), shape=(len(X_eval), 1) ) cross_ent = -expected_target_val( X_eval, dist.logpdf, reg=true_reg, random_state=random_state, **integration_dict, vector_func="both", ) return cross_ent def _update_reg( reg, X, y, y_update, sample_weight=None, idx_update=None, X_update=None, mapping=None, ): """Update the regressor by the updating samples, depending on the mapping. 
Chooses `X_update` if `mapping is None` and updates `X[mapping[idx_update]]` otherwise. Parameters ---------- reg : SkactivemlRegressor The regressor to be updated. X : array-like of shape (n_samples, n_features) Training data set. y : array-like of shape (n_samples) Labels of the training data set. y_update : array-like of shape (n_updates) or numeric Updating labels or updating label. sample_weight : array-like of shape (n_samples), optional (default = None) Sample weight of the training data set. If idx_update : array-like of shape (n_updates) or int Index of the samples or sample to be updated. X_update : array-like of shape (n_updates, n_features) or (n_features) Samples to be updated or sample to be updated. mapping : array-like of shape (n_candidates), optional (default = None) The deciding mapping. Returns ------- reg_new : SkaktivemlRegressor The updated regressor. """ if sample_weight is not None and mapping is None: raise ValueError( "If `sample_weight` is not `None` a mapping " "between candidates and the training dataset must " "exist." ) if mapping is not None: if isinstance(idx_update, (int, np.integer)): check_indices([idx_update], A=mapping, unique="check_unique") else: check_indices(idx_update, A=mapping, unique="check_unique") X_new, y_new = _update_X_y( X, y, y_update, idx_update=mapping[idx_update] ) else: X_new, y_new = _update_X_y(X, y, y_update, X_update=X_update) reg_new = clone(reg).fit(X_new, y_new, sample_weight) return reg_new def _update_X_y(X, y, y_update, idx_update=None, X_update=None): """Update the training data by the updating samples/labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set. y : array-like of shape (n_samples) Labels of the training data set. idx_update : array-like of shape (n_updates) or int Index of the samples or sample to be updated. X_update : array-like of shape (n_updates, n_features) or (n_features) Samples to be updated or sample to be updated. y_update : array-like of shape (n_updates) or numeric Updating labels or updating label. Returns ------- X_new : np.ndarray of shape (n_new_samples, n_features) The new training data set. y_new : np.ndarray of shape (n_new_samples) The new labels. """ X = check_array(X, input_name="`X`") y = column_or_1d( check_array( y, force_all_finite=False, ensure_2d=False, input_name="`y`" ) ) check_consistent_length(X, y) if isinstance(y_update, (int, float)): y_update = np.array([y_update]) else: y_update = check_array( y_update, force_all_finite=False, ensure_2d=False, ensure_min_samples=0, input_name="`y`", ) y_update = column_or_1d(y_update) if idx_update is not None: if isinstance(idx_update, (int, np.integer)): idx_update = np.array([idx_update]) idx_update = check_indices(idx_update, A=X, unique="check_unique") check_consistent_length(y_update, idx_update) X_new = X.copy() y_new = y.copy() y_new[idx_update] = y_update return X_new, y_new elif X_update is not None: X_update = check_array( X_update, ensure_2d=False, input_name="`X_update`" ) if X_update.ndim == 1: X_update = X_update.reshape(1, -1) check_consistent_length(X.T, X_update.T) check_consistent_length(y_update, X_update) X_new = np.append(X, X_update, axis=0) y_new = np.append(y, y_update, axis=0) return X_new, y_new else: raise ValueError("`idx_update` or `X_update` must not be `None`") def _reshape_scipy_dist(dist, shape): """Reshapes the parameters "loc", "scale", "df" of a distribution, if they exist. Parameters ---------- dist : scipy.stats._distn_infrastructure.rv_frozen The distribution. 
shape : tuple The new shape. Returns ------- dist : scipy.stats._distn_infrastructure.rv_frozen The reshaped distribution. """ check_type(dist, "dist", scipy.stats._distn_infrastructure.rv_frozen) check_type(shape, "shape", tuple) for idx, item in enumerate(shape): check_type(item, f"shape[{idx}]", int) for argument in ["loc", "scale", "df"]: if argument in dist.kwds: # check if shapes are compatible dist.kwds[argument].shape = shape return dist def expected_target_val(X, target_func, reg, **kwargs): """Calculates the conditional expectation of a function depending only on the target value for each sample in `X`, i.e. E[target_func(Y)|X=x], where Y | X=x ~ reg.predict_target_distribution, for x in `X`. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples where the expectation should be evaluated. target_func : callable The function that transforms the random variable. reg: ProbabilisticRegressor Predicts the target distribution over which the expectation is calculated. Other Parameters ---------------- method: string, optional, optional (default='gauss_hermite') The method by which the expectation is computed. -'assume_linear' assumes E[func(Y)|X=x_eval] ~= func(E[Y|X=x_eval]) and thereby only takes the function value at the expected y value. -'monte_carlo' Basic monte carlo integration. Taking the average of randomly drawn samples. `n_integration_samples` specifies the number of monte carlo samples. -'quantile' Uses the quantile function to transform the integration space into the interval from 0 to 1 and than uses the method from 'quantile_method' to calculate the integral. The number of integration points is specified by `n_integration_samples`. -'gauss_hermite' Uses Gauss-Hermite quadrature. This assumes Y | X to be gaussian distributed. The number of evaluation points is given by `n_integration_samples`. -'dynamic_quad' uses `scipy's` function `expect` on the `rv_continuous` random variable of `reg`, which in turn uses a dynamic gaussian quadrature routine for calculating the integral. Performance is worse using a vector function. quantile_method: string, optional (default='quadrature') Specifies the integration methods used after the quantile transformation. -'trapezoid' Trapezoidal method for integration using evenly spaced samples. -'simpson' Simpson method for integration using evenly spaced samples. -'average' Taking the average value for integration using evenly spaced samples. -'romberg' Romberg method for integration. If `n_integration_samples` is not equal to `2**k + 1` for a natural number k, the number of samples used for integration is put to the smallest such number greater than `n_integration_samples`. -'quadrature' Gaussian quadrature method for integration. n_integration_samples: int, optional (default=10) The number of integration samples used in 'quantile', 'monte_carlo' and 'gauss-hermite'. quad_dict: dict, optional (default=None) Further arguments for using `scipy's` `expect` random_state : int | np.random.RandomState, optional (default=None) Random state for fixing the number generation. target_func : bool If `True` only the target values will be passed to `func`. vector_func : bool or str, optional (default=False) If `vector_func` is `True`, the integration values are passed as a whole to the function `func`. If `vector_func` is 'both', the integration values might or might not be passed as a whole. 
The integration values if passed as a whole are of the form (n_samples, n_integration), where n_integration denotes the number of integration values. Returns ------- expectation : numpy.ndarray of shape (n_samples) The conditional expectation for each value applied. """ _check_callable(target_func, "target_func", n_positional_parameters=1) def arg_filtered_func(idx_y, x_y, y): return target_func(y) return _conditional_expect(X, arg_filtered_func, reg, **kwargs) def _conditional_expect( X, func, reg, method=None, quantile_method=None, n_integration_samples=10, quad_dict=None, random_state=None, vector_func=False, ): """Calculates the conditional expectation of a function depending on the target value the corresponding feature value and an index for each sample in `X`, i.e. E[func(Y, x, idx)|X=x], where Y | X=x ~ reg.predict_target_distribution, for x in `X`. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples where the expectation should be evaluated. func : callable The function that transforms the random variable. The signature of the function must be of the form `func(y, x, idx)`, where `y` is the target value, `x` is the feature value and `idx` is such that `X[idx] = x`. reg: ProbabilisticRegressor Predicts the target distribution over which the expectation is calculated. method: string, optional, optional (default='gauss_hermite') The method by which the expectation is computed. -'assume_linear' assumes E[func(Y)|X=x_eval] ~= func(E[Y|X=x_eval]) and thereby only takes the function value at the expected y value. -'monte_carlo' Basic monte carlo integration. Taking the average of randomly drawn samples. `n_integration_samples` specifies the number of monte carlo samples. -'quantile' Uses the quantile function to transform the integration space into the interval from 0 to 1 and than uses the method from 'quantile_method' to calculate the integral. The number of integration points is specified by `n_integration_samples`. -'gauss_hermite' Uses Gauss-Hermite quadrature. This assumes Y | X to be gaussian distributed. The number of evaluation points is given by `n_integration_samples`. -'dynamic_quad' uses `scipy's` function `expect` on the `rv_continuous` random variable of `reg`, which in turn uses a dynamic gaussian quadrature routine for calculating the integral. Performance is worse using a vector function. quantile_method: string, optional (default='quadrature') Specifies the integration methods used after the quantile transformation. -'trapezoid' Trapezoidal method for integration using evenly spaced samples. -'simpson' Simpson method for integration using evenly spaced samples. -'average' Taking the average value for integration using evenly spaced samples. -'romberg' Romberg method for integration. If `n_integration_samples` is not equal to `2**k + 1` for a natural number k, the number of samples used for integration is put to the smallest such number greater than `n_integration_samples`. -'quadrature' Gaussian quadrature method for integration. n_integration_samples: int, optional (default=10) The number of integration samples used in 'quantile', 'monte_carlo' and 'gauss-hermite'. quad_dict: dict, optional (default=None) Further arguments for using `scipy's` `expect` random_state : int | np.random.RandomState, optional (default=None) Random state for fixing the number generation. vector_func : bool or str, optional (default=False) If `vector_func` is `True`, the integration values are passes in vectorized form to `func`. 
If `vector_func` is 'both', the integration values might or might not be passed in vectorized form, depending what is more efficient. The integration values are passed in vectorized form, means that in a call like `func(y, x, idx)` `y` is of the form (n_samples, n_integration_samples), `x` equals `X` and `idx` is an index map of `X. Returns ------- expectation : numpy.ndarray of shape (n_samples) The conditional expectation for each value applied. """ X = check_array(X, allow_nd=True, input_name="`X`") check_type(reg, "reg", ProbabilisticRegressor) check_type( method, "method", target_vals=[ "monte_carlo", "assume_linear", "dynamic_quad", "gauss_hermite", "quantile", None, ], ) check_type( quantile_method, "quantile_method", target_vals=[ "trapezoid", "simpson", "average", "romberg", "quadrature", None, ], ) check_scalar(n_integration_samples, "n_monte_carlo", int, min_val=1) check_type(quad_dict, "scipy_args", dict, target_vals=[None]) check_type(vector_func, "vector_func", bool, target_vals=["both"]) _check_callable(func, "func", n_positional_parameters=3) if method is None: method = "gauss_hermite" if quantile_method is None: quantile_method = "quadrature" if quad_dict is None: quad_dict = {} if method == "quantile" and quantile_method == "romberg": # n_integration_samples need to be of the form 2**k + 1 n_integration_samples = ( 2 ** int(np.log2(n_integration_samples) + 1) + 1 ) is_optional = vector_func == "both" if is_optional: vector_func = True random_state = check_random_state(random_state) def evaluate_func(inner_potential_y): if vector_func: inner_output = func(np.arange(len(X)), X, inner_potential_y) else: inner_output = np.zeros_like(inner_potential_y) for idx_x, inner_x in enumerate(X): for idx_y, y_val in enumerate(inner_potential_y[idx_x]): inner_output[idx_x, idx_y] = func(idx_x, inner_x, y_val) return inner_output expectation = np.zeros(len(X)) if method in ["assume_linear", "monte_carlo"]: if method == "assume_linear": potential_y = reg.predict(X).reshape(-1, 1) else: # method equals "monte_carlo" potential_y = reg.sample_y( X=X, n_samples=n_integration_samples, random_state=random_state, ) expectation = np.average(evaluate_func(potential_y), axis=1) elif method == "quantile": if quantile_method in ["trapezoid", "simpson", "average", "romberg"]: eval_points = np.arange(1, n_integration_samples + 1) / ( n_integration_samples + 1 ) cond_dist = _reshape_scipy_dist( reg.predict_target_distribution(X), shape=(-1, 1) ) potential_y = cond_dist.ppf(eval_points.reshape(1, -1)) output = evaluate_func(potential_y) if quantile_method == "trapezoid": expectation = integrate.trapezoid( output, dx=1 / n_integration_samples, axis=1 ) elif quantile_method == "simpson": expectation = integrate.simpson( output, dx=1 / n_integration_samples, axis=1 ) elif quantile_method == "average": expectation = np.average(output, axis=-1) else: # quantile_method equals "romberg" expectation = integrate.romb( output, dx=1 / n_integration_samples, axis=1 ) else: # quantile_method equals "quadrature" def fixed_quad_function_wrapper(inner_eval_points): inner_cond_dist = _reshape_scipy_dist( reg.predict_target_distribution(X), shape=(-1, 1) ) inner_potential_y = inner_cond_dist.ppf( inner_eval_points.reshape(1, -1) ) return evaluate_func(inner_potential_y) expectation, _ = integrate.fixed_quad( fixed_quad_function_wrapper, 0, 1, n=n_integration_samples ) elif method == "gauss_hermite": unscaled_potential_y, weights = roots_hermitenorm( n_integration_samples ) cond_mean, cond_std = reg.predict(X, 
return_std=True) potential_y = ( cond_std[:, np.newaxis] * unscaled_potential_y[np.newaxis, :] + cond_mean[:, np.newaxis] ) output = evaluate_func(potential_y) expectation = ( 1 / (2 * np.pi) ** (1 / 2) * np.sum(weights[np.newaxis, :] * output, axis=1) ) else: # method equals "dynamic_quad" for idx, x in enumerate(X): cond_dist = reg.predict_target_distribution([x]) def quad_function_wrapper(y): if is_optional or not vector_func: return func(idx, x, y) else: return func(np.arange(len(X)), X, np.full((len(X), 1), y))[ idx ] expectation[idx] = cond_dist.expect( quad_function_wrapper, **quad_dict, ) return expectation
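The wrapper above is driven by index sets into one fixed pool rather than by raw arrays. A minimal usage sketch follows, assuming scikit-activeml (with its ParzenWindowClassifier) is installed; the toy pool, labels, and index choices are illustrative only and not part of the module.

import numpy as np
from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.pool.utils import IndexClassifierWrapper
from skactiveml.utils import MISSING_LABEL

# Toy pool: 10 samples, of which only the first 3 are labeled.
X = np.random.RandomState(0).rand(10, 2)
y = np.full(10, MISSING_LABEL)
y[:3] = [0, 1, 0]

clf = ParzenWindowClassifier(classes=[0, 1])
id_clf = IndexClassifierWrapper(clf, X, y)

# Fit on the labeled indices and remember this state as the base classifier.
id_clf.fit([0, 1, 2], set_base_clf=True)

# Simulate acquiring the label 1 for sample 3 on top of the base classifier,
# then score the remaining unlabeled part of the pool.
id_clf.partial_fit([3], y=[1], use_base_clf=True)
proba = id_clf.predict_proba(np.arange(4, 10))

With `use_speed_up=True` and a prior call to `precompute`, the same sequence reuses cached kernel values from `pwc_K_` instead of recomputing them on every refit.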
Package: scikit-activeml
Path: /scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/pool/utils.py
Filename: utils.py
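`_conditional_expect` defaults to Gauss-Hermite quadrature, which assumes Y | X = x is Gaussian. The stand-alone sketch below (NumPy/SciPy only; the values of mu, sigma, and the transform f are made up for illustration) mirrors that branch: for Y ~ N(mu, sigma^2), E[f(Y)] is approximated by (1 / sqrt(2 * pi)) * sum_i w_i * f(mu + sigma * y_i), with nodes y_i and weights w_i from `roots_hermitenorm`.

import numpy as np
from scipy.special import roots_hermitenorm

mu, sigma = 1.5, 0.7
nodes, weights = roots_hermitenorm(10)  # probabilists' Hermite nodes and weights

def f(y):
    return y ** 2  # closed form for comparison: E[Y^2] = mu^2 + sigma^2

approx = np.sum(weights * f(mu + sigma * nodes)) / np.sqrt(2 * np.pi)
exact = mu ** 2 + sigma ** 2
print(approx, exact)  # agree up to floating-point error (the rule is exact for low-degree polynomials)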
return_std=True) potential_y = ( cond_std[:, np.newaxis] * unscaled_potential_y[np.newaxis, :] + cond_mean[:, np.newaxis] ) output = evaluate_func(potential_y) expectation = ( 1 / (2 * np.pi) ** (1 / 2) * np.sum(weights[np.newaxis, :] * output, axis=1) ) else: # method equals "dynamic_quad" for idx, x in enumerate(X): cond_dist = reg.predict_target_distribution([x]) def quad_function_wrapper(y): if is_optional or not vector_func: return func(idx, x, y) else: return func(np.arange(len(X)), X, np.full((len(X), 1), y))[ idx ] expectation[idx] = cond_dist.expect( quad_function_wrapper, **quad_dict, ) return expectation
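# Minimal usage sketch for `expected_target_val` (illustrative only). It
# approximates E[Y**2 | X=x] with Gauss-Hermite quadrature and assumes that
# `NICKernelRegressor` is available in `skactiveml.regressor` as a
# `ProbabilisticRegressor`; any regressor implementing
# `predict_target_distribution` should work analogously.
if __name__ == "__main__":
    import numpy as np
    from skactiveml.pool.utils import expected_target_val
    from skactiveml.regressor import NICKernelRegressor

    # Small 1D toy data set.
    X_demo = np.array([[1.0], [2.0], [3.0], [4.0]])
    y_demo = np.array([1.1, 1.9, 3.2, 3.9])
    reg_demo = NICKernelRegressor().fit(X_demo, y_demo)

    # Conditional expectation of the squared target value for each sample.
    exp_y_squared = expected_target_val(
        X_demo,
        target_func=lambda y: y**2,
        reg=reg_demo,
        method="gauss_hermite",
        n_integration_samples=10,
        vector_func=True,
    )
    print(exp_y_squared.shape)  # -> (4,)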
0.781956
0.437523
import numpy as np from sklearn import clone from sklearn.utils import check_array from skactiveml.base import ( SingleAnnotatorPoolQueryStrategy, ProbabilisticRegressor, ) from skactiveml.pool.utils import ( _update_reg, _conditional_expect, _cross_entropy, ) from skactiveml.utils import ( check_type, simple_batch, MISSING_LABEL, is_unlabeled, ) class KLDivergenceMaximization(SingleAnnotatorPoolQueryStrategy): """Regression based Kullback Leibler Divergence Maximization. This class implements a query strategy, which selects those samples that maximize the expected kullback leibler divergence, where it is assumed that the target probabilities for different samples are independent. Parameters ---------- integration_dict_target_val : dict, optional (default=None) Dictionary for integration arguments, i.e. `integration method` etc., used for calculating the expected `y` value for the candidate samples. For details see method `skactiveml.pool.utils._conditional_expect`. integration_dict_cross_entropy : dict, optional (default=None) Dictionary for integration arguments, i.e. `integration method` etc., used for calculating the cross entropy between the updated conditional estimator by the `X_cand` value and the old conditional estimator. For details see method `conditional_expect`. missing_label : scalar or string or np.nan or None, (default=skactiveml.utils.MISSING_LABEL) Value to represent a missing label. random_state : int | np.random.RandomState, optional (default=None) Random state for candidate selection. References ---------- [1] Elreedy, Dina and F Atiya, Amir and I Shaheen, Samir. A novel active learning regression framework for balancing the exploration-exploitation trade-off, page 651 and subsequently, 2019. """ def __init__( self, integration_dict_target_val=None, integration_dict_cross_entropy=None, missing_label=MISSING_LABEL, random_state=None, ): super().__init__( random_state=random_state, missing_label=missing_label ) self.integration_dict_target_val = integration_dict_target_val self.integration_dict_cross_entropy = integration_dict_cross_entropy def query( self, X, y, reg, fit_reg=True, sample_weight=None, candidates=None, batch_size=1, return_utilities=False, ): """Determines for which candidate samples labels are to be queried. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples) Labels of the training data set (possibly including unlabeled ones indicated by `self.missing_label`). reg : ProbabilisticRegressor Predicts the entropy and the cross entropy and the potential y-values for the candidate samples. fit_reg : bool, optional (default=True) Defines whether the regressor should be fitted on `X`, `y`, and `sample_weight`. sample_weight : array-like of shape (n_samples), optional (default=None) Weights of training samples in `X`. candidates : None or array-like of shape (n_candidates), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If candidates is None, the unlabeled samples from (X,y) are considered as candidates. If candidates is of shape (n_candidates) and of type int, candidates is considered as the indices of the samples in (X,y). If candidates is of shape (n_candidates, n_features), the candidates are directly given in candidates (not necessarily contained in X). batch_size : int, optional (default=1) The number of samples to be selected in one AL cycle. 
return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. Returns ------- query_indices : numpy.ndarray of shape (batch_size) The query_indices indicate for which candidate sample a label is to queried, e.g., `query_indices[0]` indicates the first selected sample. If candidates is None or of shape (n_candidates), the indexing refers to samples in X. If candidates is of shape (n_candidates, n_features), the indexing refers to samples in candidates. utilities : numpy.ndarray of shape (batch_size, n_samples) or numpy.ndarray of shape (batch_size, n_candidates) The utilities of samples after each selected sample of the batch, e.g., `utilities[0]` indicates the utilities used for selecting the first sample (with index `query_indices[0]`) of the batch. Utilities for labeled samples will be set to np.nan. If candidates is None or of shape (n_candidates), the indexing refers to samples in X. If candidates is of shape (n_candidates, n_features), the indexing refers to samples in candidates. """ X, y, candidates, batch_size, return_utilities = self._validate_data( X, y, candidates, batch_size, return_utilities, reset=True ) check_type(reg, "reg", ProbabilisticRegressor) check_type(fit_reg, "fit_reg", bool) X_eval = X[is_unlabeled(y, missing_label=self.missing_label_)] if len(X_eval) == 0: raise ValueError( "The training data contains no unlabeled " "data. This can be fixed by setting the " "evaluation set manually, e.g. set " "`X_eval=X`." ) if self.integration_dict_target_val is None: self.integration_dict_target_val = {"method": "assume_linear"} if self.integration_dict_cross_entropy is None: self.integration_dict_cross_entropy = { "method": "gauss_hermite", "n_integration_samples": 10, } check_type( self.integration_dict_target_val, "self.integration_dict", dict ) check_type( self.integration_dict_cross_entropy, "self.integration_dict", dict ) X_cand, mapping = self._transform_candidates(candidates, X, y) if fit_reg: reg = clone(reg).fit(X, y, sample_weight) utilities_cand = self._kullback_leibler_divergence( X_eval, X_cand, mapping, reg, X, y, sample_weight=sample_weight ) if mapping is None: utilities = utilities_cand else: utilities = np.full(len(X), np.nan) utilities[mapping] = utilities_cand return simple_batch( utilities, self.random_state_, batch_size=batch_size, return_utilities=return_utilities, ) def _kullback_leibler_divergence( self, X_eval, X_cand, mapping, reg, X, y, sample_weight=None ): """Calculates the expected kullback leibler divergence over the evaluation set if each candidate sample where to be labeled. Parameters ---------- X_eval : array-like of shape (n_samples, n_features) The samples where the information gain should be evaluated. X_cand : array-like of shape (n_candidate_samples, n_features) The candidate samples that determine the information gain. mapping : array-like of shape (n_candidate_samples,) or None A mapping between `X_cand` and `X` if it exists. reg: ProbabilisticRegressor Predicts the entropy, predicts values. X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples) Labels of the training data set (possibly including unlabeled ones indicated by `self.missing_label`). sample_weight: array-like of shape (n_samples,), optional (default=None) Weights of training samples in `X`. 
        Returns
        -------
        kl_div : numpy.ndarray of shape (n_candidate_samples)
            The calculated expected Kullback-Leibler divergence.
        """

        def new_kl_divergence(idx, x_cand, y_pot):
            reg_new = _update_reg(
                reg,
                X,
                y,
                sample_weight=sample_weight,
                y_update=y_pot,
                idx_update=idx,
                X_update=x_cand,
                mapping=mapping,
            )
            entropy_post = np.sum(
                reg_new.predict(X_eval, return_entropy=True)[1]
            )
            cross_ent = np.sum(
                _cross_entropy(
                    X_eval,
                    reg_new,
                    reg,
                    integration_dict=self.integration_dict_cross_entropy,
                    random_state=self.random_state_,
                )
            )
            return cross_ent - entropy_post

        kl_div = _conditional_expect(
            X_cand,
            new_kl_divergence,
            reg,
            random_state=self.random_state_,
            **self.integration_dict_target_val
        )
        return kl_div
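# Minimal usage sketch for the strategy above (illustrative toy data). It
# assumes `KLDivergenceMaximization` is exported from `skactiveml.pool` and
# that `NICKernelRegressor` is available in `skactiveml.regressor`; any
# `ProbabilisticRegressor` should be usable in its place.
if __name__ == "__main__":
    import numpy as np
    from skactiveml.pool import KLDivergenceMaximization
    from skactiveml.regressor import NICKernelRegressor
    from skactiveml.utils import MISSING_LABEL

    X_demo = np.linspace(0, 1, 20).reshape(-1, 1)
    y_demo = np.full(20, MISSING_LABEL)
    y_demo[:5] = np.sin(2 * np.pi * X_demo[:5, 0])  # a few initial labels

    qs = KLDivergenceMaximization(random_state=0)
    reg_demo = NICKernelRegressor()

    # The regressor is fitted internally because `fit_reg=True` by default.
    query_idx = qs.query(X_demo, y_demo, reg=reg_demo, batch_size=1)
    print(query_idx)  # index of the selected sample in `X_demo`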
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/pool/_information_gain_maximization.py
_information_gain_maximization.py
0.934447
0.677541
import numpy as np

from ..base import SingleAnnotatorPoolQueryStrategy
from ..utils import MISSING_LABEL, simple_batch


class RandomSampling(SingleAnnotatorPoolQueryStrategy):
    """Random Sampling.

    This class implements random sampling.

    Parameters
    ----------
    missing_label : scalar or string or np.nan or None, default=np.nan
        Value to represent a missing label.
    random_state : int or RandomState instance, default=None
        Random state for candidate selection.
    """

    def __init__(self, missing_label=MISSING_LABEL, random_state=None):
        super().__init__(
            missing_label=missing_label, random_state=random_state
        )

    def query(
        self, X, y, candidates=None, batch_size=1, return_utilities=False
    ):
        """Determines for which candidate samples labels are to be queried.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data set, usually complete, i.e. including the labeled
            and unlabeled samples.
        y : array-like of shape (n_samples)
            Labels of the training data set (possibly including unlabeled
            ones indicated by `self.missing_label`).
        candidates : None or array-like of shape (n_candidates), dtype=int or
            array-like of shape (n_candidates, n_features),
            optional (default=None)
            If candidates is None, the unlabeled samples from (X,y) are
            considered as candidates.
            If candidates is of shape (n_candidates) and of type int,
            candidates is considered as the indices of the samples in (X,y).
            If candidates is of shape (n_candidates, n_features), the
            candidates are directly given in candidates (not necessarily
            contained in X). This is not supported by all query strategies.
        batch_size : int, optional (default=1)
            The number of samples to be selected in one AL cycle.
        return_utilities : bool, optional (default=False)
            If True, also return the utilities based on the query strategy.

        Returns
        -------
        query_indices : numpy.ndarray of shape (batch_size)
            The query_indices indicate for which candidate sample a label is
            to be queried, e.g., `query_indices[0]` indicates the first
            selected sample.
            If candidates is None or of shape (n_candidates), the indexing
            refers to samples in X.
            If candidates is of shape (n_candidates, n_features), the
            indexing refers to samples in candidates.
        utilities : numpy.ndarray of shape (batch_size, n_samples) or
            numpy.ndarray of shape (batch_size, n_candidates)
            The utilities of samples after each selected sample of the batch,
            e.g., `utilities[0]` indicates the utilities used for selecting
            the first sample (with index `query_indices[0]`) of the batch.
            Utilities for labeled samples will be set to np.nan.
            If candidates is None or of shape (n_candidates), the indexing
            refers to samples in X.
            If candidates is of shape (n_candidates, n_features), the
            indexing refers to samples in candidates.
        """
        X, y, candidates, batch_size, return_utilities = self._validate_data(
            X, y, candidates, batch_size, return_utilities, reset=True
        )

        X_cand, mapping = self._transform_candidates(candidates, X, y)

        if mapping is None:
            utilities = self.random_state_.random_sample(len(X_cand))
        else:
            utilities = np.full(len(X), np.nan)
            utilities[mapping] = self.random_state_.random_sample(
                len(mapping)
            )

        return simple_batch(
            utilities,
            self.random_state_,
            batch_size=batch_size,
            return_utilities=return_utilities,
        )
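# Minimal usage sketch for `RandomSampling` (illustrative toy data; assuming
# the class is exported from `skactiveml.pool`). No model is needed: the
# strategy draws uniformly at random from the unlabeled candidates.
if __name__ == "__main__":
    import numpy as np
    from skactiveml.pool import RandomSampling
    from skactiveml.utils import MISSING_LABEL

    X_demo = np.arange(10, dtype=float).reshape(-1, 1)
    y_demo = np.full(10, MISSING_LABEL)
    y_demo[:2] = [0.0, 1.0]  # two samples are already labeled

    qs = RandomSampling(random_state=42)
    query_idx, utilities = qs.query(
        X_demo, y_demo, batch_size=3, return_utilities=True
    )
    print(query_idx)        # three indices of unlabeled samples
    print(utilities.shape)  # -> (3, 10); labeled samples get np.nan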
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/pool/_random_sampling.py
_random_sampling.py
0.93206
0.557484
import copy import numpy as np from sklearn import clone from sklearn.utils.validation import check_array, check_is_fitted from ..base import ( SingleAnnotatorPoolQueryStrategy, SkactivemlClassifier, SkactivemlRegressor, ) from ..utils import ( simple_batch, check_type, compute_vote_vectors, MISSING_LABEL, check_equal_missing_label, ) class QueryByCommittee(SingleAnnotatorPoolQueryStrategy): """Query-by-Committee. The Query-by-Committee (QueryByCommittee) strategy uses an ensemble of estimators to identify on which instances many estimators disagree. Parameters ---------- method : string, default='KL_divergence' The method to calculate the disagreement in the case of classification. KL_divergence or vote_entropy are possible. In the case of regression the empirical variance is used. missing_label : scalar or string or np.nan or None, default=np.nan Value to represent a missing label. random_state : int or np.random.RandomState, default=None The random state to use. References ---------- [1] H.S. Seung, M. Opper, and H. Sompolinsky. Query by committee. In Proceedings of the ACM Workshop on Computational Learning Theory, pages 287-294, 1992. [2] N. Abe and H. Mamitsuka. Query learning strategies using boosting and bagging. In Proceedings of the International Conference on Machine Learning (ICML), pages 1-9. Morgan Kaufmann, 1998. [3] Burbidge, Robert and Rowland, Jem J and King, Ross D. Active learning for regression based on query by committee. International conference on intelligent data engineering and automated learning, pages 209--218, 2007. """ def __init__( self, method="KL_divergence", missing_label=MISSING_LABEL, random_state=None, ): super().__init__( missing_label=missing_label, random_state=random_state ) self.method = method def query( self, X, y, ensemble, fit_ensemble=True, sample_weight=None, candidates=None, batch_size=1, return_utilities=False, ): """Determines for which candidate samples labels are to be queried. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples) Labels of the training data set (possibly including unlabeled ones indicated by self.MISSING_LABEL.) ensemble : list or tuple of SkactivemlClassifier or list or tuple of SkactivemlRegressor, SkactivemlClassifier or SkactivemlRegressor If `ensemble` is a `SkactivemlClassifier` or a `SkactivemlRegressor` , it must have `n_estimators` and `estimators_` after fitting as attribute. Then, its estimators will be used as committee. If `ensemble` is array-like, each element of this list must be `SkactivemlClassifier` or a `SkactivemlRegressor` and will be used as committee member. fit_ensemble : bool, default=True Defines whether the ensemble should be fitted on `X`, `y`, and `sample_weight`. sample_weight: array-like of shape (n_samples), default=None Weights of training samples in `X`. candidates : None or array-like of shape (n_candidates), dtype=int or array-like of shape (n_candidates, n_features), default=None If candidates is None, the unlabeled samples from (X,y) are considered as candidates. If candidates is of shape (n_candidates) and of type int, candidates is considered as the indices of the samples in (X,y). If candidates is of shape (n_candidates, n_features), the candidates are directly given in candidates (not necessarily contained in X). This is not supported by all query strategies. batch_size : int, default=1 The number of samples to be selected in one AL cycle. 
return_utilities : bool, default=False If true, also return the utilities based on the query strategy. Returns ------- query_indices : numpy.ndarray of shape (batch_size) The query_indices indicate for which candidate sample a label is to queried, e.g., `query_indices[0]` indicates the first selected sample. If candidates is None or of shape (n_candidates), the indexing refers to samples in X. If candidates is of shape (n_candidates, n_features), the indexing refers to samples in candidates. utilities : numpy.ndarray of shape (batch_size, n_samples) or numpy.ndarray of shape (batch_size, n_candidates) The utilities of samples after each selected sample of the batch, e.g., `utilities[0]` indicates the utilities used for selecting the first sample (with index `query_indices[0]`) of the batch. Utilities for labeled samples will be set to np.nan. If candidates is None or of shape (n_candidates), the indexing refers to samples in X. If candidates is of shape (n_candidates, n_features), the indexing refers to samples in candidates. """ # Validate input parameters. X, y, candidates, batch_size, return_utilities = self._validate_data( X, y, candidates, batch_size, return_utilities, reset=True ) X_cand, mapping = self._transform_candidates(candidates, X, y) # Validate classifier type. check_type(fit_ensemble, "fit_ensemble", bool) ensemble, est_arr, classes = _check_ensemble( ensemble=ensemble, X=X, y=y, sample_weight=sample_weight, fit_ensemble=fit_ensemble, missing_label=self.missing_label_, estimator_types=[SkactivemlClassifier, SkactivemlRegressor], ) # Validate 'method' check_type( self.method, "method", target_vals=["KL_divergence", "vote_entropy"], ) # classes is None if the ensemble is a regressor if classes is not None: # Compute utilities. if self.method == "KL_divergence": probas = np.array( [est.predict_proba(X_cand) for est in est_arr] ) utilities_cand = average_kl_divergence(probas) else: # self.method == "vote_entropy": votes = np.array([est.predict(X_cand) for est in est_arr]).T utilities_cand = vote_entropy(votes, classes) else: results = np.array( [learner.predict(X_cand) for learner in est_arr] ) utilities_cand = np.std(results, axis=0) if mapping is None: utilities = utilities_cand else: utilities = np.full(len(X), np.nan) utilities[mapping] = utilities_cand return simple_batch( utilities, self.random_state_, batch_size=batch_size, return_utilities=return_utilities, ) def average_kl_divergence(probas): """Calculates the average Kullback-Leibler (KL) divergence for measuring the level of disagreement in QueryByCommittee. Parameters ---------- probas : array-like, shape (n_estimators, n_samples, n_classes) The probability estimates of all estimators, samples, and classes. Returns ------- scores: np.ndarray, shape (n_samples) The Kullback-Leibler (KL) divergences. References ---------- [1] A. McCallum and K. Nigam. Employing EM in pool-based active learning for text classification. In Proceedings of the International Conference on Machine Learning (ICML), pages 359-367. Morgan Kaufmann, 1998. """ # Check probabilities. probas = check_array(probas, allow_nd=True) if probas.ndim != 3: raise ValueError( f"Expected 3D array, got {probas.ndim}D array instead." ) n_estimators = probas.shape[0] # Calculate the average KL divergence. 
probas_mean = np.mean(probas, axis=0) with np.errstate(divide="ignore", invalid="ignore"): scores = np.nansum( np.nansum(probas * np.log(probas / probas_mean), axis=2), axis=0 ) scores = scores / n_estimators return scores def vote_entropy(votes, classes): """Calculates the vote entropy for measuring the level of disagreement in QueryByCommittee. Parameters ---------- votes : array-like, shape (n_samples, n_estimators) The class predicted by the estimators for each sample. classes : array-like, shape (n_classes) A list of all possible classes. Returns ------- vote_entropy : np.ndarray, shape (n_samples) The vote entropy of each row in `votes`. References ---------- [1] Engelson, Sean P., and Ido Dagan. Minimizing manual annotation cost in supervised training from corpora. arXiv preprint cmp-lg/9606030 (1996). """ # Check `votes` array. votes = check_array(votes) n_estimators = votes.shape[1] # Count the votes. vote_count = compute_vote_vectors( y=votes, classes=classes, missing_label=None ) # Compute vote entropy. v = vote_count / n_estimators with np.errstate(divide="ignore", invalid="ignore"): scores = np.nansum(-v * np.log(v), axis=1) return scores def _check_ensemble( ensemble, estimator_types, X, y, sample_weight, fit_ensemble=True, missing_label=MISSING_LABEL, ): # Check if the parameter `ensemble` is valid. for estimator_type in estimator_types: if isinstance(ensemble, estimator_type) and ( hasattr(ensemble, "n_estimators") or hasattr(ensemble, "estimators") ): check_equal_missing_label(ensemble.missing_label, missing_label) # Fit the ensemble. if fit_ensemble: ensemble = clone(ensemble).fit(X, y, sample_weight) else: check_is_fitted(ensemble) if hasattr(ensemble, "estimators_"): est_arr = ensemble.estimators_ else: if hasattr(ensemble, "estimators"): n_estimators = len(ensemble.estimators) else: n_estimators = ensemble.n_estimators est_arr = [ensemble] * n_estimators if estimator_type == SkactivemlClassifier: return ensemble, est_arr, ensemble.classes_ else: return ensemble, est_arr, None elif isinstance(ensemble, (list, tuple)) and isinstance( ensemble[0], estimator_type ): est_arr = copy.deepcopy(ensemble) for i in range(len(est_arr)): check_type( est_arr[i], f"ensemble[{i}]", estimator_type ) # better error message check_equal_missing_label( est_arr[i].missing_label, missing_label ) # Fit the ensemble. if fit_ensemble: est_arr[i] = est_arr[i].fit(X, y, sample_weight) else: check_is_fitted(est_arr[i]) if i > 0 and estimator_type == SkactivemlClassifier: np.testing.assert_array_equal( est_arr[i - 1].classes_, est_arr[i].classes_, err_msg=f"The inferred classes of the {i - 1}-th and " f"{i}-th are not equal. Set the `classes` " f"parameter of each ensemble member to avoid " f"this error.", ) if estimator_type == SkactivemlClassifier: return ensemble, est_arr, est_arr[0].classes_ else: return ensemble, est_arr, None raise TypeError( f"`ensemble` must either be a `{estimator_types} " f"with the attribute `n_ensembles` and `estimators_` after " f"fitting or a list of {estimator_types} objects." )
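# Minimal usage sketch for `QueryByCommittee` (illustrative toy data;
# assuming the class is exported from `skactiveml.pool`). The committee is a
# plain list of `ParzenWindowClassifier` members with different, arbitrarily
# chosen kernel bandwidths so that their votes can actually differ.
if __name__ == "__main__":
    import numpy as np
    from skactiveml.classifier import ParzenWindowClassifier
    from skactiveml.pool import QueryByCommittee
    from skactiveml.utils import MISSING_LABEL

    X_demo = np.array([[0.0], [0.2], [0.4], [0.6], [0.8], [1.0]])
    y_demo = np.array(
        [0, MISSING_LABEL, MISSING_LABEL, MISSING_LABEL, MISSING_LABEL, 1]
    )

    committee = [
        ParzenWindowClassifier(classes=[0, 1], metric_dict={"gamma": gamma})
        for gamma in (0.5, 1.0, 2.0)
    ]
    qs = QueryByCommittee(method="vote_entropy", random_state=0)
    query_idx = qs.query(X_demo, y_demo, ensemble=committee, batch_size=1)
    print(query_idx)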
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/pool/_query_by_committee.py
_query_by_committee.py
0.870982
0.557604
import itertools import numpy as np from scipy.special import factorial, gammaln from sklearn import clone from sklearn.utils.validation import check_array from ..base import SkactivemlClassifier from ..base import SingleAnnotatorPoolQueryStrategy from ..classifier import ParzenWindowClassifier from ..utils import ( MISSING_LABEL, check_scalar, simple_batch, check_type, check_equal_missing_label, ) class ProbabilisticAL(SingleAnnotatorPoolQueryStrategy): """(Multi-class) Probabilistic Active Learning This class implements multi-class probabilistic active learning (McPAL) [1] strategy. Parameters ---------- prior: float, optional (default=1) Prior probabilities for the Dirichlet distribution of the samples. m_max: int, optional (default=1) Maximum number of hypothetically acquired labels. missing_label : scalar or string or np.nan or None, default=np.nan Value to represent a missing label. metric : str or callable, default=None The metric must be None or a valid kernel as defined by the function `sklearn.metrics.pairwise.pairwise_kernels`. The kernel is used to calculate the frequency of labels near the candidates and multiplied with the probabilities returned by `clf` to get a kernel frequency estimate for each class. If metric is set to None, the `predict_freq` function of the `clf` will be used instead. If this is not defined, a TypeError is raised. metric_dict : dict, default=None Any further parameters that should be passed directly to the kernel function. If metric_dict is None and metric is 'rbf' metric_dict is set to {'gamma': 'mean'}. random_state: numeric | np.random.RandomState, optional Random state for candidate selection. References ---------- [1] Daniel Kottke, Georg Krempl, Dominik Lang, Johannes Teschner, and Myra Spiliopoulou. Multi-Class Probabilistic Active Learning, vol. 285 of Frontiers in Artificial Intelligence and Applications, pages 586-594. IOS Press, 2016 """ def __init__( self, prior=1, m_max=1, missing_label=MISSING_LABEL, metric=None, metric_dict=None, random_state=None, ): super().__init__( missing_label=missing_label, random_state=random_state ) self.metric = metric self.metric_dict = metric_dict self.prior = prior self.m_max = m_max def query( self, X, y, clf, fit_clf=True, sample_weight=None, utility_weight=None, candidates=None, batch_size=1, return_utilities=False, ): """Query the next instance to be labeled. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples) Labels of the training data set (possibly including unlabeled ones indicated by self.MISSING_LABEL. clf : skactiveml.base.ClassFrequencyEstimator Model implementing the methods `fit` and `predict_freq`. fit_clf : bool, default=True Defines whether the classifier should be fitted on `X`, `y`, and `sample_weight`. sample_weight: array-like of shape (n_samples), optional (default=None) Weights of training samples in `X`. utility_weight: array-like, optional (default=None) Weight for each candidate (multiplied with utilities). Usually, this is to be the density of a candidate in ProbabilisticAL. The length of `utility_weight` is usually n_samples, except for the case when candidates contains samples (ndim >= 2). Then the length is `n_candidates`. 
candidates : None or array-like of shape (n_candidates), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If candidates is None, the unlabeled samples from (X,y) are considered as candidates. If candidates is of shape (n_candidates) and of type int, candidates is considered as the indices of the samples in (X,y). If candidates is of shape (n_candidates, n_features), the candidates are directly given in candidates (not necessarily contained in X). This is not supported by all query strategies. batch_size : int, optional (default=1) The number of samples to be selected in one AL cycle. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. Returns ------- query_indices : numpy.ndarray, shape (batch_size) The query_indices indicate for which candidate sample a label is to queried, e.g., `query_indices[0]` indicates the first selected sample. utilities : numpy.ndarray, shape (batch_size, n_samples) The utilities of all candidate samples after each selected sample of the batch, e.g., `utilities[0]` indicates the utilities used for selecting the first sample (with index `query_indices[0]`) of the batch. """ # Validate input parameters. X, y, candidates, batch_size, return_utilities = self._validate_data( X, y, candidates, batch_size, return_utilities, reset=True ) X_cand, mapping = self._transform_candidates(candidates, X, y) # Check the classifier's type. check_type(clf, "clf", SkactivemlClassifier) check_equal_missing_label(clf.missing_label, self.missing_label_) check_type(fit_clf, "fit_clf", bool) # Check `utility_weight`. if utility_weight is None: if mapping is None: utility_weight = np.ones(len(X_cand)) else: utility_weight = np.ones(len(X)) utility_weight = check_array(utility_weight, ensure_2d=False) if mapping is None and len(X_cand) != len(utility_weight): raise ValueError( f"'utility_weight' must have length 'n_candidates' but " f"{len(X_cand)} != {len(utility_weight)}." ) if mapping is not None and len(X) != len(utility_weight): raise ValueError( f"'utility_weight' must have length 'n_samples' but " f"{len(X)} != {len(utility_weight)}." ) if self.metric is None and not hasattr(clf, "predict_freq"): raise TypeError( "clf has no predict_freq and metric was set to None" ) # Fit the classifier and predict frequencies. if fit_clf: clf = clone(clf).fit(X, y, sample_weight) if self.metric is not None: if self.metric_dict is None and self.metric == "rbf": self.metric_dict = {"gamma": "mean"} pwc = ParzenWindowClassifier( metric=self.metric, metric_dict=self.metric_dict, missing_label=clf.missing_label, classes=clf.classes, ) pwc.fit(X=X, y=y, sample_weight=sample_weight) n = pwc.predict_freq(X_cand).sum(axis=1, keepdims=True) pred_proba = clf.predict_proba(X_cand) k_vec = n * pred_proba else: k_vec = clf.predict_freq(X_cand) # Calculate utilities and return the output. utilities_cand = cost_reduction( k_vec, prior=self.prior, m_max=self.m_max ) if mapping is None: utilities = utilities_cand else: utilities = np.full(len(X), np.nan) utilities[mapping] = utilities_cand utilities *= utility_weight return simple_batch( utilities, self.random_state_, batch_size=batch_size, return_utilities=return_utilities, ) def cost_reduction(k_vec_list, C=None, m_max=2, prior=1.0e-3): """Calculate the expected cost reduction. Calculate the expected cost reduction for given maximum number of hypothetically acquired labels, observed labels and cost matrix. 
Parameters ---------- k_vec_list: array-like, shape (n_samples, n_classes) Observed class labels. C: array-like, shape = (n_classes, n_classes) Cost matrix. m_max: int Maximal number of hypothetically acquired labels. prior : float | array-like, shape (n_classes) Prior value for each class. Returns ------- expected_cost_reduction: array-like, shape (n_samples) Expected cost reduction for given parameters. """ # Check if 'prior' is valid check_scalar(prior, "prior", (float, int), min_inclusive=False, min_val=0) # Check if 'm_max' is valid check_scalar(m_max, "m_max", int, min_val=1) n_classes = len(k_vec_list[0]) n_samples = len(k_vec_list) # check cost matrix C = 1 - np.eye(n_classes) if C is None else np.asarray(C) # generate labelling vectors for all possible m values l_vec_list = np.vstack( [_gen_l_vec_list(m, n_classes) for m in range(m_max + 1)] ) m_list = np.sum(l_vec_list, axis=1) n_l_vecs = len(l_vec_list) # compute optimal cost-sensitive decision for all combination of k-vectors # and l-vectors tile = np.tile(k_vec_list, (n_l_vecs, 1, 1)) k_l_vec_list = np.swapaxes(tile, 0, 1) + l_vec_list y_hats = np.argmin(k_l_vec_list @ C, axis=2) # add prior to k-vectors prior = prior * np.ones(n_classes) k_vec_list = np.asarray(k_vec_list) + prior # all combination of k-, l-, and prediction indicator vectors combs = [k_vec_list, l_vec_list, np.eye(n_classes)] combs = np.asarray( [list(elem) for elem in list(itertools.product(*combs))] ) # three factors of the closed form solution factor_1 = 1 / _euler_beta(k_vec_list) factor_2 = _multinomial(l_vec_list) factor_3 = _euler_beta(np.sum(combs, axis=1)).reshape( n_samples, n_l_vecs, n_classes ) # expected classification cost for each m m_sums = np.asarray( [ factor_1[k_idx] * np.bincount( m_list, factor_2 * [ C[:, y_hats[k_idx, l_idx]] @ factor_3[k_idx, l_idx] for l_idx in range(n_l_vecs) ], ) for k_idx in range(n_samples) ] ) # compute classification cost reduction as difference gains = np.zeros((n_samples, m_max)) + m_sums[:, 0].reshape(-1, 1) gains -= m_sums[:, 1:] # normalize cost reduction by number of hypothetical label acquisitions gains /= np.arange(1, m_max + 1) return np.max(gains, axis=1) def _gen_l_vec_list(m_approx, n_classes): """ Creates all possible class labeling vectors for given number of hypothetically acquired labels and given number of classes. Parameters ---------- m_approx: int Number of hypothetically acquired labels.. n_classes: int, Number of classes Returns ------- label_vec_list: array-like, shape = [n_labelings, n_classes] All possible class labelings for given parameters. """ label_vec_list = [[]] label_vec_res = np.arange(m_approx + 1) for i in range(n_classes - 1): new_label_vec_list = [] for labelVec in label_vec_list: for newLabel in label_vec_res[ label_vec_res - (m_approx - sum(labelVec)) <= 1.0e-10 ]: new_label_vec_list.append(labelVec + [newLabel]) label_vec_list = new_label_vec_list new_label_vec_list = [] for labelVec in label_vec_list: new_label_vec_list.append(labelVec + [m_approx - sum(labelVec)]) label_vec_list = np.array(new_label_vec_list, int) return label_vec_list def _euler_beta(a): """ Represents Euler beta function: B(a(i)) = Gamma(a(i,1))*...*Gamma(a_n)/Gamma(a(i,1)+...+a(i,n)) Parameters ---------- a: array-like, shape (m, n) Vectors to evaluated. 
    Returns
    -------
    result: array-like, shape (m)
        Euler beta function results [B(a(0)), ..., B(a(m))].
    """
    return np.exp(np.sum(gammaln(a), axis=1) - gammaln(np.sum(a, axis=1)))


def _multinomial(a):
    """
    Computes the multinomial coefficient:
    Mult(a(i)) = (a(i,1)+...+a(i,n))!/(a(i,1)!...a(i,n)!)

    Parameters
    ----------
    a: array-like, shape (m, n)
        Vectors to be evaluated.

    Returns
    -------
    result: array-like, shape (m)
        Multinomial coefficients [Mult(a(0)), ..., Mult(a(m))].
    """
    return factorial(np.sum(a, axis=1)) / np.prod(factorial(a), axis=1)
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/pool/_probabilistic_al.py
_probabilistic_al.py
0.890735
0.426859
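A minimal usage sketch for the `ProbabilisticAL` strategy defined in this record. It assumes the class is re-exported as `skactiveml.pool.ProbabilisticAL` and that `ParzenWindowClassifier` and `MISSING_LABEL` are importable as in the source above; the toy data, the number of initial labels, and the parameter choices are illustrative only.

import numpy as np
from sklearn.datasets import make_blobs

from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.pool import ProbabilisticAL  # assumed public re-export of this module
from skactiveml.utils import MISSING_LABEL

# Toy pool: 100 samples, two classes, only the first ten labels revealed.
X, y_true = make_blobs(n_samples=100, centers=2, random_state=0)
y = np.full(len(X), MISSING_LABEL)
y[:10] = y_true[:10]

# ParzenWindowClassifier implements `predict_freq`, so the default `metric=None` works.
clf = ParzenWindowClassifier(
    classes=np.unique(y_true), missing_label=MISSING_LABEL, random_state=0
)
qs = ProbabilisticAL(prior=1, m_max=1, random_state=0)

# Select the next sample to label; `utilities` has shape (batch_size, n_samples).
query_idx, utilities = qs.query(X=X, y=y, clf=clf, return_utilities=True)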
import numpy as np from scipy.stats import t, rankdata from sklearn.base import BaseEstimator, clone from sklearn.utils.validation import check_array, check_is_fitted from ...base import ( MultiAnnotatorPoolQueryStrategy, SkactivemlClassifier, AnnotatorModelMixin, ) from ...pool._uncertainty_sampling import uncertainty_scores from ...utils import ( check_scalar, MISSING_LABEL, is_labeled, check_type, simple_batch, majority_vote, ) class IntervalEstimationAnnotModel(BaseEstimator, AnnotatorModelMixin): """IntervalEstimationAnnotModel This annotator model relies on 'Interval Estimation Learning' (IELearning) for estimating the annotation performances, i.e., labeling accuracies, of multiple annotators [1]. Therefore, it computes the mean accuracy and the lower as well as the upper bound of the labeling accuracy per annotator. (Weighted) majority vote is used as estimated ground truth. Parameters ---------- classes : array-like, shape (n_classes), optional (default=None) Holds the label for each class. missing_label : scalar or string or np.nan or None, optional (default=np.nan) Value to represent a missing label. alpha : float, interval=(0, 1), optional (default=0.05) Half of the confidence level for student's t-distribution. mode : 'lower' or 'mean' or 'upper', optional (default='upper') Mode of the estimated annotation performance. random_state : None|int|numpy.random.RandomState, optional (default=None) The random state used for deciding on majority vote labels in case of ties. Attributes ---------- classes_: array-like, shape (n_classes) Holds the label for each class. n_annotators_: int Number of annotators. A_perf_ : ndarray, shape (n_annotators, 3) Estimated annotation performances (i.e., labeling accuracies), where `A_cand[i, 0]` indicates the lower bound, `A_cand[i, 1]` indicates the mean, and `A_cand[i, 2]` indicates the upper bound of the estimation labeling accuracy. References ---------- [1] Donmez, Pinar, Jaime G. Carbonell, and Jeff Schneider. "Efficiently learning the accuracy of labeling sources for selective sampling." 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 259-268. 2009. """ def __init__( self, classes=None, missing_label=MISSING_LABEL, alpha=0.05, mode="upper", random_state=None, ): self.classes = classes self.missing_label = missing_label self.alpha = alpha self.mode = mode self.random_state = random_state def fit(self, X, y, sample_weight=None): """Fit annotator model for given samples. Parameters ---------- X : array-like, shape (n_samples, n_features) Test samples. y : array-like, shape (n_samples, n_annotators) Class labels of annotators. sample_weight : array-like, shape (n_samples, n_annotators), optional (default=None) Sample weight for each label and annotator. Returns ------- self : IntervalEstimationAnnotModel object The fitted annotator model. """ # Check whether alpha is float in (0, 1). check_scalar( x=self.alpha, target_type=float, name="alpha", min_val=0, max_val=1, min_inclusive=False, max_inclusive=False, ) # Check mode. if self.mode not in ["lower", "mean", "upper"]: raise ValueError("`mode` must be in `['lower', 'mean', `upper`].`") # Check shape of labels. if y.ndim != 2: raise ValueError( "`y` but must be a 2d array with shape " "`(n_samples, n_annotators)`." ) # Compute majority vote labels. y_mv = majority_vote( y=y, w=sample_weight, classes=self.classes, random_state=self.random_state, missing_label=self.missing_label, ) # Number of annotators. 
self.n_annotators_ = y.shape[1] is_lbld = is_labeled(y, missing_label=self.missing_label) self.A_perf_ = np.zeros((self.n_annotators_, 3)) for a_idx in range(self.n_annotators_): is_correct = np.equal( y_mv[is_lbld[:, a_idx]], y[is_lbld[:, a_idx], a_idx] ) is_correct = np.concatenate((is_correct, [0, 1])) mean = np.mean(is_correct) std = np.std(is_correct) t_value = t.isf([self.alpha / 2], len(is_correct) - 1)[0] t_value *= std / np.sqrt(len(is_correct)) self.A_perf_[a_idx, 0] = mean - t_value self.A_perf_[a_idx, 1] = mean self.A_perf_[a_idx, 2] = mean + t_value return self def predict_annotator_perf(self, X): """Calculates the probability that an annotator provides the true label for a given sample. Parameters ---------- X : array-like, shape (n_samples, n_features) Test samples. Returns ------- P_annot : numpy.ndarray, shape (n_samples, n_annotators) `P_annot[i,l]` is the probability, that annotator `l` provides the correct class label for sample `X[i]`. """ check_is_fitted(self) X = check_array(X) if self.mode == "lower": mode = 0 elif self.mode == "mean": mode = 1 else: mode = 2 return np.tile(self.A_perf_[:, mode], (len(X), 1)) class IntervalEstimationThreshold(MultiAnnotatorPoolQueryStrategy): """IntervalEstimationThreshold The strategy 'Interval Estimation Threshold' (IEThresh) [1] is useful for addressing the exploration vs. exploitation trade-off when dealing with multiple error-prone annotators in active learning. This class relies on `IntervalEstimationAnnotModel` for estimating the annotation performances, i.e., label accuracies, of multiple annotators. Samples are selected based on 'Uncertainty Sampling' (US). The selected samples are labeled by the annotators whose estimated annotation performances are equal or greater than an adaptive threshold. The strategy assumes all annotators to be available and is not defined otherwise. To deal with this case nonetheless value-annotator pairs are first ranked according to the amount of annotators available for the given value in `candidates` and are than ranked according to `IntervalEstimationThreshold`. Parameters ---------- epsilon : float, interval=[0, 1], optional (default=0.9) Parameter for specifying the adaptive threshold used for annotator selection. alpha : float, interval=(0, 1), optional (default=0.05) Half of the confidence level for student's t-distribution. random_state : None or int or numpy.random.RandomState, optional (default=None) The random state used for deciding on majority vote labels in case of ties. References ---------- [1] Donmez, Pinar, Jaime G. Carbonell, and Jeff Schneider. "Efficiently learning the accuracy of labeling sources for selective sampling." 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 259-268. 2009. """ def __init__( self, epsilon=0.9, alpha=0.05, random_state=None, missing_label=MISSING_LABEL, ): super().__init__( random_state=random_state, missing_label=missing_label ) self.epsilon = epsilon self.alpha = alpha def query( self, X, y, clf, fit_clf=True, candidates=None, annotators=None, sample_weight=None, batch_size="adaptive", return_utilities=False, ): """Determines which candidate sample is to be annotated by which annotator. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e., including the labeled and unlabeled samples. 
y : array-like of shape (n_samples, n_annotators) Labels of the training data set for each annotator (possibly including unlabeled ones indicated by self.MISSING_LABEL), meaning that `y[i, j]` contains the label annotated by annotator `i` for sample `j`. clf : skactiveml.base.SkactivemlClassifier Model implementing the methods `fit` and `predict_proba`. fit_clf : bool, default=True Defines whether the classifier should be fitted on `X`, `y`, and `sample_weight`. candidates : None or array-like of shape (n_candidates), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If `candidates` is None, the samples from (X,y), for which an annotator exists such that the annotator sample pairs is unlabeled are considered as sample candidates. If `candidates` is of shape (n_candidates,) and of type int, `candidates` is considered as the indices of the sample candidates in (X,y). If `candidates` is of shape (n_candidates, n_features), the sample candidates are directly given in `candidates` (not necessarily contained in `X`). annotators : array-like, shape (n_candidates, n_annotators), optional (default=None) If `annotators` is None, all annotators are considered as available annotators. If `annotators` is of shape (n_avl_annotators) and of type int, `annotators` is considered as the indices of the available annotators. If candidate samples and available annotators are specified: The annotator sample pairs, for which the sample is a candidate sample and the annotator is an available annotator are considered as candidate annotator sample pairs. If `annotators` is None and `candidates` is of shape (n_candidates,), all annotator sample pairs, for which the sample is indexed by `candidates` are considered as candidate annotator sample pairs. If `annotators` is a boolean array of shape (n_candidates, n_avl_annotators) the annotator sample pairs, for which the sample is a candidate sample and the boolean matrix has entry `True` are considered as candidate sample pairs. sample_weight : array-like, (n_samples, n_annotators), optional (default=None) It contains the weights of the training samples' class labels. It must have the same shape as y. batch_size : 'adaptive' or int, optional (default=1) The number of samples to be selected in one AL cycle. If 'adaptive' is set, the `batch_size` is determined based on the annotation performances and the parameter `epsilon`. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. Returns ------- query_indices : numpy.ndarray of shape (batch_size, 2) The query_indices indicate which candidate sample is to be annotated by which annotator, e.g., `query_indices[:, 0]` indicates the selected candidate samples and `query_indices[:, 1]` indicates the respectively selected annotators. utilities: numpy.ndarray of shape (batch_size, n_cand_samples, n_annotators) The utilities of all candidate samples w.r.t. to the available annotators after each selected sample of the batch, e.g., `utilities[0, :, j]` indicates the utilities used for selecting the first sample-annotator pair (with indices `query_indices[0]`). """ # base check ( X, y, candidates, annotators, _, return_utilities, ) = super()._validate_data( X, y, candidates, annotators, 1, return_utilities, reset=True ) X_cand, mapping, A_cand = self._transform_cand_annot( candidates, annotators, X, y ) # Validate classifier type. check_type(clf, "clf", SkactivemlClassifier) # Check whether epsilon is float in [0, 1]. 
check_scalar( x=self.epsilon, target_type=float, name="epsilon", min_val=0, max_val=1, ) # Check whether alpha is float in (0, 1). check_scalar( x=self.alpha, target_type=float, name="alpha", min_val=0, max_val=1, min_inclusive=False, max_inclusive=False, ) n_annotators = y.shape[1] # Check whether unlabeled data exists A_cand = np.repeat( np.all(A_cand, axis=1).reshape(-1, 1), n_annotators, axis=1 ) # Fit classifier and compute uncertainties on candidate samples. if fit_clf: clf = clone(clf).fit(X, y, sample_weight) P = clf.predict_proba(X_cand) uncertainties = uncertainty_scores(probas=P, method="least_confident") # Fit annotator model and compute performance estimates. ie_model = IntervalEstimationAnnotModel( classes=clf.classes_, missing_label=clf.missing_label, alpha=self.alpha, mode="upper", ) ie_model.fit(X=X, y=y, sample_weight=sample_weight) A_perf = ie_model.A_perf_ # Compute utilities. # combine the values of A_perf and uncertainties A_perf = A_perf[:, 2] + 1 A_perf = A_perf[np.newaxis] max_range = np.max(A_perf) + 1 uncertainties = rankdata(uncertainties, method="ordinal") * max_range uncertainties = np.tile(uncertainties, (n_annotators, 1)).T utilities = uncertainties + A_perf # exclude not available annotators utilities[~A_cand] = np.nan # Determine actual batch size. if isinstance(batch_size, str) and batch_size != "adaptive": raise ValueError( f"If `batch_size` is of type `string`, " f"it must equal `'adaptive'`." ) elif batch_size == "adaptive": required_perf = self.epsilon * np.max(A_perf) actl_batch_size = int(np.sum(A_perf >= required_perf)) elif isinstance(batch_size, int): actl_batch_size = batch_size else: raise TypeError( f"`batch_size` is of type `{type(batch_size)}` " f"but must equal `'adaptive'` or be of type " f"`int`." ) if mapping is not None: w_utilities = utilities utilities = np.full((len(X), n_annotators), np.nan) utilities[mapping, :] = w_utilities # Perform selection based on previously computed utilities. return simple_batch( utilities, self.random_state_, batch_size=actl_batch_size, return_utilities=return_utilities, )
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/pool/multiannotator/_interval_estimation_threshold.py
_interval_estimation_threshold.py
0.941027
0.551815
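A usage sketch for `IntervalEstimationThreshold` and the underlying `IntervalEstimationAnnotModel`. The import path `skactiveml.pool.multiannotator` is assumed from the file location of this record, and the three simulated annotators with decreasing accuracy are made up for illustration.

import numpy as np

from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.pool.multiannotator import (  # assumed re-exports of this module
    IntervalEstimationAnnotModel,
    IntervalEstimationThreshold,
)
from skactiveml.utils import MISSING_LABEL

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 2))
y_true = (X[:, 0] > 0).astype(float)

# Three simulated annotators with decreasing accuracy; most entries stay unlabeled.
n_annotators = 3
y = np.full((len(X), n_annotators), MISSING_LABEL)
for a_idx, acc in enumerate([0.95, 0.8, 0.6]):
    labeled = rng.choice(len(X), size=15, replace=False)
    correct = rng.random(len(labeled)) < acc
    y[labeled, a_idx] = np.where(correct, y_true[labeled], 1 - y_true[labeled])

# The annotator model alone estimates lower/mean/upper accuracy bounds per annotator.
ie_model = IntervalEstimationAnnotModel(
    classes=[0, 1], missing_label=MISSING_LABEL, alpha=0.05, mode="upper"
)
ie_model.fit(X, y)
print(ie_model.A_perf_)  # shape (n_annotators, 3)

# IEThresh ranks samples by uncertainty and, with batch_size='adaptive' (default),
# queries every annotator whose upper accuracy bound reaches epsilon * max bound.
clf = ParzenWindowClassifier(
    classes=[0, 1], missing_label=MISSING_LABEL, random_state=0
)
qs = IntervalEstimationThreshold(epsilon=0.9, alpha=0.05, random_state=0)
query_indices = qs.query(X=X, y=y, clf=clf)
# query_indices[:, 0] are sample indices, query_indices[:, 1] the chosen annotators.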
from inspect import signature, Parameter import numpy as np from scipy.stats import rankdata from sklearn.utils.validation import check_array, _is_arraylike from ...base import ( MultiAnnotatorPoolQueryStrategy, SingleAnnotatorPoolQueryStrategy, ) from ...utils import ( rand_argmax, check_type, MISSING_LABEL, majority_vote, check_random_state, check_scalar, ) class SingleAnnotatorWrapper(MultiAnnotatorPoolQueryStrategy): """SingleAnnotatorWrapper Implementation of a wrapper class for pool-based active learning query strategies with a single annotator such that it transforms the query strategy for the single annotator into a query strategy for multiple annotators by choosing an annotator randomly or according to the parameter `A_pef` and setting the labeled matrix to a labeled vector by an aggregation function, e.g., majority voting. Parameters ---------- strategy : SingleAnnotatorPoolQueryStrategy An active learning strategy for a single annotator. y_aggregate : callable, optional (default=None) `y_aggregate` is used to transform `y` as a matrix of shape (n_samples, n_annotators) into a vector of shape (n_samples) during the querying process and is then passed to the given `strategy`. If `y_aggregate is None` and `y` is used in the strategy, majority_vote is used as `y_aggregate`. missing_label : scalar or string or np.nan or None, optional (default=np.nan) Value to represent a missing label. random_state : int or RandomState instance, optional (default=None) Controls the randomness of the estimator. """ def __init__( self, strategy, y_aggregate=None, missing_label=MISSING_LABEL, random_state=None, ): super().__init__( random_state=random_state, missing_label=missing_label ) self.strategy = strategy self.y_aggregate = y_aggregate def query( self, X, y, candidates=None, annotators=None, batch_size=1, query_params_dict=None, n_annotators_per_sample=1, A_perf=None, return_utilities=False, ): """Determines which candidate sample is to be annotated by which annotator. The samples are first and primarily ranked by the given strategy as if one unspecified annotator where to annotate the sample. Then for each sample the sample-annotator pairs are ranked based either on previously set preferences or at random. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e., including the labeled and unlabeled samples. y : array-like of shape (n_samples, n_annotators) Labels of the training data set for each annotator (possibly including unlabeled ones indicated by self.MISSING_LABEL), meaning that `y[i, j]` contains the label annotated by annotator `i` for sample `j`. candidates : None or array-like of shape (n_candidates), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If `candidates` is None, the samples from (X,y), for which an annotator exists such that the annotator sample pairs is unlabeled are considered as sample candidates. If `candidates` is of shape (n_candidates) and of type int, candidates is considered as the indices of the sample candidates in (X,y). If `candidates` is of shape (n_candidates, n_features), the sample candidates are directly given in candidates (not necessarily contained in X). This is not supported by all query strategies. annotators : array-like of shape (n_candidates, n_annotators), optional (default=None) If `annotators` is None, all annotators are considered as available annotators. 
If `annotators` is of shape (n_avl_annotators) and of type int, `annotators` is considered as the indices of the available annotators. If candidate samples and available annotators are specified: The annotator sample pairs, for which the sample is a candidate sample and the annotator is an available annotator are considered as candidate annotator-sample-pairs. If `annotators` is None and `candidates` is of shape (n_candidates), all annotator sample pairs, for which the sample is indexed by `candidates` are considered as candidate annotator-sample-pairs. If `annotators` is a boolean array of shape (n_candidates, n_avl_annotators) the annotator sample pairs, for which the sample is a candidate sample and the boolean matrix has entry `True` are considered as candidate sample pairs. batch_size : int, optional (default=1) The number of annotators sample pairs to be selected in one AL cycle. query_params_dict : dict, optional (default=None) Dictionary for the parameters of the query method besides `X` and the transformed `y`. A_perf : array-like, shape (n_samples, n_annotators) or (n_annotators,) optional (default=None) The performance based ranking of each annotator. 1.) If `A_perf` is of shape (n_samples, n_annotators) for each sample `i` the value-annotators pair `(i, j)` is chosen over the pair `(i, k)` if `A_perf[i, j]` is greater or equal to `A_perf[i, k]`. 2.) If `A_perf` is of shape (n_annotators,) for each sample `i` the value-annotators pair `(i, j)` is chosen over the pair `(i, k)` if `A_perf[j]` is greater or equal to `A_perf[k]`. 3.) If `A_perf` is None, the annotators are chosen at random, with a different distribution for each sample. return_utilities : bool, optional (default=False) If true, also returns the utilities based on the query strategy. n_annotators_per_sample : int, array-like, optional (default=1) array-like of shape (k,), k <= n_samples If `n_annotators_per_sample` is an int, the value indicates the number of annotators that are preferably assigned to a candidate sample, selected by the query_strategy. `Preferably` in this case means depending on how many annotators can be assigned to a given candidate sample and how many annotator-sample pairs should be assigned considering the `batch_size`. If `n_annotators_per_sample` is an int array, the values of the array are interpreted as follows. The value at the i-th index determines the preferred number of annotators for the candidate sample at the i-th index in the ranking of the batch. The ranking of the batch is given by the `strategy` (SingleAnnotatorPoolQueryStrategy). The last index of the n_annotators_per_sample array (k-1) indicates the preferred number of annotators for all candidate sample at an index greater of equal to k-1. Returns ------- query_indices : np.ndarray of shape (batchsize, 2) The query_indices indicate which candidate sample pairs are to be queried is, i. e. which candidate sample is to be annotated by which annotator, e.g., `query_indices[:, 0]` indicates the selected candidate samples and `query_indices[:, 1]` indicates the respectively selected annotators. utilities: np.ndarray of shape (batch_size, n_samples, n_annotators) or np.ndarray of shape (batch_size, n_candidates, n_annotators) The utilities of all candidate samples w.r.t. to the available annotators after each selected sample of the batch, e.g., `utilities[0, :, j]` indicates the utilities used for selecting the first sample-annotator-pair (with indices `query_indices[0]`). 
If `candidates` is None or of shape (n_candidates,), the indexing refers to samples in `X`. If `candidates` is of shape (n_candidates, n_features), the indexing refers to samples in candidates. """ ( X, y, candidates, annotators, batch_size, return_utilities, ) = super()._validate_data( X, y, candidates, annotators, batch_size, return_utilities, reset=True, ) X_cand, mapping, A_cand = self._transform_cand_annot( candidates, annotators, X, y ) random_state = self.random_state_ # check strategy check_type( self.strategy, "self.strategy", SingleAnnotatorPoolQueryStrategy ) # check query_params_dict if query_params_dict is None: query_params_dict = {} check_type(query_params_dict, "query_params_dict", dict) # aggregate y if self.y_aggregate is None: y_aggregate = lambda y: majority_vote(y, random_state=random_state) else: y_aggregate = self.y_aggregate if not callable(y_aggregate): raise TypeError( f"`self.y_aggregate` must be callable. " f"`self.y_aggregate` is of type {type(y_aggregate)}" ) # count the number of arguments that have no default value n_free_params = len( list( filter( lambda x: x.default == Parameter.empty, signature(y_aggregate).parameters.values(), ) ) ) if n_free_params != 1: raise TypeError( f"The number of free parameters of the callable has to " f"equal one. " f"The number of free parameters is {n_free_params}." ) y_sq = y_aggregate(y) n_candidates = X_cand.shape[0] n_annotators = A_cand.shape[1] n_samples = X.shape[0] batch_size_sq = min(batch_size, X_cand.shape[0]) # check n_annotators_per_sample and set pref_n_annotators if isinstance(n_annotators_per_sample, (int, np.int_)): check_scalar( n_annotators_per_sample, name="n_annotators_per_sample", target_type=int, min_val=1, ) pref_n_annotators = n_annotators_per_sample * np.ones( batch_size_sq ) elif _is_arraylike(n_annotators_per_sample): pref_n_annotators = check_array( n_annotators_per_sample, ensure_2d=False ) if pref_n_annotators.ndim != 1: raise ValueError( "n_annotators_per_sample, if an array, must be of dim " f"1 but, it is of dim {pref_n_annotators.ndim}" ) else: pref_length = pref_n_annotators.shape[0] if pref_length > batch_size_sq: pref_n_annotators = pref_n_annotators[:batch_size_sq] if pref_length < batch_size_sq: appended = pref_n_annotators[-1] * np.ones( batch_size_sq - pref_length ) pref_n_annotators = np.append(pref_n_annotators, appended) else: raise TypeError( "n_annotators_per_sample must be array like " "or an integer" ) # check A_perf and set annotator_utilities if A_perf is None: annotator_utilities = random_state.rand( 1, n_candidates, n_annotators ).repeat(batch_size_sq, axis=0) elif _is_arraylike(A_perf): A_perf = check_array(A_perf, ensure_2d=False) # ensure A_perf lies in [0, 1) if A_perf.min() != A_perf.max(): A_perf = ( 1 / (A_perf.max() - A_perf.min() + 1) * (A_perf - A_perf.min()) ) else: A_perf = np.zeros_like(A_perf, dtype=float) if A_perf.shape == (n_candidates, n_annotators): annotator_utilities = A_perf[np.newaxis, :, :].repeat( batch_size_sq, axis=0 ) elif A_perf.shape == (n_annotators,): annotator_utilities = ( A_perf[np.newaxis, np.newaxis, :] .repeat(n_candidates, axis=1) .repeat(batch_size_sq, axis=0) ) else: raise ValueError( f"`A_perf` is of shape {A_perf.shape}, but must be of " f"shape ({n_candidates}, {n_annotators}) or of shape " f"({n_annotators},)." ) else: raise TypeError( f"`A_perf` is of type {type(A_perf)}, but must be array like " f"or of type None." 
) candidates_sq = mapping if mapping is not None else X_cand re_val = self.strategy.query( X=X, y=y_sq, candidates=candidates_sq, **query_params_dict, batch_size=batch_size_sq, return_utilities=True, ) single_query_indices, w_utilities = re_val if mapping is None: sample_utilities = w_utilities else: sample_utilities = w_utilities[:, mapping] re_val = self._query_annotators( A_cand, batch_size, sample_utilities, annotator_utilities, return_utilities, pref_n_annotators, ) if mapping is None: return re_val elif return_utilities: w_indices, w_utilities = re_val utilities = np.full((batch_size, n_samples, n_annotators), np.nan) utilities[:, mapping, :] = w_utilities indices = np.zeros_like(w_indices) indices[:, 0] = mapping[w_indices[:, 0]] indices[:, 1] = w_indices[:, 1] return indices, utilities else: w_indices = re_val indices = np.zeros_like(w_indices) indices[:, 0] = mapping[w_indices[:, 0]] indices[:, 1] = w_indices[:, 1] return indices def _query_annotators( self, A_cand, batch_size, sample_utilities, annotator_utilities, return_utilities, pref_n_annotators, ): random_state = check_random_state(self.random_state) n_annotators = A_cand.shape[1] n_samples = A_cand.shape[0] re_val = self._get_order_preserving_s_query( A_cand, sample_utilities, annotator_utilities ) s_indices, s_utilities = re_val n_as_annotators = self._n_to_assign_annotators( batch_size, A_cand, s_indices, pref_n_annotators ) utilities = np.zeros((batch_size, n_samples, n_annotators)) query_indices = np.zeros((batch_size, 2), dtype=int) batch_index = 0 # actual batch index annotator_ps = 0 # current annotators per sample sample_index = 0 # sample batch index while batch_index < batch_size: utilities[batch_index] = s_utilities[sample_index] query_indices[batch_index] = rand_argmax( utilities[batch_index], random_state=random_state ) s_utilities[ :, query_indices[batch_index, 0], query_indices[batch_index, 1] ] = np.nan batch_index += 1 annotator_ps += 1 if annotator_ps >= n_as_annotators[sample_index]: sample_index += 1 annotator_ps = 0 if return_utilities: return query_indices, utilities else: return query_indices @staticmethod def _get_order_preserving_s_query( A, candidate_utilities, annotator_utilities ): nan_indices = np.argwhere(np.isnan(candidate_utilities)) candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = -np.inf # prepare candidate_utilities candidate_utilities = rankdata( candidate_utilities, method="ordinal", axis=1 ).astype(float) # calculate indices of maximum sample indices = np.argmax(candidate_utilities, axis=1) candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = np.nan annotator_utilities[:, A == 0] = np.nan # combine utilities by addition utilities = candidate_utilities[:, :, np.newaxis] + annotator_utilities return indices, utilities @staticmethod def _n_to_assign_annotators(batch_size, A, s_indices, pref_n_annotators): n_max_annotators = np.sum(A, axis=1) n_max_chosen_annotators = n_max_annotators[s_indices] annot_per_sample = np.minimum( n_max_chosen_annotators, pref_n_annotators ) n_annotator_sample_pairs = np.sum(annot_per_sample) while n_annotator_sample_pairs < batch_size: annot_per_sample = np.minimum( n_max_chosen_annotators, annot_per_sample + 1 ) n_annotator_sample_pairs = np.sum(annot_per_sample) return annot_per_sample
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/pool/multiannotator/_wrapper.py
_wrapper.py
from inspect import signature, Parameter import numpy as np from scipy.stats import rankdata from sklearn.utils.validation import check_array, _is_arraylike from ...base import ( MultiAnnotatorPoolQueryStrategy, SingleAnnotatorPoolQueryStrategy, ) from ...utils import ( rand_argmax, check_type, MISSING_LABEL, majority_vote, check_random_state, check_scalar, ) class SingleAnnotatorWrapper(MultiAnnotatorPoolQueryStrategy): """SingleAnnotatorWrapper Implementation of a wrapper class for pool-based active learning query strategies with a single annotator such that it transforms the query strategy for the single annotator into a query strategy for multiple annotators by choosing an annotator randomly or according to the parameter `A_pef` and setting the labeled matrix to a labeled vector by an aggregation function, e.g., majority voting. Parameters ---------- strategy : SingleAnnotatorPoolQueryStrategy An active learning strategy for a single annotator. y_aggregate : callable, optional (default=None) `y_aggregate` is used to transform `y` as a matrix of shape (n_samples, n_annotators) into a vector of shape (n_samples) during the querying process and is then passed to the given `strategy`. If `y_aggregate is None` and `y` is used in the strategy, majority_vote is used as `y_aggregate`. missing_label : scalar or string or np.nan or None, optional (default=np.nan) Value to represent a missing label. random_state : int or RandomState instance, optional (default=None) Controls the randomness of the estimator. """ def __init__( self, strategy, y_aggregate=None, missing_label=MISSING_LABEL, random_state=None, ): super().__init__( random_state=random_state, missing_label=missing_label ) self.strategy = strategy self.y_aggregate = y_aggregate def query( self, X, y, candidates=None, annotators=None, batch_size=1, query_params_dict=None, n_annotators_per_sample=1, A_perf=None, return_utilities=False, ): """Determines which candidate sample is to be annotated by which annotator. The samples are first and primarily ranked by the given strategy as if one unspecified annotator where to annotate the sample. Then for each sample the sample-annotator pairs are ranked based either on previously set preferences or at random. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e., including the labeled and unlabeled samples. y : array-like of shape (n_samples, n_annotators) Labels of the training data set for each annotator (possibly including unlabeled ones indicated by self.MISSING_LABEL), meaning that `y[i, j]` contains the label annotated by annotator `i` for sample `j`. candidates : None or array-like of shape (n_candidates), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If `candidates` is None, the samples from (X,y), for which an annotator exists such that the annotator sample pairs is unlabeled are considered as sample candidates. If `candidates` is of shape (n_candidates) and of type int, candidates is considered as the indices of the sample candidates in (X,y). If `candidates` is of shape (n_candidates, n_features), the sample candidates are directly given in candidates (not necessarily contained in X). This is not supported by all query strategies. annotators : array-like of shape (n_candidates, n_annotators), optional (default=None) If `annotators` is None, all annotators are considered as available annotators. 
If `annotators` is of shape (n_avl_annotators) and of type int, `annotators` is considered as the indices of the available annotators. If candidate samples and available annotators are specified: The annotator sample pairs, for which the sample is a candidate sample and the annotator is an available annotator are considered as candidate annotator-sample-pairs. If `annotators` is None and `candidates` is of shape (n_candidates), all annotator sample pairs, for which the sample is indexed by `candidates` are considered as candidate annotator-sample-pairs. If `annotators` is a boolean array of shape (n_candidates, n_avl_annotators) the annotator sample pairs, for which the sample is a candidate sample and the boolean matrix has entry `True` are considered as candidate sample pairs. batch_size : int, optional (default=1) The number of annotators sample pairs to be selected in one AL cycle. query_params_dict : dict, optional (default=None) Dictionary for the parameters of the query method besides `X` and the transformed `y`. A_perf : array-like, shape (n_samples, n_annotators) or (n_annotators,) optional (default=None) The performance based ranking of each annotator. 1.) If `A_perf` is of shape (n_samples, n_annotators) for each sample `i` the value-annotators pair `(i, j)` is chosen over the pair `(i, k)` if `A_perf[i, j]` is greater or equal to `A_perf[i, k]`. 2.) If `A_perf` is of shape (n_annotators,) for each sample `i` the value-annotators pair `(i, j)` is chosen over the pair `(i, k)` if `A_perf[j]` is greater or equal to `A_perf[k]`. 3.) If `A_perf` is None, the annotators are chosen at random, with a different distribution for each sample. return_utilities : bool, optional (default=False) If true, also returns the utilities based on the query strategy. n_annotators_per_sample : int, array-like, optional (default=1) array-like of shape (k,), k <= n_samples If `n_annotators_per_sample` is an int, the value indicates the number of annotators that are preferably assigned to a candidate sample, selected by the query_strategy. `Preferably` in this case means depending on how many annotators can be assigned to a given candidate sample and how many annotator-sample pairs should be assigned considering the `batch_size`. If `n_annotators_per_sample` is an int array, the values of the array are interpreted as follows. The value at the i-th index determines the preferred number of annotators for the candidate sample at the i-th index in the ranking of the batch. The ranking of the batch is given by the `strategy` (SingleAnnotatorPoolQueryStrategy). The last index of the n_annotators_per_sample array (k-1) indicates the preferred number of annotators for all candidate sample at an index greater of equal to k-1. Returns ------- query_indices : np.ndarray of shape (batchsize, 2) The query_indices indicate which candidate sample pairs are to be queried is, i. e. which candidate sample is to be annotated by which annotator, e.g., `query_indices[:, 0]` indicates the selected candidate samples and `query_indices[:, 1]` indicates the respectively selected annotators. utilities: np.ndarray of shape (batch_size, n_samples, n_annotators) or np.ndarray of shape (batch_size, n_candidates, n_annotators) The utilities of all candidate samples w.r.t. to the available annotators after each selected sample of the batch, e.g., `utilities[0, :, j]` indicates the utilities used for selecting the first sample-annotator-pair (with indices `query_indices[0]`). 
If `candidates` is None or of shape (n_candidates,), the indexing refers to samples in `X`. If `candidates` is of shape (n_candidates, n_features), the indexing refers to samples in candidates. """ ( X, y, candidates, annotators, batch_size, return_utilities, ) = super()._validate_data( X, y, candidates, annotators, batch_size, return_utilities, reset=True, ) X_cand, mapping, A_cand = self._transform_cand_annot( candidates, annotators, X, y ) random_state = self.random_state_ # check strategy check_type( self.strategy, "self.strategy", SingleAnnotatorPoolQueryStrategy ) # check query_params_dict if query_params_dict is None: query_params_dict = {} check_type(query_params_dict, "query_params_dict", dict) # aggregate y if self.y_aggregate is None: y_aggregate = lambda y: majority_vote(y, random_state=random_state) else: y_aggregate = self.y_aggregate if not callable(y_aggregate): raise TypeError( f"`self.y_aggregate` must be callable. " f"`self.y_aggregate` is of type {type(y_aggregate)}" ) # count the number of arguments that have no default value n_free_params = len( list( filter( lambda x: x.default == Parameter.empty, signature(y_aggregate).parameters.values(), ) ) ) if n_free_params != 1: raise TypeError( f"The number of free parameters of the callable has to " f"equal one. " f"The number of free parameters is {n_free_params}." ) y_sq = y_aggregate(y) n_candidates = X_cand.shape[0] n_annotators = A_cand.shape[1] n_samples = X.shape[0] batch_size_sq = min(batch_size, X_cand.shape[0]) # check n_annotators_per_sample and set pref_n_annotators if isinstance(n_annotators_per_sample, (int, np.int_)): check_scalar( n_annotators_per_sample, name="n_annotators_per_sample", target_type=int, min_val=1, ) pref_n_annotators = n_annotators_per_sample * np.ones( batch_size_sq ) elif _is_arraylike(n_annotators_per_sample): pref_n_annotators = check_array( n_annotators_per_sample, ensure_2d=False ) if pref_n_annotators.ndim != 1: raise ValueError( "n_annotators_per_sample, if an array, must be of dim " f"1 but, it is of dim {pref_n_annotators.ndim}" ) else: pref_length = pref_n_annotators.shape[0] if pref_length > batch_size_sq: pref_n_annotators = pref_n_annotators[:batch_size_sq] if pref_length < batch_size_sq: appended = pref_n_annotators[-1] * np.ones( batch_size_sq - pref_length ) pref_n_annotators = np.append(pref_n_annotators, appended) else: raise TypeError( "n_annotators_per_sample must be array like " "or an integer" ) # check A_perf and set annotator_utilities if A_perf is None: annotator_utilities = random_state.rand( 1, n_candidates, n_annotators ).repeat(batch_size_sq, axis=0) elif _is_arraylike(A_perf): A_perf = check_array(A_perf, ensure_2d=False) # ensure A_perf lies in [0, 1) if A_perf.min() != A_perf.max(): A_perf = ( 1 / (A_perf.max() - A_perf.min() + 1) * (A_perf - A_perf.min()) ) else: A_perf = np.zeros_like(A_perf, dtype=float) if A_perf.shape == (n_candidates, n_annotators): annotator_utilities = A_perf[np.newaxis, :, :].repeat( batch_size_sq, axis=0 ) elif A_perf.shape == (n_annotators,): annotator_utilities = ( A_perf[np.newaxis, np.newaxis, :] .repeat(n_candidates, axis=1) .repeat(batch_size_sq, axis=0) ) else: raise ValueError( f"`A_perf` is of shape {A_perf.shape}, but must be of " f"shape ({n_candidates}, {n_annotators}) or of shape " f"({n_annotators},)." ) else: raise TypeError( f"`A_perf` is of type {type(A_perf)}, but must be array like " f"or of type None." 
) candidates_sq = mapping if mapping is not None else X_cand re_val = self.strategy.query( X=X, y=y_sq, candidates=candidates_sq, **query_params_dict, batch_size=batch_size_sq, return_utilities=True, ) single_query_indices, w_utilities = re_val if mapping is None: sample_utilities = w_utilities else: sample_utilities = w_utilities[:, mapping] re_val = self._query_annotators( A_cand, batch_size, sample_utilities, annotator_utilities, return_utilities, pref_n_annotators, ) if mapping is None: return re_val elif return_utilities: w_indices, w_utilities = re_val utilities = np.full((batch_size, n_samples, n_annotators), np.nan) utilities[:, mapping, :] = w_utilities indices = np.zeros_like(w_indices) indices[:, 0] = mapping[w_indices[:, 0]] indices[:, 1] = w_indices[:, 1] return indices, utilities else: w_indices = re_val indices = np.zeros_like(w_indices) indices[:, 0] = mapping[w_indices[:, 0]] indices[:, 1] = w_indices[:, 1] return indices def _query_annotators( self, A_cand, batch_size, sample_utilities, annotator_utilities, return_utilities, pref_n_annotators, ): random_state = check_random_state(self.random_state) n_annotators = A_cand.shape[1] n_samples = A_cand.shape[0] re_val = self._get_order_preserving_s_query( A_cand, sample_utilities, annotator_utilities ) s_indices, s_utilities = re_val n_as_annotators = self._n_to_assign_annotators( batch_size, A_cand, s_indices, pref_n_annotators ) utilities = np.zeros((batch_size, n_samples, n_annotators)) query_indices = np.zeros((batch_size, 2), dtype=int) batch_index = 0 # actual batch index annotator_ps = 0 # current annotators per sample sample_index = 0 # sample batch index while batch_index < batch_size: utilities[batch_index] = s_utilities[sample_index] query_indices[batch_index] = rand_argmax( utilities[batch_index], random_state=random_state ) s_utilities[ :, query_indices[batch_index, 0], query_indices[batch_index, 1] ] = np.nan batch_index += 1 annotator_ps += 1 if annotator_ps >= n_as_annotators[sample_index]: sample_index += 1 annotator_ps = 0 if return_utilities: return query_indices, utilities else: return query_indices @staticmethod def _get_order_preserving_s_query( A, candidate_utilities, annotator_utilities ): nan_indices = np.argwhere(np.isnan(candidate_utilities)) candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = -np.inf # prepare candidate_utilities candidate_utilities = rankdata( candidate_utilities, method="ordinal", axis=1 ).astype(float) # calculate indices of maximum sample indices = np.argmax(candidate_utilities, axis=1) candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = np.nan annotator_utilities[:, A == 0] = np.nan # combine utilities by addition utilities = candidate_utilities[:, :, np.newaxis] + annotator_utilities return indices, utilities @staticmethod def _n_to_assign_annotators(batch_size, A, s_indices, pref_n_annotators): n_max_annotators = np.sum(A, axis=1) n_max_chosen_annotators = n_max_annotators[s_indices] annot_per_sample = np.minimum( n_max_chosen_annotators, pref_n_annotators ) n_annotator_sample_pairs = np.sum(annot_per_sample) while n_annotator_sample_pairs < batch_size: annot_per_sample = np.minimum( n_max_chosen_annotators, annot_per_sample + 1 ) n_annotator_sample_pairs = np.sum(annot_per_sample) return annot_per_sample
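The following is a minimal usage sketch of `SingleAnnotatorWrapper`; the toy data set, the wrapped `UncertaintySampling` strategy, and the `ParzenWindowClassifier` forwarded via `query_params_dict` are illustrative choices, not something the wrapper itself prescribes.

# Illustrative sketch (toy data; the wrapped strategy and classifier are assumptions).
import numpy as np
from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.pool import UncertaintySampling
from skactiveml.pool.multiannotator import SingleAnnotatorWrapper
from skactiveml.utils import MISSING_LABEL

# Six samples, three annotators, no labels acquired yet.
X = np.arange(6, dtype=float).reshape(-1, 1)
y = np.full((6, 3), MISSING_LABEL)

clf = ParzenWindowClassifier(classes=[0, 1], missing_label=MISSING_LABEL)
qs = SingleAnnotatorWrapper(
    strategy=UncertaintySampling(random_state=0), random_state=0
)

# Select two sample-annotator pairs; `clf` is passed to the wrapped
# single-annotator strategy through `query_params_dict`.
query_indices = qs.query(
    X,
    y,
    query_params_dict={"clf": clf},
    batch_size=2,
    n_annotators_per_sample=1,
)
# Each row is a (sample index, annotator index) pair.
print(query_indices)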
0.943925
0.632162
import numpy as np

from ..base import SingleAnnotatorStreamQueryStrategy
from ..utils import check_scalar


class StreamRandomSampling(SingleAnnotatorStreamQueryStrategy):
    """Random Sampling for Datastreams.

    The RandomSampling samples instances completely at random. The
    probability of sampling an instance depends on the budget specified in
    the budget manager. Given a budget of 10%, the utility drawn for an
    instance exceeds 0.9 (1 - 0.1) with a probability of 10%. Instances are
    queried regardless of their position in the feature space. As this
    query strategy disregards any information about the instance, it should
    only be used as a baseline strategy.

    Parameters
    ----------
    budget : float, optional (default=None)
        The budget which models the budgeting constraint used in
        the stream-based active learning setting.
    allow_exceeding_budget : bool, optional (default=True)
        If True, the query strategy is allowed to exceed its budget as long
        as the average number of queries will be within the budget. If
        False, queries are not allowed if the budget is exhausted.
    random_state : int, RandomState instance, optional (default=None)
        Controls the randomness of the estimator.
    """

    def __init__(
        self, budget=None, allow_exceeding_budget=True, random_state=None
    ):
        super().__init__(budget=budget, random_state=random_state)
        self.allow_exceeding_budget = allow_exceeding_budget

    def query(self, candidates, return_utilities=False):
        """Ask the query strategy which instances in candidates to acquire.

        Please note that, if the decisions from this function may differ
        from the final sampling, simulate=True can be set, so that the
        query strategy can be updated later with update(...) using the
        final sampling. This is especially helpful when developing wrapper
        query strategies.

        Parameters
        ----------
        candidates : array-like or sparse matrix of shape
            (n_samples, n_features)
            The instances which may be queried. Sparse matrices are accepted
            only if they are supported by the base query strategy.
        return_utilities : bool, optional (default=False)
            If true, also return the utilities based on the query strategy.
            The default is False.

        Returns
        -------
        queried_indices : ndarray of shape (n_queried_instances,)
            The indices of instances in candidates which should be queried,
            with 0 <= n_queried_instances <= n_samples.
        utilities: ndarray of shape (n_samples,), optional
            The utilities based on the query strategy. Only provided if
            return_utilities is True.
""" candidates, return_utilities = self._validate_data( candidates, return_utilities ) # copy random state in case of simulating the query prior_random_state = self.random_state_.get_state() utilities = self.random_state_.random_sample(len(candidates)) self.random_state_.set_state(prior_random_state) # keep record if the instance is queried and if there was budget left, # when assessing the corresponding utilities queried = np.full(len(utilities), False) # keep the internal state to reset it later if simulate is true tmp_observed_instances = self.observed_instances_ tmp_queried_instances = self.queried_instances_ # check for each sample separately if budget is left and the utility is # high enough for i, utility in enumerate(utilities): tmp_observed_instances += 1 available_budget = ( tmp_observed_instances * self.budget_ - tmp_queried_instances ) queried[i] = ( self.allow_exceeding_budget or available_budget > 1 ) and (utility >= 1 - self.budget_) tmp_queried_instances += queried[i] # get the indices instances that should be queried queried_indices = np.where(queried)[0] # queried_indices = self.budget_manager_.query_by_utility(utilities) if return_utilities: return queried_indices, utilities else: return queried_indices def update(self, candidates, queried_indices): """Updates the budget manager and the count for seen and queried instances Parameters ---------- candidates : array-like or sparse matrix of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. Returns ------- self : StreamRandomSampling The RandomSampling returns itself, after it is updated. """ # check if a random state is set self._validate_data([[0]], False) # update observed instances and queried instances queried = np.zeros(len(candidates)) queried[queried_indices] = 1 self.observed_instances_ += candidates.shape[0] self.queried_instances_ += np.sum(queried) # update the random state assuming, that query(..., simulate=True) was # used self.random_state_.random_sample(len(candidates)) return self def _validate_data( self, candidates, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. return_utilities : bool, If true, also return the utilities based on the query strategy. reset : bool, optional (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray of shape (n_candidates, n_features) Checked candidate samples. return_utilities : bool, Checked boolean value of `return_utilities`. 
""" # check if counting of instances has begun if not hasattr(self, "observed_instances_"): self.observed_instances_ = 0 if not hasattr(self, "queried_instances_"): self.queried_instances_ = 0 check_scalar( self.allow_exceeding_budget, "allow_exceeding_budget", bool ) candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) self._validate_random_state() return candidates, return_utilities class PeriodicSampling(SingleAnnotatorStreamQueryStrategy): """The PeriodicSampling samples instances periodically. The length of that period is determined by the budget specified in the budgetmanager. For instance, a budget of 25% would result in the PeriodicSampling sampling every fourth instance. The main idea behind this query strategy is to exhaust a given budget as soon it is available. Instances are queried regardless of their position in the feature space. As this query strategy disregards any information about the instance. Thus, it should only be used as a baseline strategy. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. """ def __init__(self, budget=None, random_state=None): super().__init__(budget=budget, random_state=random_state) def query(self, candidates, return_utilities=False): """Ask the query strategy which instances in candidates to acquire. This query strategy only evaluates the time each instance arrives at. The utilities returned, when return_utilities is set to True, are either 0 (the instance is not queried) or 1 (the instance is queried). Please note that, when the decisions from this function may differ from the final sampling, simulate=True can set, so that the query strategy can be updated later with update(...) with the final sampling. This is especially helpful, when developing wrapper query strategies. Parameters ---------- candidates : array-like or sparse matrix of shape (n_samples, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. The default is False. Returns ------- queried_indices : ndarray of shape (n_queried_instances,) The indices of instances in candidates which should be queried, with 0 <= n_queried_instances <= n_samples. utilities: ndarray of shape (n_samples,), optional The utilities based on the query strategy. Only provided if return_utilities is True. 
""" candidates, return_utilities = self._validate_data( candidates, return_utilities ) utilities = np.zeros(candidates.shape[0]) # keep record if the instance is queried and if there was budget left, # when assessing the corresponding utilities queried = np.full(len(candidates), False) tmp_observed_instances = self.observed_instances_ tmp_queried_instances = self.queried_instances_ for i, x in enumerate(candidates): tmp_observed_instances += 1 remaining_budget = ( tmp_observed_instances * self.budget_ - tmp_queried_instances ) queried[i] = remaining_budget >= 1 if queried[i]: utilities[i] = 1 tmp_queried_instances += queried[i] # get the indices instances that should be queried queried_indices = np.where(queried)[0] # queried_indices = self.budget_manager_.query_by_utility(utilities) if return_utilities: return queried_indices, utilities else: return queried_indices def update(self, candidates, queried_indices): """Updates the budget manager and the count for seen and queried instances Parameters ---------- candidates : array-like or sparse matrix of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. Returns ------- self : PeriodicSampling The PeriodicSampler returns itself, after it is updated. """ # check if a budgetmanager is set self._validate_data(np.array([[0]]), False) queried = np.zeros(len(candidates)) queried[queried_indices] = 1 self.observed_instances_ += len(queried) self.queried_instances_ += np.sum(queried) return self def _validate_data( self, candidates, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. return_utilities : bool, If true, also return the utilities based on the query strategy. reset : bool, optional (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray of shape (n_candidates, n_features) Checked candidate samples. batch_size : int Checked number of samples to be selected in one AL cycle. return_utilities : bool, Checked boolean value of `return_utilities`. """ candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) self._validate_random_state() # check if counting of instances has begun if not hasattr(self, "observed_instances_"): self.observed_instances_ = 0 if not hasattr(self, "queried_instances_"): self.queried_instances_ = 0 return candidates, return_utilities
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/stream/_stream_baselines.py
_stream_baselines.py
0.887887
0.620305
import numpy as np
from sklearn import clone
from sklearn.utils import check_array, check_consistent_length

from ..classifier import ParzenWindowClassifier
from .budgetmanager import BalancedIncrementalQuantileFilter
from ..base import (
    SingleAnnotatorStreamQueryStrategy,
    SkactivemlClassifier,
    BudgetManager,
)
from ..pool import cost_reduction
from ..utils import (
    check_type,
    check_random_state,
    check_scalar,
    call_func,
    check_budget_manager,
)


class StreamProbabilisticAL(SingleAnnotatorStreamQueryStrategy):
    """StreamProbabilisticAL

    Probabilistic Active Learning in Datastreams (StreamProbabilisticAL) is
    an extension to Multi-Class Probabilistic Active Learning (McPAL) (see
    pool.ProbabilisticAL). It uses McPAL to assess the spatial utility. The
    Balanced Incremental Quantile Filter
    (BalancedIncrementalQuantileFilter), which is implemented within the
    default budget manager, is used to evaluate the temporal utility (see
    stream.budgetmanager.BalancedIncrementalQuantileFilter).

    Parameters
    ----------
    budget : float, optional (default=None)
        The budget which models the budgeting constraint used in the
        stream-based active learning setting.
    budget_manager : BudgetManager, optional (default=None)
        The BudgetManager which models the budgeting constraint used in the
        stream-based active learning setting. If set to None,
        BalancedIncrementalQuantileFilter will be used by default. The
        budget manager will be initialized based on the following
        conditions:
        If only a budget is given, the default budget manager is
        initialized with the given budget.
        If only a budget manager is given, this budget manager is used.
        If neither is given, the default budget manager with the default
        budget is used.
        If both are given and the budget differs from
        budget_manager.budget, a warning is thrown.
    metric : str or callable, optional (default=None)
        The metric must be None or a valid kernel as defined by the
        function `sklearn.metrics.pairwise.pairwise_kernels`. The kernel is
        used to calculate the frequency of labels near the candidates and
        multiplied with the probabilities returned by the `clf` to get a
        kernel frequency estimate for each class. If metric is set to None,
        the `predict_freq` function of the `clf` will be used instead. If
        this is not defined, an Exception is raised.
    metric_dict : dict, optional (default=None)
        Any further parameters are passed directly to the kernel function.
        If metric_dict is None and metric is 'rbf', metric_dict is set to
        {'gamma': 'mean'}.
    random_state : int, RandomState instance, optional (default=None)
        Controls the randomness of the query strategy.
    prior : float, optional (default=1.0e-3)
        The prior value that is passed onto ProbabilisticAL (see
        pool.ProbabilisticAL).
    m_max : int, optional (default=2)
        The m_max value that is passed onto ProbabilisticAL (see
        pool.ProbabilisticAL).

    References
    ----------
    [1] Kottke, M. (2015). Probabilistic Active Learning in Datastreams. In
        Advances in Intelligent Data Analysis XIV (pp. 145–157). Springer.
    """

    def __init__(
        self,
        budget_manager=None,
        budget=None,
        metric=None,
        metric_dict=None,
        random_state=None,
        prior=1.0e-3,
        m_max=2,
    ):
        super().__init__(budget=budget, random_state=random_state)
        self.budget_manager = budget_manager
        self.prior = prior
        self.m_max = m_max
        self.metric = metric
        self.metric_dict = metric_dict

    def query(
        self,
        candidates,
        clf,
        X=None,
        y=None,
        sample_weight=None,
        fit_clf=False,
        utility_weight=None,
        return_utilities=False,
    ):
        """Ask the query strategy which instances in candidates to acquire.
Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_proba`. If `self.metric` is None, the `clf` must also implement `predict_freq`. X : array-like of shape (n_samples, n_features), optional (default=None) Input samples used to fit the classifier. y : array-like of shape (n_samples), optional (default=None) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,), optional (default=None) Sample weights for X, used to fit the clf. fit_clf : bool,optional (default=False) If True, refit the classifier also requires X and y to be given. utility_weight : array-like of shape (n_candidate_samples), optional (default=None) Densities for each sample in `candidates`. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. The default is False. Returns ------- queried_indices : ndarray of shape (n_queried_instances,) The indices of instances in candidates which should be queried, with 0 <= n_queried_instances <= n_samples. utilities: ndarray of shape (n_samples,), optional The utilities based on the query strategy. Only provided if return_utilities is True. """ ( candidates, clf, X, y, sample_weight, fit_clf, utility_weight, return_utilities, ) = self._validate_data( candidates=candidates, clf=clf, X=X, y=y, sample_weight=sample_weight, fit_clf=fit_clf, utility_weight=utility_weight, return_utilities=return_utilities, ) if self.metric is not None: if self.metric_dict is None and self.metric == "rbf": self.metric_dict = {"gamma": "mean"} pwc = ParzenWindowClassifier( metric=self.metric, metric_dict=self.metric_dict, missing_label=clf.missing_label, classes=clf.classes, ) pwc.fit(X=X, y=y, sample_weight=sample_weight) n = pwc.predict_freq(candidates).sum(axis=1, keepdims=True) pred_proba = clf.predict_proba(candidates) k_vec = n * pred_proba else: k_vec = clf.predict_freq(candidates) utilities = cost_reduction(k_vec, prior=self.prior, m_max=self.m_max) utilities *= utility_weight queried_indices = self.budget_manager_.query_by_utility(utilities) if return_utilities: return queried_indices, utilities else: return queried_indices def update( self, candidates, queried_indices, budget_manager_param_dict=None ): """Updates the budget manager. Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. budget_manager_param_dict : kwargs, optional (default=None) Optional kwargs for budgetmanager. Returns ------- self : StreamProbabilisticAL PALS returns itself, after it is updated. 
""" # check if a budgetmanager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, BalancedIncrementalQuantileFilter, ) budget_manager_param_dict = ( {} if budget_manager_param_dict is None else budget_manager_param_dict ) call_func( self.budget_manager_.update, candidates=candidates, queried_indices=queried_indices, **budget_manager_param_dict ) return self def _validate_data( self, candidates, clf, X, y, sample_weight, fit_clf, utility_weight, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like, shape (n_candidates, n_features) Candidate samples. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_proba`. If `self.metric` is None, the `clf` must also implement `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. fit_clf : bool, If true, refit the classifier also requires X and y to be given. utility_weight: array-like of shape (n_candidate_samples) Densities for each sample in `candidates`. return_utilities : bool, If true, also return the utilities based on the query strategy. reset : bool, optional (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. X: np.ndarray, shape (n_samples, n_features) Checked training samples y: np.ndarray, shape (n_candidates) Checked training labels sampling_weight: np.ndarray, shape (n_candidates) Checked training sample weight fit_clf : bool, Checked boolean value of `fit_clf` utility_weight: array-like of shape (n_candidate_samples) Checked densities for each sample in `candidates`. candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples return_utilities : bool, Checked boolean value of `return_utilities`. 
""" candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) # check if a budgetmanager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, BalancedIncrementalQuantileFilter, ) X, y, sample_weight = self._validate_X_y_sample_weight( X, y, sample_weight ) clf = self._validate_clf(clf, X, y, sample_weight, fit_clf) utility_weight = self._validate_utility_weight( utility_weight, candidates ) if self.metric is None and not hasattr(clf, "predict_freq"): raise TypeError( "clf has no predict_freq and metric was set to None" ) check_scalar( self.prior, "prior", float, min_val=0, min_inclusive=False ) check_scalar(self.m_max, "m_max", int, min_val=0, min_inclusive=False) self._validate_random_state() return ( candidates, clf, X, y, sample_weight, fit_clf, utility_weight, return_utilities, ) def _validate_X_y_sample_weight(self, X, y, sample_weight): """Validate if X, y and sample_weight are numeric and of equal length. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. Returns ------- X : array-like of shape (n_samples, n_features) Checked Input samples. y : array-like of shape (n_samples) Checked Labels of the input samples 'X'. Converts y to a numpy array """ if sample_weight is not None: sample_weight = np.array(sample_weight) check_consistent_length(sample_weight, y) if X is not None and y is not None: X = check_array(X) y = np.array(y) check_consistent_length(X, y) return X, y, sample_weight def _validate_clf(self, clf, X, y, sample_weight, fit_clf): """Validate if clf is a valid SkactivemlClassifier. If clf is untrained, clf is trained using X, y and sample_weight. Parameters ---------- clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. Returns ------- clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. """ # Check if the classifier and its arguments are valid. check_type(clf, "clf", SkactivemlClassifier) check_type(fit_clf, "fit_clf", bool) if fit_clf: clf = clone(clf).fit(X, y, sample_weight) return clf def _validate_utility_weight(self, utility_weight, candidates): """Validate if utility_weight is numeric and of equal length as candidates. Parameters ---------- candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples utility_weight: array-like of shape (n_candidate_samples) Densities for each sample in `candidates`. Returns ------- utility_weight : array-like of shape (n_candidate_samples) Checked densities for each sample in `candidates`. 
""" if utility_weight is None: utility_weight = np.ones(len(candidates)) utility_weight = check_array(utility_weight, ensure_2d=False) check_consistent_length(utility_weight, candidates) return utility_weight def _validate_random_state(self): """Creates a copy 'random_state_' if random_state is an instance of np.random_state. If not create a new random state. See also :func:`~sklearn.utils.check_random_state` """ if not hasattr(self, "random_state_"): self.random_state_ = self.random_state self.random_state_ = check_random_state(self.random_state_)
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/stream/_stream_probabilistic_al.py
_stream_probabilistic_al.py
0.912858
0.513912
import numpy as np from sklearn.base import clone from sklearn.utils import check_array, check_consistent_length from .budgetmanager import ( FixedUncertaintyBudgetManager, VariableUncertaintyBudgetManager, SplitBudgetManager, RandomVariableUncertaintyBudgetManager, ) from ..base import ( BudgetManager, SingleAnnotatorStreamQueryStrategy, SkactivemlClassifier, ) from ..utils import ( check_type, call_func, check_budget_manager, ) class UncertaintyZliobaite(SingleAnnotatorStreamQueryStrategy): """UncertaintyZliobaite The UncertaintyZliobaite class provides the base for query strategies proposed by Žliobaitė et al. in [1]. The strategies evaluate the classifier's uncertainty based on its predictions and instances' labels are queried when the uncertainty exceeds a specific threshold. Žliobaitė et al. propose various techniques to calculate such a threshold. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, FixedUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. References ---------- [1] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. """ def __init__( self, budget_manager=None, budget=None, random_state=None, ): super().__init__(budget=budget, random_state=random_state) self.budget_manager = budget_manager def query( self, candidates, clf, X=None, y=None, sample_weight=None, fit_clf=False, return_utilities=False, ): """Ask the query strategy which instances in candidates to acquire. Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features), optional (default=None) Input samples used to fit the classifier. y : array-like of shape (n_samples), optional (default=None) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,), optional (default=None) Sample weights for X, used to fit the clf. fit_clf : bool, optional (default=False) If true, refit the classifier also requires X and y to be given. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. The default is False. Returns ------- queried_indices : ndarray of shape (n_queried_instances,) The indices of instances in candidates which should be queried, with 0 <= n_queried_instances <= n_samples. utilities: ndarray of shape (n_samples,), optional The utilities based on the query strategy. Only provided if return_utilities is True. 
""" ( candidates, clf, X, y, sample_weight, fit_clf, return_utilities, ) = self._validate_data( candidates, clf=clf, X=X, y=y, sample_weight=sample_weight, fit_clf=fit_clf, return_utilities=return_utilities, ) predict_proba = clf.predict_proba(candidates) confidence = np.max(predict_proba, axis=1) utilities = 1 - confidence queried_indices = self.budget_manager_.query_by_utility(utilities) if return_utilities: return queried_indices, utilities else: return queried_indices def update( self, candidates, queried_indices, budget_manager_param_dict=None ): """Updates the budget manager and the count for seen and queried instances Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. budget_manager_param_dict : kwargs, optional (default=None) Optional kwargs for budget manager. Returns ------- self : UncertaintyZliobaite The UncertaintyZliobaite returns itself, after it is updated. """ # check if a budgetmanager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) budget_manager_param_dict = ( {} if budget_manager_param_dict is None else budget_manager_param_dict ) call_func( self.budget_manager_.update, candidates=candidates, queried_indices=queried_indices, **budget_manager_param_dict ) return self def _validate_data( self, candidates, clf, X, y, sample_weight, fit_clf, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. return_utilities : bool, If true, also return the utilities based on the query strategy. fit_clf : bool, If true, refit the classifier also requires X and y to be given. reset : bool, optional (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. X: np.ndarray, shape (n_samples, n_features) Checked training samples y: np.ndarray, shape (n_candidates) Checked training labels sampling_weight: np.ndarray, shape (n_candidates) Checked training sample weight fit_clf : bool, Checked boolean value of `fit_clf` candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples return_utilities : bool, Checked boolean value of `return_utilities`. 
""" candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) self._validate_random_state() X, y, sample_weight = self._validate_X_y_sample_weight( X=X, y=y, sample_weight=sample_weight ) clf = self._validate_clf(clf, X, y, sample_weight, fit_clf) # check if a budgetmanager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) return candidates, clf, X, y, sample_weight, fit_clf, return_utilities def _validate_clf(self, clf, X, y, sample_weight, fit_clf): """Validate if clf is a valid SkactivemlClassifier. If clf is untrained, clf is trained using X, y and sample_weight. Parameters ---------- clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. fit_clf : bool, If true, refit the classifier also requires X and y to be given. Returns ------- clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. """ # Check if the classifier and its arguments are valid. check_type(clf, "clf", SkactivemlClassifier) check_type(fit_clf, "fit_clf", bool) if fit_clf: clf = clone(clf).fit(X, y, sample_weight) return clf def _validate_X_y_sample_weight(self, X, y, sample_weight): """Validate if X, y and sample_weight are numeric and of equal length. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. Returns ------- X : array-like of shape (n_samples, n_features) Checked Input samples. y : array-like of shape (n_samples) Checked Labels of the input samples 'X'. Converts y to a numpy array """ if sample_weight is not None: sample_weight = np.array(sample_weight) check_consistent_length(sample_weight, y) if X is not None and y is not None: X = check_array(X) y = np.array(y) check_consistent_length(X, y) return X, y, sample_weight class FixedUncertainty(UncertaintyZliobaite): """FixedUncertainty The FixedUncertainty (Fixed-Uncertainty in [1]) query strategy samples instances based on the classifiers uncertainty assessed based on the classifier's predictions. The instance is queried when the probability of the most likely class exceeds a threshold calculated based on the budget and the number of classes. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budgetmanager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, FixedUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. 
If both are not given the default budget manager with the default budget. If both are given and the budget differs from budget manager.budget a warning is thrown. random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. References ---------- [1] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. """ def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budgetmanager : BudgetManager The BudgetManager that should be used by default. """ return FixedUncertaintyBudgetManager class VariableUncertainty(UncertaintyZliobaite): """VariableUncertainty The VariableUncertainty (Var-Uncertainty in [1]) query strategy samples instances based on the classifiers uncertainty assessed based on the classifier's predictions. The instance is queried when the probability of the most likely class exceeds a time-dependent threshold calculated based on the budget, the number of classes and the number of observed and acquired samples. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budgetmanager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, VariableUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budgetmanager is initialized with the given budget. If only a budgetmanager is given use the budgetmanager. If both are not given the default budgetmanager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. References ---------- [1] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. """ def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budgetmanager : BudgetManager The BudgetManager that should be used by default. """ return VariableUncertaintyBudgetManager class RandomVariableUncertainty(UncertaintyZliobaite): """RandomVariableUncertainty The RandomVariableUncertainty (Ran-Var-Uncertainty in [1]) query strategy samples instances based on the classifier's uncertainty assessed based on the classifier's predictions. The instance is queried when the probability of the most likely class exceeds a time-dependent threshold calculated based on the budget, the number of classes and the number of observed and acquired samples. To better adapt at change detection the threshold is multiplied by a random number generator with N(1,delta). Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budgetmanager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, RandomVariableUncertaintyBudgetManager will be used by default. 
The budget manager will be initialized based on the following conditions: If only a budget is given the default budgetmanager is initialized with the given budget. If only a budgetmanager is given use the budgetmanager. If both are not given the default budgetmanager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. References ---------- [1] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. """ def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budgetmanager : BudgetManager The BudgetManager that should be used by default. """ return RandomVariableUncertaintyBudgetManager class Split(UncertaintyZliobaite): """Split The Split [1] query strategy samples in 100*v% of instances randomly and in 100*(1-v)% of cases according to VariableUncertainty. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budgetmanager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, SplitBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budgetmanager is given use the budgetmanager. If both are not given the default budgetmanager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. References ---------- [1] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. """ def _get_default_budget_manager(self): return SplitBudgetManager
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/stream/_uncertainty_zliobaite.py
_uncertainty_zliobaite.py
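A minimal usage sketch for the strategies defined above in `_uncertainty_zliobaite.py`: the classifier is refitted on the labels gathered so far, `query` decides for each incoming instance whether its label should be acquired, and `update` informs the budget manager about that decision. This is a sketch under assumptions: the synthetic stream, the warm-start size, and the budget value are illustrative, and it assumes `VariableUncertainty` is exported from `skactiveml.stream` (it is defined in the module above).

import numpy as np
from sklearn.datasets import make_classification

from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.stream import VariableUncertainty
from skactiveml.utils import MISSING_LABEL

# Assumed synthetic data stream; any sequence of (x, y) pairs works.
X, y_true = make_classification(
    n_samples=300, n_features=2, n_informative=2, n_redundant=0, random_state=0
)
n_init = 20  # the first instances are fully labeled to warm-start the classifier

X_train = list(X[:n_init])
y_train = list(y_true[:n_init])

clf = ParzenWindowClassifier(classes=[0, 1], missing_label=MISSING_LABEL)
query_strategy = VariableUncertainty(budget=0.1, random_state=0)

n_queries = 0
for x_t, y_t in zip(X[n_init:], y_true[n_init:]):
    candidate = x_t.reshape(1, -1)
    clf.fit(X_train, y_train)
    # Ask whether the label of the current stream instance should be queried.
    queried_indices = query_strategy.query(candidates=candidate, clf=clf)
    # Report the final sampling decision back to the budget manager.
    query_strategy.update(candidates=candidate, queried_indices=queried_indices)
    # Store the label only if it was queried; otherwise mark it as missing.
    label = y_t if len(queried_indices) > 0 else MISSING_LABEL
    X_train.append(x_t)
    y_train.append(label)
    n_queries += len(queried_indices)

print(f"queried {n_queries} of {len(X) - n_init} stream instances")

Exchanging `VariableUncertainty` for `FixedUncertainty`, `RandomVariableUncertainty`, or `Split` requires no other change, since all of them share the `query`/`update` interface and only differ in their default budget manager.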
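The module that follows (`StreamDensityBasedAL` and the `CognitiveDualQueryStrategy` variants) only permits a query when a local density factor is sufficiently high; its `_calculate_ldf` method computes this factor as the number of instances in a sliding window for which the incoming candidate becomes the new nearest neighbour. Below is a standalone sketch of that computation using `sklearn.metrics.pairwise.pairwise_distances`, as in the source; the helper name, the toy data, and the appending of the candidate inside the helper (the source does this in `query`/`update`) are simplifying assumptions.

from collections import deque

import numpy as np
from sklearn.metrics.pairwise import pairwise_distances


def local_density_factor(window, min_dist, candidate):
    """Count the window instances whose nearest neighbour becomes `candidate`.

    `window` holds past instances and `min_dist` their current
    nearest-neighbour distances; both are updated in place.
    """
    ldf = 0
    if len(window) >= 1:
        distances = pairwise_distances(list(window), [candidate]).ravel()
        is_new_nn = distances < np.array(min_dist)
        ldf = int(np.sum(is_new_nn))
        for i in np.where(is_new_nn)[0]:
            # The candidate is now the nearest neighbour of window instance i.
            min_dist[i] = distances[i]
        min_dist.append(np.min(distances))
    else:
        min_dist.append(np.inf)
    window.append(candidate)
    return ldf


window, min_dist = deque(maxlen=100), deque(maxlen=100)
rng = np.random.default_rng(0)
for x in rng.normal(size=(10, 2)):
    print(local_density_factor(window, min_dist, x))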
from collections import deque from copy import copy import warnings import numpy as np from sklearn.utils import check_array, check_consistent_length, check_scalar from sklearn.base import clone from sklearn.metrics.pairwise import pairwise_distances from skactiveml.base import ( BudgetManager, SingleAnnotatorStreamQueryStrategy, SkactivemlClassifier, ) from skactiveml.utils import ( check_type, call_func, check_budget_manager, ) from skactiveml.stream.budgetmanager import ( FixedUncertaintyBudgetManager, DensityBasedSplitBudgetManager, VariableUncertaintyBudgetManager, RandomBudgetManager, RandomVariableUncertaintyBudgetManager, ) class StreamDensityBasedAL(SingleAnnotatorStreamQueryStrategy): """StreamDensityBasedAL The StreamDensityBasedAL [1] query strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2]. In addition to the uncertainty assessment, StreamDensityBasedAL assesses the local density and only allows querying the label for a candidate if that local density is sufficiently high. The local density is measured using a sliding window. The local density is represented by the number of instances, the new instance is the new nearest neighbor from. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, DensityBasedBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. window_size : int, optional (default=100) Determines the sliding window size of the local density window. random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None, `sklearn.metrics.pairwise.pairwise_distances` will be used by default dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. References ---------- [1] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. """ def __init__( self, budget_manager=None, budget=None, random_state=None, window_size=1000, dist_func=None, dist_func_dict=None, ): super().__init__(budget=budget, random_state=random_state) self.budget_manager = budget_manager self.window_size = window_size self.dist_func = dist_func self.dist_func_dict = dist_func_dict def query( self, candidates, clf, X=None, y=None, sample_weight=None, fit_clf=False, return_utilities=False, ): """Ask the query strategy which instances in candidates to acquire. Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which may be queried. 
Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features), optional (default=None) Input samples used to fit the classifier. y : array-like of shape (n_samples), optional (default=None) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,), optional Sample weights for X, used to fit the clf. fit_clf : bool, optional (default=False) If true, refit the classifier also requires X and y to be given. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. The default is False. Returns ------- queried_indices : ndarray of shape (n_queried_instances,) The indices of instances in candidates which should be queried, with 0 <= n_queried_instances <= n_samples. utilities: ndarray of shape (n_samples,), optional The utilities based on the query strategy. Only provided if return_utilities is True. """ ( candidates, clf, X, y, sample_weight, fit_clf, return_utilities, ) = self._validate_data( candidates, clf=clf, X=X, y=y, sample_weight=sample_weight, fit_clf=fit_clf, return_utilities=return_utilities, ) # calculate the margin used as utillities predict_proba = clf.predict_proba(candidates) utilities_index = np.argpartition(predict_proba, -2)[:, -2:] confidence = ( np.take_along_axis(predict_proba, utilities_index[:, [1]], 1) - np.take_along_axis(predict_proba, utilities_index[:, [0]], 1) ).reshape([-1]) utilities = 1 - confidence tmp_min_dist = copy(self.min_dist_) tmp_window = copy(self.window_) queried_indices = [] for t, (u, x_cand) in enumerate(zip(utilities, candidates)): local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor > 0: queried_indice = self.budget_manager_.query_by_utility( np.array([u]) ) if len(queried_indice) > 0: queried_indices.append(t) else: self.budget_manager_.query_by_utility(np.array([np.nan])) self.window_.append(x_cand) self.min_dist_ = tmp_min_dist self.window_ = tmp_window if return_utilities: return queried_indices, utilities else: return queried_indices def update( self, candidates, queried_indices, budget_manager_param_dict=None ): """Updates the budget manager and the count for seen and queried instances Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. budget_manager_param_dict : kwargs, optional (default=None) Optional kwargs for budget_manager. Returns ------- self : StreamDensityBasedAL The StreamDensityBasedAL returns itself, after it is updated. 
""" # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) if not hasattr(self, "window_"): self.window_ = deque(maxlen=self.window_size) if not hasattr(self, "min_dist_"): self.min_dist_ = deque(maxlen=self.window_size) if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("frequency_estimation needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") budget_manager_param_dict = ( {} if budget_manager_param_dict is None else budget_manager_param_dict ) new_candidates = [] for x_cand in candidates: local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor > 0: new_candidates.append(x_cand) else: new_candidates.append(np.nan) self.window_.append(x_cand) call_func( self.budget_manager_.update, candidates=new_candidates, queried_indices=queried_indices, **budget_manager_param_dict ) return self def _calculate_ldf(self, candidates): """Calculate the number of new nearest neighbor for candidates in the sliding window. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. Returns ------- ldf: array-like of shape (n_candiates) Numbers of new nearest neighbor for candidates """ ldf = 0 if len(self.window_) >= 1: distances = self.dist_func_(self.window_, candidates).ravel() is_new_nn = distances < np.array(self.min_dist_) ldf = np.sum(is_new_nn) for i in np.where(is_new_nn)[0]: self.min_dist_[i] = distances[i] self.min_dist_.append(np.min(distances)) else: self.min_dist_.append(np.inf) return ldf def _validate_data( self, candidates, clf, X, y, sample_weight, fit_clf, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. return_utilities : bool, If true, also return the utilities based on the query strategy. fit_clf : bool, If true, refit the classifier also requires X and y to be given. reset : bool, optional (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. 
X: np.ndarray, shape (n_samples, n_features) Checked training samples y: np.ndarray, shape (n_candidates) Checked training labels sampling_weight: np.ndarray, shape (n_candidates) Checked training sample weight fit_clf : bool, Checked boolean value of `fit_clf` candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples return_utilities : bool, Checked boolean value of `return_utilities`. """ candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) self._validate_random_state() X, y, sample_weight = self._validate_X_y_sample_weight( X=X, y=y, sample_weight=sample_weight ) clf = self._validate_clf(clf, X, y, sample_weight, fit_clf) # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("dist_func_ needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") # check density_threshold check_scalar(self.window_size, "window_size", int, min_val=1) if not hasattr(self, "window_"): self.window_ = deque(maxlen=self.window_size) if not hasattr(self, "min_dist_"): self.min_dist_ = deque(maxlen=self.window_size) return candidates, clf, X, y, sample_weight, fit_clf, return_utilities def _validate_clf(self, clf, X, y, sample_weight, fit_clf): """Validate if clf is a valid SkactivemlClassifier. If clf is untrained, clf is trained using X, y and sample_weight. Parameters ---------- clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) (default=None) Sample weights for X, used to fit the clf. fit_clf : bool, If true, refit the classifier also requires X and y to be given. Returns ------- clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. """ # Check if the classifier and its arguments are valid. check_type(clf, "clf", SkactivemlClassifier) check_type(fit_clf, "fit_clf", bool) if fit_clf: clf = clone(clf).fit(X, y, sample_weight) return clf def _validate_X_y_sample_weight(self, X, y, sample_weight): """Validate if X, y and sample_weight are numeric and of equal length. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) (default=None) Sample weights for X, used to fit the clf. Returns ------- X : array-like of shape (n_samples, n_features) Checked Input samples. y : array-like of shape (n_samples) Checked Labels of the input samples 'X'. 
Converts y to a numpy array """ if sample_weight is not None: sample_weight = np.array(sample_weight) check_consistent_length(sample_weight, y) if X is not None and y is not None: X = check_array(X) y = np.array(y) check_consistent_length(X, y) return X, y, sample_weight def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return DensityBasedSplitBudgetManager class CognitiveDualQueryStrategy(SingleAnnotatorStreamQueryStrategy): """CognitiveDualQueryStrategy This class is the base for the CognitiveDualQueryStrategy query strategy proposed in [1]. To use this strategy, refer to `CognitiveDualQueryStrategyRan`, `CognitiveDualQueryStrategyRanVarUn`, `CognitiveDualQueryStrategyVarUn` , and `CognitiveDualQueryStrategyFixUn`. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, a default budget manager will be used that is defined in the class inheriting from CognitiveDualQueryStrategy. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. 
The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.EstimatedBudgetZliobaite : BudgetManager implementing the base class for Zliobaite based budget managers CognitiveDualQueryStrategyRan : CognitiveDualQueryStrategy using the RandomBudgetManager that is based on EstimatedBudgetZliobaite CognitiveDualQueryStrategyFixUn : CognitiveDualQueryStrategy using the FixedUncertaintyBudgetManager that is based on EstimatedBudgetZliobaite CognitiveDualQueryStrategyVarUn : VariableUncertaintyBudgetManager using the VariableUncertaintyBudgetManager that is based on EstimatedBudgetZliobaite CognitiveDualQueryStrategyRanVarUn : CognitiveDualQueryStrategy using the RandomVariableUncertaintyBudgetManager that is based on EstimatedBudgetZliobaite References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget_manager=None, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__(budget=budget, random_state=random_state) self.budget_manager = budget_manager self.density_threshold = density_threshold self.dist_func = dist_func self.dist_func_dict = dist_func_dict self.cognition_window_size = cognition_window_size self.force_full_budget = force_full_budget def query( self, candidates, clf, X=None, y=None, sample_weight=None, fit_clf=False, return_utilities=False, ): """Ask the query strategy which instances in candidates to acquire. Please note that, when the decisions from this function may differ from the final sampling, so the query strategy can be updated later with update(...) with the final sampling. Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features), optional (default=None) Input samples used to fit the classifier. y : array-like of shape (n_samples), optional (default=None) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,), optional Sample weights for X, used to fit the clf. fit_clf : bool, optional (default=False) If true, refit the classifier also requires X and y to be given. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. The default is False. Returns ------- queried_indices : ndarray of shape (n_queried_instances,) The indices of instances in candidates which should be queried, with 0 <= n_queried_instances <= n_samples. utilities: ndarray of shape (n_samples,), optional The utilities based on the query strategy. Only provided if return_utilities is True. 
""" ( candidates, clf, X, y, sample_weight, fit_clf, return_utilities, ) = self._validate_data( candidates, clf=clf, X=X, y=y, sample_weight=sample_weight, fit_clf=fit_clf, return_utilities=return_utilities, ) # its the margin but used as utillities predict_proba = clf.predict_proba(candidates) confidence = np.max(predict_proba, axis=1) utilities = 1 - confidence # copy variables tmp_cognition_window = copy(self.cognition_window_) tmp_theta = copy(self.theta_) tmp_s = copy(self.s_) tmp_t_x = copy(self.t_x_) f = copy(self.f_) min_dist = copy(self.min_dist_) t = copy(self.t_) queried_indices = [] for i, (u, x_cand) in enumerate(zip(utilities, candidates)): local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor >= self.density_threshold: queried_indice = self.budget_manager_.query_by_utility( np.array([u]) ) if len(queried_indice) > 0: queried_indices.append(i) elif self.force_full_budget: self.budget_manager_.query_by_utility(np.array([np.nan])) self.t_ += 1 # overwrite changes self.cognition_window_ = tmp_cognition_window self.theta_ = tmp_theta self.s_ = tmp_s self.t_x_ = tmp_t_x self.f_ = f self.min_dist_ = min_dist self.t_ = t if return_utilities: return queried_indices, utilities else: return queried_indices def update( self, candidates, queried_indices, budget_manager_param_dict=None ): """Updates the budget manager and the count for seen and queried instances Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. budget_manager_param_dict : kwargs, optional (default=None) Optional kwargs for budget_manager. Returns ------- self : CognitiveDualQueryStrategy The CognitiveDualQueryStrategy returns itself, after it is updated. 
""" self._validate_force_full_budget() # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) # _init_members if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("frequency_estimation needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") if not hasattr(self, "min_dist_"): self.min_dist_ = [] if not hasattr(self, "t_"): self.t_ = 0 if not hasattr(self, "cognition_window_"): self.cognition_window_ = [] if not hasattr(self, "f_"): self.f_ = [] if not hasattr(self, "theta_"): self.theta_ = [] if not hasattr(self, "s_"): self.s_ = [] if not hasattr(self, "t_x_"): self.t_x_ = [] budget_manager_param_dict = ( {} if budget_manager_param_dict is None else budget_manager_param_dict ) new_candidates = [] for x_cand in candidates: local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor >= self.density_threshold: new_candidates.append(x_cand) elif self.force_full_budget: new_candidates.append(np.nan) self.t_ += 1 call_func( self.budget_manager_.update, candidates=new_candidates, queried_indices=queried_indices, **budget_manager_param_dict ) return self def _calculate_ldf(self, candidates): """Calculate the number of new nearest neighbor for candiates in the cognition_window. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. Returns ------- ldf: array-like of shape (n_candiates) Numbers of new nearest neighbor for candidates """ ldf = 0 f = 1 t_x = self.t_ s = 1 theta = 0 if len(self.cognition_window_) >= 1: distances = self.dist_func_( self.cognition_window_, candidates ).ravel() is_new_nn = distances < np.array(self.min_dist_) ldf = np.sum(is_new_nn) for i in np.where(is_new_nn)[0]: self.t_x_[i] = t_x self.theta_[i] += 1 self.min_dist_[i] = distances[i] self.min_dist_.append(np.min(distances)) else: self.min_dist_.append(np.inf) for t, _ in enumerate(self.cognition_window_): self.f_[t] = 1 / (self.theta_[t] + 1) tmp = -self.f_[t] * (t_x - self.t_x_[t]) self.s_[t] = np.exp(tmp) if len(self.cognition_window_) > self.cognition_window_size: # remove element with the smallest memory strength remove_index = np.argmin(self.s_) self.cognition_window_.pop(remove_index) self.theta_.pop(remove_index) self.s_.pop(remove_index) self.t_x_.pop(remove_index) self.f_.pop(remove_index) self.min_dist_.pop(remove_index) self.cognition_window_.extend(candidates) self.theta_.append(theta) self.s_.append(s) self.t_x_.append(t_x) self.f_.append(f) return ldf def _validate_data( self, candidates, clf, X, y, sample_weight, fit_clf, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. 
X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. return_utilities : bool, If true, also return the utilities based on the query strategy. fit_clf : bool, If true, refit the classifier also requires X and y to be given. reset : bool, (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. X: np.ndarray, shape (n_samples, n_features) Checked training samples y: np.ndarray, shape (n_candidates) Checked training labels sampling_weight: np.ndarray, shape (n_candidates) Checked training sample weight fit_clf : bool, Checked boolean value of `fit_clf` candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples return_utilities : bool, Checked boolean value of `return_utilities`. """ candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) self._validate_random_state() X, y, sample_weight = self._validate_X_y_sample_weight( X=X, y=y, sample_weight=sample_weight ) clf = self._validate_clf(clf, X, y, sample_weight, fit_clf) # check density_threshold check_scalar( self.density_threshold, "density_threshold", int, min_val=0 ) check_scalar( self.cognition_window_size, "cognition_window_size", int, min_val=1 ) self._validate_force_full_budget() # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("frequency_estimation needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") if not hasattr(self, "min_dist_"): self.min_dist_ = [] if not hasattr(self, "t_"): self.t_ = 0 if not hasattr(self, "cognition_window_"): self.cognition_window_ = [] if not hasattr(self, "f_"): self.f_ = [] if not hasattr(self, "theta_"): self.theta_ = [] if not hasattr(self, "s_"): self.s_ = [] if not hasattr(self, "t_x_"): self.t_x_ = [] return candidates, clf, X, y, sample_weight, fit_clf, return_utilities def _validate_clf(self, clf, X, y, sample_weight, fit_clf): """Validate if clf is a valid SkactivemlClassifier. If clf is untrained, clf is trained using X, y and sample_weight. Parameters ---------- clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. 
fit_clf : bool, If true, refit the classifier also requires X and y to be given. Returns ------- clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. """ # Check if the classifier and its arguments are valid. check_type(clf, "clf", SkactivemlClassifier) check_type(fit_clf, "fit_clf", bool) if fit_clf: clf = clone(clf).fit(X, y, sample_weight) return clf def _validate_force_full_budget(self): # check force_full_budget check_type(self.force_full_budget, "force_full_budget", bool) if not hasattr(self, "budget_manager_") and not self.force_full_budget: warnings.warn( "force_full_budget is set to False. " "Therefore the full budget may not be utilised." ) def _validate_X_y_sample_weight(self, X, y, sample_weight): """Validate if X, y and sample_weight are numeric and of equal length. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. Returns ------- X : array-like of shape (n_samples, n_features) Checked Input samples. y : array-like of shape (n_samples) Checked Labels of the input samples 'X'. Converts y to a numpy array """ if sample_weight is not None: sample_weight = np.array(sample_weight) check_consistent_length(sample_weight, y) if X is not None and y is not None: X = check_array(X) y = np.array(y) check_consistent_length(X, y) return X, y, sample_weight def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return RandomVariableUncertaintyBudgetManager class CognitiveDualQueryStrategyRan(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyRan This class implements the CognitiveDualQueryStrategy strategy with Random Sampling. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, RandomBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. 
dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.RandomBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for RandomBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return RandomBudgetManager class CognitiveDualQueryStrategyFixUn(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyFixUn This class implements the CognitiveDualQueryStrategy strategy with FixedUncertainty. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, FixedUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. 
cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.FixedUncertaintyBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for FixedUncertaintyBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return FixedUncertaintyBudgetManager class CognitiveDualQueryStrategyVarUn(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyVarUn This class implements the CognitiveDualQueryStrategy strategy with VariableUncertainty. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, VariableUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. 
cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.VariableUncertaintyBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for VariableUncertaintyBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return VariableUncertaintyBudgetManager class CognitiveDualQueryStrategyRanVarUn(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyRanVarUn This class implements the CognitiveDualQueryStrategy strategy with RandomVariableUncertainty. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, RandomVariableUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. 
density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.RandomVariableUncertaintyBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for RandomVariableUncertaintyBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return RandomVariableUncertaintyBudgetManager
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/stream/_density_uncertainty.py
_density_uncertainty.py
from collections import deque
from copy import copy
import warnings

import numpy as np

from sklearn.utils import check_array, check_consistent_length, check_scalar
from sklearn.base import clone
from sklearn.metrics.pairwise import pairwise_distances

from skactiveml.base import (
    BudgetManager,
    SingleAnnotatorStreamQueryStrategy,
    SkactivemlClassifier,
)
from skactiveml.utils import (
    check_type,
    call_func,
    check_budget_manager,
)
from skactiveml.stream.budgetmanager import (
    FixedUncertaintyBudgetManager,
    DensityBasedSplitBudgetManager,
    VariableUncertaintyBudgetManager,
    RandomBudgetManager,
    RandomVariableUncertaintyBudgetManager,
)


class StreamDensityBasedAL(SingleAnnotatorStreamQueryStrategy):
    """StreamDensityBasedAL

    The StreamDensityBasedAL [1] query strategy is an extension to the
    uncertainty-based query strategies proposed by Žliobaitė et al. [2]. In
    addition to the uncertainty assessment, StreamDensityBasedAL assesses the
    local density and only allows querying the label for a candidate if that
    local density is sufficiently high. The local density is measured using a
    sliding window and is represented by the number of instances in that
    window for which the new instance becomes the new nearest neighbor.

    Parameters
    ----------
    budget : float, optional (default=None)
        The budget which models the budgeting constraint used in
        the stream-based active learning setting.
    budget_manager : BudgetManager, optional (default=None)
        The BudgetManager which models the budgeting constraint used in
        the stream-based active learning setting. If set to None,
        DensityBasedSplitBudgetManager will be used by default. The budget
        manager will be initialized based on the following conditions:
        If only a budget is given, the default budget manager is initialized
        with the given budget.
        If only a budget manager is given, that budget manager is used.
        If neither is given, the default budget manager is used with the
        default budget.
        If both are given and the budget differs from budget_manager.budget,
        a warning is thrown.
    window_size : int, optional (default=1000)
        Determines the sliding window size of the local density window.
    random_state : int, RandomState instance, optional (default=None)
        Controls the randomness of the estimator.
    dist_func : callable, optional (default=None)
        The distance function used to calculate the distances within the
        local density window. If None,
        `sklearn.metrics.pairwise.pairwise_distances` will be used by
        default.
    dist_func_dict : dict, optional (default=None)
        Additional parameters for `dist_func`.

    References
    ----------
    [1] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High
        density-focused uncertainty sampling for active learning over
        evolving stream data. In BigMine 2014 (pp. 133-148).
    [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active
        Learning With Drifting Streaming Data. IEEE Transactions on Neural
        Networks and Learning Systems, 25(1), 27-39.
    """

    def __init__(
        self,
        budget_manager=None,
        budget=None,
        random_state=None,
        window_size=1000,
        dist_func=None,
        dist_func_dict=None,
    ):
        super().__init__(budget=budget, random_state=random_state)
        self.budget_manager = budget_manager
        self.window_size = window_size
        self.dist_func = dist_func
        self.dist_func_dict = dist_func_dict

    def query(
        self,
        candidates,
        clf,
        X=None,
        y=None,
        sample_weight=None,
        fit_clf=False,
        return_utilities=False,
    ):
        """Ask the query strategy which instances in candidates to acquire.

        Parameters
        ----------
        candidates : {array-like, sparse matrix} of shape
        (n_samples, n_features)
            The instances which may be queried.
Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features), optional (default=None) Input samples used to fit the classifier. y : array-like of shape (n_samples), optional (default=None) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,), optional Sample weights for X, used to fit the clf. fit_clf : bool, optional (default=False) If true, refit the classifier also requires X and y to be given. return_utilities : bool, optional (default=False) If true, also return the utilities based on the query strategy. The default is False. Returns ------- queried_indices : ndarray of shape (n_queried_instances,) The indices of instances in candidates which should be queried, with 0 <= n_queried_instances <= n_samples. utilities: ndarray of shape (n_samples,), optional The utilities based on the query strategy. Only provided if return_utilities is True. """ ( candidates, clf, X, y, sample_weight, fit_clf, return_utilities, ) = self._validate_data( candidates, clf=clf, X=X, y=y, sample_weight=sample_weight, fit_clf=fit_clf, return_utilities=return_utilities, ) # calculate the margin used as utillities predict_proba = clf.predict_proba(candidates) utilities_index = np.argpartition(predict_proba, -2)[:, -2:] confidence = ( np.take_along_axis(predict_proba, utilities_index[:, [1]], 1) - np.take_along_axis(predict_proba, utilities_index[:, [0]], 1) ).reshape([-1]) utilities = 1 - confidence tmp_min_dist = copy(self.min_dist_) tmp_window = copy(self.window_) queried_indices = [] for t, (u, x_cand) in enumerate(zip(utilities, candidates)): local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor > 0: queried_indice = self.budget_manager_.query_by_utility( np.array([u]) ) if len(queried_indice) > 0: queried_indices.append(t) else: self.budget_manager_.query_by_utility(np.array([np.nan])) self.window_.append(x_cand) self.min_dist_ = tmp_min_dist self.window_ = tmp_window if return_utilities: return queried_indices, utilities else: return queried_indices def update( self, candidates, queried_indices, budget_manager_param_dict=None ): """Updates the budget manager and the count for seen and queried instances Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. budget_manager_param_dict : kwargs, optional (default=None) Optional kwargs for budget_manager. Returns ------- self : StreamDensityBasedAL The StreamDensityBasedAL returns itself, after it is updated. 
""" # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) if not hasattr(self, "window_"): self.window_ = deque(maxlen=self.window_size) if not hasattr(self, "min_dist_"): self.min_dist_ = deque(maxlen=self.window_size) if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("frequency_estimation needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") budget_manager_param_dict = ( {} if budget_manager_param_dict is None else budget_manager_param_dict ) new_candidates = [] for x_cand in candidates: local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor > 0: new_candidates.append(x_cand) else: new_candidates.append(np.nan) self.window_.append(x_cand) call_func( self.budget_manager_.update, candidates=new_candidates, queried_indices=queried_indices, **budget_manager_param_dict ) return self def _calculate_ldf(self, candidates): """Calculate the number of new nearest neighbor for candidates in the sliding window. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. Returns ------- ldf: array-like of shape (n_candiates) Numbers of new nearest neighbor for candidates """ ldf = 0 if len(self.window_) >= 1: distances = self.dist_func_(self.window_, candidates).ravel() is_new_nn = distances < np.array(self.min_dist_) ldf = np.sum(is_new_nn) for i in np.where(is_new_nn)[0]: self.min_dist_[i] = distances[i] self.min_dist_.append(np.min(distances)) else: self.min_dist_.append(np.inf) return ldf def _validate_data( self, candidates, clf, X, y, sample_weight, fit_clf, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. return_utilities : bool, If true, also return the utilities based on the query strategy. fit_clf : bool, If true, refit the classifier also requires X and y to be given. reset : bool, optional (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. 
X: np.ndarray, shape (n_samples, n_features) Checked training samples y: np.ndarray, shape (n_candidates) Checked training labels sampling_weight: np.ndarray, shape (n_candidates) Checked training sample weight fit_clf : bool, Checked boolean value of `fit_clf` candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples return_utilities : bool, Checked boolean value of `return_utilities`. """ candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) self._validate_random_state() X, y, sample_weight = self._validate_X_y_sample_weight( X=X, y=y, sample_weight=sample_weight ) clf = self._validate_clf(clf, X, y, sample_weight, fit_clf) # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("dist_func_ needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") # check density_threshold check_scalar(self.window_size, "window_size", int, min_val=1) if not hasattr(self, "window_"): self.window_ = deque(maxlen=self.window_size) if not hasattr(self, "min_dist_"): self.min_dist_ = deque(maxlen=self.window_size) return candidates, clf, X, y, sample_weight, fit_clf, return_utilities def _validate_clf(self, clf, X, y, sample_weight, fit_clf): """Validate if clf is a valid SkactivemlClassifier. If clf is untrained, clf is trained using X, y and sample_weight. Parameters ---------- clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) (default=None) Sample weights for X, used to fit the clf. fit_clf : bool, If true, refit the classifier also requires X and y to be given. Returns ------- clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. """ # Check if the classifier and its arguments are valid. check_type(clf, "clf", SkactivemlClassifier) check_type(fit_clf, "fit_clf", bool) if fit_clf: clf = clone(clf).fit(X, y, sample_weight) return clf def _validate_X_y_sample_weight(self, X, y, sample_weight): """Validate if X, y and sample_weight are numeric and of equal length. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) (default=None) Sample weights for X, used to fit the clf. Returns ------- X : array-like of shape (n_samples, n_features) Checked Input samples. y : array-like of shape (n_samples) Checked Labels of the input samples 'X'. 
Converts y to a numpy array """ if sample_weight is not None: sample_weight = np.array(sample_weight) check_consistent_length(sample_weight, y) if X is not None and y is not None: X = check_array(X) y = np.array(y) check_consistent_length(X, y) return X, y, sample_weight def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return DensityBasedSplitBudgetManager class CognitiveDualQueryStrategy(SingleAnnotatorStreamQueryStrategy): """CognitiveDualQueryStrategy This class is the base for the CognitiveDualQueryStrategy query strategy proposed in [1]. To use this strategy, refer to `CognitiveDualQueryStrategyRan`, `CognitiveDualQueryStrategyRanVarUn`, `CognitiveDualQueryStrategyVarUn` , and `CognitiveDualQueryStrategyFixUn`. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, a default budget manager will be used that is defined in the class inheriting from CognitiveDualQueryStrategy. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. 
        Note that the original paper does not update the budget manager if
        the local density factor is 0.

    See Also
    --------
    .budgetmanager.EstimatedBudgetZliobaite : BudgetManager implementing the
        base class for Zliobaite based budget managers
    CognitiveDualQueryStrategyRan : CognitiveDualQueryStrategy using the
        RandomBudgetManager that is based on EstimatedBudgetZliobaite
    CognitiveDualQueryStrategyFixUn : CognitiveDualQueryStrategy using the
        FixedUncertaintyBudgetManager that is based on
        EstimatedBudgetZliobaite
    CognitiveDualQueryStrategyVarUn : CognitiveDualQueryStrategy using the
        VariableUncertaintyBudgetManager that is based on
        EstimatedBudgetZliobaite
    CognitiveDualQueryStrategyRanVarUn : CognitiveDualQueryStrategy using the
        RandomVariableUncertaintyBudgetManager that is based on
        EstimatedBudgetZliobaite

    References
    ----------
    [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J.
        (2021). Online Active Learning for Drifting Data Streams. IEEE
        Transactions on Neural Networks and Learning Systems, 1-15.
    [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active
        Learning With Drifting Streaming Data. IEEE Transactions on Neural
        Networks and Learning Systems, 25(1), 27-39.
    [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High
        density-focused uncertainty sampling for active learning over
        evolving stream data. In BigMine 2014 (pp. 133-148).
    """

    def __init__(
        self,
        budget_manager=None,
        budget=None,
        density_threshold=1,
        cognition_window_size=10,
        dist_func=None,
        dist_func_dict=None,
        random_state=None,
        force_full_budget=False,
    ):
        super().__init__(budget=budget, random_state=random_state)
        self.budget_manager = budget_manager
        self.density_threshold = density_threshold
        self.dist_func = dist_func
        self.dist_func_dict = dist_func_dict
        self.cognition_window_size = cognition_window_size
        self.force_full_budget = force_full_budget

    def query(
        self,
        candidates,
        clf,
        X=None,
        y=None,
        sample_weight=None,
        fit_clf=False,
        return_utilities=False,
    ):
        """Ask the query strategy which instances in candidates to acquire.

        Please note that the decisions made by this function may differ from
        the final sampling; therefore, the query strategy should be informed
        about the final sampling decisions via update(...) afterwards.

        Parameters
        ----------
        candidates : {array-like, sparse matrix} of shape
        (n_samples, n_features)
            The instances which may be queried. Sparse matrices are accepted
            only if they are supported by the base query strategy.
        clf : SkactivemlClassifier
            Model implementing the methods `fit` and `predict_freq`.
        X : array-like of shape (n_samples, n_features), optional
        (default=None)
            Input samples used to fit the classifier.
        y : array-like of shape (n_samples), optional (default=None)
            Labels of the input samples 'X'. There may be missing labels.
        sample_weight : array-like of shape (n_samples,), optional
            Sample weights for X, used to fit the clf.
        fit_clf : bool, optional (default=False)
            If True, refit the classifier; this also requires X and y to be
            given.
        return_utilities : bool, optional (default=False)
            If True, also return the utilities based on the query strategy.
            The default is False.

        Returns
        -------
        queried_indices : ndarray of shape (n_queried_instances,)
            The indices of instances in candidates which should be queried,
            with 0 <= n_queried_instances <= n_samples.

        utilities : ndarray of shape (n_samples,), optional
            The utilities based on the query strategy. Only provided if
            return_utilities is True.
""" ( candidates, clf, X, y, sample_weight, fit_clf, return_utilities, ) = self._validate_data( candidates, clf=clf, X=X, y=y, sample_weight=sample_weight, fit_clf=fit_clf, return_utilities=return_utilities, ) # its the margin but used as utillities predict_proba = clf.predict_proba(candidates) confidence = np.max(predict_proba, axis=1) utilities = 1 - confidence # copy variables tmp_cognition_window = copy(self.cognition_window_) tmp_theta = copy(self.theta_) tmp_s = copy(self.s_) tmp_t_x = copy(self.t_x_) f = copy(self.f_) min_dist = copy(self.min_dist_) t = copy(self.t_) queried_indices = [] for i, (u, x_cand) in enumerate(zip(utilities, candidates)): local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor >= self.density_threshold: queried_indice = self.budget_manager_.query_by_utility( np.array([u]) ) if len(queried_indice) > 0: queried_indices.append(i) elif self.force_full_budget: self.budget_manager_.query_by_utility(np.array([np.nan])) self.t_ += 1 # overwrite changes self.cognition_window_ = tmp_cognition_window self.theta_ = tmp_theta self.s_ = tmp_s self.t_x_ = tmp_t_x self.f_ = f self.min_dist_ = min_dist self.t_ = t if return_utilities: return queried_indices, utilities else: return queried_indices def update( self, candidates, queried_indices, budget_manager_param_dict=None ): """Updates the budget manager and the count for seen and queried instances Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. budget_manager_param_dict : kwargs, optional (default=None) Optional kwargs for budget_manager. Returns ------- self : CognitiveDualQueryStrategy The CognitiveDualQueryStrategy returns itself, after it is updated. 
""" self._validate_force_full_budget() # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) # _init_members if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("frequency_estimation needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") if not hasattr(self, "min_dist_"): self.min_dist_ = [] if not hasattr(self, "t_"): self.t_ = 0 if not hasattr(self, "cognition_window_"): self.cognition_window_ = [] if not hasattr(self, "f_"): self.f_ = [] if not hasattr(self, "theta_"): self.theta_ = [] if not hasattr(self, "s_"): self.s_ = [] if not hasattr(self, "t_x_"): self.t_x_ = [] budget_manager_param_dict = ( {} if budget_manager_param_dict is None else budget_manager_param_dict ) new_candidates = [] for x_cand in candidates: local_density_factor = self._calculate_ldf([x_cand]) if local_density_factor >= self.density_threshold: new_candidates.append(x_cand) elif self.force_full_budget: new_candidates.append(np.nan) self.t_ += 1 call_func( self.budget_manager_.update, candidates=new_candidates, queried_indices=queried_indices, **budget_manager_param_dict ) return self def _calculate_ldf(self, candidates): """Calculate the number of new nearest neighbor for candiates in the cognition_window. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. Returns ------- ldf: array-like of shape (n_candiates) Numbers of new nearest neighbor for candidates """ ldf = 0 f = 1 t_x = self.t_ s = 1 theta = 0 if len(self.cognition_window_) >= 1: distances = self.dist_func_( self.cognition_window_, candidates ).ravel() is_new_nn = distances < np.array(self.min_dist_) ldf = np.sum(is_new_nn) for i in np.where(is_new_nn)[0]: self.t_x_[i] = t_x self.theta_[i] += 1 self.min_dist_[i] = distances[i] self.min_dist_.append(np.min(distances)) else: self.min_dist_.append(np.inf) for t, _ in enumerate(self.cognition_window_): self.f_[t] = 1 / (self.theta_[t] + 1) tmp = -self.f_[t] * (t_x - self.t_x_[t]) self.s_[t] = np.exp(tmp) if len(self.cognition_window_) > self.cognition_window_size: # remove element with the smallest memory strength remove_index = np.argmin(self.s_) self.cognition_window_.pop(remove_index) self.theta_.pop(remove_index) self.s_.pop(remove_index) self.t_x_.pop(remove_index) self.f_.pop(remove_index) self.min_dist_.pop(remove_index) self.cognition_window_.extend(candidates) self.theta_.append(theta) self.s_.append(s) self.t_x_.append(t_x) self.f_.append(f) return ldf def _validate_data( self, candidates, clf, X, y, sample_weight, fit_clf, return_utilities, reset=True, **check_candidates_params ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- candidates: array-like of shape (n_candidates, n_features) The instances which may be queried. Sparse matrices are accepted only if they are supported by the base query strategy. clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. 
X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. return_utilities : bool, If true, also return the utilities based on the query strategy. fit_clf : bool, If true, refit the classifier also requires X and y to be given. reset : bool, (default=True) Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. **check_candidates_params : kwargs Parameters passed to :func:`sklearn.utils.check_array`. Returns ------- candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. X: np.ndarray, shape (n_samples, n_features) Checked training samples y: np.ndarray, shape (n_candidates) Checked training labels sampling_weight: np.ndarray, shape (n_candidates) Checked training sample weight fit_clf : bool, Checked boolean value of `fit_clf` candidates: np.ndarray, shape (n_candidates, n_features) Checked candidate samples return_utilities : bool, Checked boolean value of `return_utilities`. """ candidates, return_utilities = super()._validate_data( candidates, return_utilities, reset=reset, **check_candidates_params ) self._validate_random_state() X, y, sample_weight = self._validate_X_y_sample_weight( X=X, y=y, sample_weight=sample_weight ) clf = self._validate_clf(clf, X, y, sample_weight, fit_clf) # check density_threshold check_scalar( self.density_threshold, "density_threshold", int, min_val=0 ) check_scalar( self.cognition_window_size, "cognition_window_size", int, min_val=1 ) self._validate_force_full_budget() # check if a budget_manager is set if not hasattr(self, "budget_manager_"): check_type( self.budget_manager, "budget_manager_", BudgetManager, type(None), ) self.budget_manager_ = check_budget_manager( self.budget, self.budget_manager, self._get_default_budget_manager(), ) if self.dist_func is None: self.dist_func_ = pairwise_distances else: self.dist_func_ = self.dist_func if not callable(self.dist_func_): raise TypeError("frequency_estimation needs to be a callable") self.dist_func_dict_ = ( self.dist_func_dict if self.dist_func_dict is not None else {} ) if not isinstance(self.dist_func_dict_, dict): raise TypeError("'dist_func_dict' must be a Python dictionary.") if not hasattr(self, "min_dist_"): self.min_dist_ = [] if not hasattr(self, "t_"): self.t_ = 0 if not hasattr(self, "cognition_window_"): self.cognition_window_ = [] if not hasattr(self, "f_"): self.f_ = [] if not hasattr(self, "theta_"): self.theta_ = [] if not hasattr(self, "s_"): self.s_ = [] if not hasattr(self, "t_x_"): self.t_x_ = [] return candidates, clf, X, y, sample_weight, fit_clf, return_utilities def _validate_clf(self, clf, X, y, sample_weight, fit_clf): """Validate if clf is a valid SkactivemlClassifier. If clf is untrained, clf is trained using X, y and sample_weight. Parameters ---------- clf : SkactivemlClassifier Model implementing the methods `fit` and `predict_freq`. X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. 
fit_clf : bool, If true, refit the classifier also requires X and y to be given. Returns ------- clf : SkactivemlClassifier Checked model implementing the methods `fit` and `predict_freq`. """ # Check if the classifier and its arguments are valid. check_type(clf, "clf", SkactivemlClassifier) check_type(fit_clf, "fit_clf", bool) if fit_clf: clf = clone(clf).fit(X, y, sample_weight) return clf def _validate_force_full_budget(self): # check force_full_budget check_type(self.force_full_budget, "force_full_budget", bool) if not hasattr(self, "budget_manager_") and not self.force_full_budget: warnings.warn( "force_full_budget is set to False. " "Therefore the full budget may not be utilised." ) def _validate_X_y_sample_weight(self, X, y, sample_weight): """Validate if X, y and sample_weight are numeric and of equal length. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples used to fit the classifier. y : array-like of shape (n_samples) Labels of the input samples 'X'. There may be missing labels. sample_weight : array-like of shape (n_samples,) Sample weights for X, used to fit the clf. Returns ------- X : array-like of shape (n_samples, n_features) Checked Input samples. y : array-like of shape (n_samples) Checked Labels of the input samples 'X'. Converts y to a numpy array """ if sample_weight is not None: sample_weight = np.array(sample_weight) check_consistent_length(sample_weight, y) if X is not None and y is not None: X = check_array(X) y = np.array(y) check_consistent_length(X, y) return X, y, sample_weight def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return RandomVariableUncertaintyBudgetManager class CognitiveDualQueryStrategyRan(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyRan This class implements the CognitiveDualQueryStrategy strategy with Random Sampling. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, RandomBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. 
dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.RandomBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for RandomBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return RandomBudgetManager class CognitiveDualQueryStrategyFixUn(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyFixUn This class implements the CognitiveDualQueryStrategy strategy with FixedUncertainty. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, FixedUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. 
cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.FixedUncertaintyBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for FixedUncertaintyBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return FixedUncertaintyBudgetManager class CognitiveDualQueryStrategyVarUn(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyVarUn This class implements the CognitiveDualQueryStrategy strategy with VariableUncertainty. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, VariableUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. 
cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.VariableUncertaintyBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for VariableUncertaintyBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return VariableUncertaintyBudgetManager class CognitiveDualQueryStrategyRanVarUn(CognitiveDualQueryStrategy): """CognitiveDualQueryStrategyRanVarUn This class implements the CognitiveDualQueryStrategy strategy with RandomVariableUncertainty. The CognitiveDualQueryStrategy strategy is an extension to the uncertainty based query strategies proposed by Žliobaitė et al. [2] and follows the same idea as StreamDensityBasedAL [3] where queries for labels is only allowed if the local density around the corresponding instance is sufficiently high. The authors propose the use of a cognitive window that monitors the most representative samples within a data stream. Parameters ---------- budget : float, optional (default=None) The budget which models the budgeting constraint used in the stream-based active learning setting. budget_manager : BudgetManager, optional (default=None) The BudgetManager which models the budgeting constraint used in the stream-based active learning setting. if set to None, RandomVariableUncertaintyBudgetManager will be used by default. The budget manager will be initialized based on the following conditions: If only a budget is given the default budget manager is initialized with the given budget. If only a budget manager is given use the budget manager. If both are not given the default budget manager with the default budget. If both are given and the budget differs from budgetmanager.budget a warning is thrown. 
density_threshold : int, optional (default=1) Determines the local density factor size that needs to be reached in order to sample the candidate. cognition_window_size : int, optional (default=10) Determines the size of the cognition window random_state : int, RandomState instance, optional (default=None) Controls the randomness of the estimator. dist_func : callable, optional (default=None) The distance function used to calculate the distances within the local density window. If None use `sklearn.metrics.pairwise.pairwise_distances` dist_func_dict : dict, optional (default=None) Additional parameters for `dist_func`. force_full_budget : bool, optional (default=False) If true, tries to utilize the full budget. The paper doesn't update the budget manager if the locale density factor is 0 See Also -------- .budgetmanager.RandomVariableUncertaintyBudgetManager : The default budget manager .budgetmanager.EstimatedBudgetZliobaite : The base class for RandomVariableUncertaintyBudgetManager References ---------- [1] Liu, S., Xue, S., Wu, J., Zhou, C., Yang, J., Li, Z., & Cao, J. (2021). Online Active Learning for Drifting Data Streams. IEEE Transactions on Neural Networks and Learning Systems, 1-15. [2] Žliobaitė, I., Bifet, A., Pfahringer, B., & Holmes, G. (2014). Active Learning With Drifting Streaming Data. IEEE Transactions on Neural Networks and Learning Systems, 25(1), 27-39. [3] Ienco, D., Pfahringer, B., & Zliobaitė, I. (2014). High density-focused uncertainty sampling for active learning over evolving stream data. In BigMine 2014 (pp. 133-148). """ def __init__( self, budget=None, density_threshold=1, cognition_window_size=10, dist_func=None, dist_func_dict=None, random_state=None, force_full_budget=False, ): super().__init__( budget=budget, random_state=random_state, budget_manager=None, density_threshold=density_threshold, dist_func=dist_func, dist_func_dict=dist_func_dict, cognition_window_size=cognition_window_size, force_full_budget=force_full_budget, ) def _get_default_budget_manager(self): """Provide the budget manager that will be used as default. Returns ------- budget_manager : BudgetManager The BudgetManager that should be used by default. """ return RandomVariableUncertaintyBudgetManager
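The three strategy classes above describe the cognition-window mechanism only verbally. Below is a minimal, self-contained sketch of the underlying idea, not the library's or the paper's exact computation: keep a sliding window of recent stream samples and let a candidate reach the uncertainty criterion and the budget manager only if its local density factor, here simply the number of window samples within a hypothetical radius, is at least `density_threshold`.

from collections import deque

import numpy as np
from sklearn.metrics.pairwise import pairwise_distances

# Illustrative parameters; the names mirror the constructor arguments above.
cognition_window_size = 10
density_threshold = 1
radius = 0.5  # hypothetical cut-off below which samples count as neighbors

window = deque(maxlen=cognition_window_size)  # simplified cognition window


def local_density_factor(x):
    """Number of window samples within `radius` of the candidate `x`."""
    if len(window) == 0:
        return 0
    dists = pairwise_distances(np.asarray(window), x.reshape(1, -1)).ravel()
    return int(np.sum(dists <= radius))


rng = np.random.default_rng(0)
n_passed = 0
for t in range(30):
    x_t = rng.normal(size=2)  # next sample of a hypothetical stream
    if local_density_factor(x_t) >= density_threshold:
        # Only now would the uncertainty criterion and the budget manager
        # decide whether the label is actually requested.
        n_passed += 1
    window.append(x_t)
print(n_passed, "of 30 samples passed the density gate")

In the actual strategies, the budget manager returned by `_get_default_budget_manager` then decides whether the label is requested, and `force_full_budget` controls whether that manager is also updated when the local density factor is 0.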
import numpy as np
from copy import deepcopy

from skactiveml.base import BudgetManager
from skactiveml.utils import check_scalar, check_random_state


class DensityBasedSplitBudgetManager(BudgetManager):
    """Budget manager which checks whether the specified budget has been
    exhausted already. If not, an instance is queried when the utility is
    higher than the specified budget and when the probability of the most
    likely class exceeds a time-dependent threshold calculated based on the
    budget, the number of classes, and the number of observed and acquired
    samples.

    This class's logic is the same as that of SplitBudgetManager except for
    how the available budget is calculated. This budget manager calculates
    the fixed budget spent and compares that to the budget. If the ratio is
    smaller than the specified budget, i.e., budget - u / t > 0, the budget
    manager samples an instance when its utility is higher than the budget,
    where u is the number of queried instances within t observed instances.

    Parameters
    ----------
    budget : float, optional (default=None)
        Specifies the ratio of instances which are allowed to be queried,
        with 0 <= budget <= 1. See also :class:`BudgetManager`.
    theta : float, optional (default=1.0)
        Specifies the starting threshold at which instances are purchased.
        This value of theta is recalculated after each instance.
    s : float, optional (default=0.01)
        Specifies the value by which theta is decreased or increased based
        on the purchase of the given label.
    delta : float, optional (default=1.0)
        Specifies the standard deviation of the distribution.
    random_state : int | np.random.RandomState, optional (default=None)
        Random state for candidate selection.

    See Also
    --------
    EstimatedBudgetZliobaite : BudgetManager implementing the base class for
        Zliobaite based budget managers.
    SplitBudgetManager : BudgetManager that is using EstimatedBudgetZliobaite.
    """

    def __init__(
        self,
        budget=None,
        theta=1.0,
        s=0.01,
        delta=1.0,
        random_state=None,
    ):
        super().__init__(budget)
        self.theta = theta
        self.s = s
        self.delta = delta
        self.random_state = random_state

    def query_by_utility(self, utilities):
        """Ask the budget manager which utilities are sufficient to query the
        corresponding instance.

        Parameters
        ----------
        utilities : ndarray of shape (n_samples,)
            The utilities provided by the stream-based active learning
            strategy, which are used to determine whether sampling an
            instance is worth it given the budgeting constraint.

        Returns
        -------
        queried_indices : ndarray of shape (n_queried_instances,)
            The indices of instances represented by utilities which should be
            queried, with 0 <= n_queried_instances <= n_samples.
        """
        utilities = self._validate_data(utilities)
        confidence = 1 - utilities

        # initialize return parameters
        queried_indices = []
        tmp_u = self.u_
        tmp_t = self.t_
        tmp_theta = self.theta_

        prior_random_state = self.random_state_.get_state()

        # get confidence
        for i, u in enumerate(confidence):
            tmp_t += 1
            budget_left = self.budget_ > tmp_u / tmp_t
            if not budget_left:
                sample = False
            else:
                eta = self.random_state_.normal(1, self.delta)
                theta_random = tmp_theta * eta
                sample = u < theta_random
                # collect the indices of the instances that should be queried
                if sample:
                    tmp_theta *= 1 - self.s
                    queried_indices.append(i)
                else:
                    tmp_theta *= 1 + self.s
            tmp_u += sample

        self.random_state_.set_state(prior_random_state)

        return queried_indices

    def update(self, candidates, queried_indices):
        """Updates the budget manager.

        Parameters
        ----------
        candidates : {array-like, sparse matrix} of shape
        (n_samples, n_features)
            The instances which could be queried. Sparse matrices are
            accepted only if they are supported by the base query strategy.
        queried_indices : array-like of shape (n_samples,)
            Indicates which instances from candidates have been queried.

        Returns
        -------
        self : DensityBasedSplitBudgetManager
            The DensityBasedSplitBudgetManager returns itself, after it is
            updated.
        """
        self._validate_data(np.array([]))
        queried = np.zeros(len(candidates))
        queried[queried_indices] = 1
        self.random_state_.random_sample(len(candidates))
        for s in queried:
            self.t_ += 1
            if self.budget_ > self.u_ / self.t_:
                if s:
                    self.theta_ *= 1 - self.s
                else:
                    self.theta_ *= 1 + self.s
            self.u_ += s

        return self

    def _validate_data(self, utilities):
        """Validate input data.

        Parameters
        ----------
        utilities : ndarray of shape (n_samples,)
            The utilities provided by the stream-based active learning
            strategy.

        Returns
        -------
        utilities : ndarray of shape (n_samples,)
            Checked utilities.
        """
        utilities = super()._validate_data(utilities)
        # Check theta.
        self._validate_theta()
        # Check s.
        check_scalar(
            self.s, "s", float, min_val=0, min_inclusive=False, max_val=1
        )
        # Check delta.
        check_scalar(
            self.delta, "delta", float, min_val=0, min_inclusive=False
        )
        # Check if the estimation of queried/observed labels has begun.
        if not hasattr(self, "u_"):
            self.u_ = 0
        if not hasattr(self, "t_"):
            self.t_ = 0
        self._validate_random_state()

        return utilities

    def _validate_theta(self):
        """Validate if theta is set as a float."""
        check_scalar(self.theta, "theta", float)
        # Check if theta exists.
        if not hasattr(self, "theta_"):
            self.theta_ = self.theta

    def _validate_random_state(self):
        """Creates a copy 'random_state_' if random_state is an instance of
        np.random.RandomState. If not, create a new random state. See also
        :func:`~sklearn.utils.check_random_state`.
        """
        if not hasattr(self, "random_state_"):
            self.random_state_ = deepcopy(self.random_state)
        self.random_state_ = check_random_state(self.random_state_)
The code above is from skactiveml/stream/budgetmanager/_threshold_budget.py (scikit-activeml 0.4.1).
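A short usage sketch for the budget manager above (the utility values are made up, and the import assumes the class is re-exported from `skactiveml.stream.budgetmanager`, as the `See Also` references suggest). `query_by_utility` only simulates the decision on temporary copies of the counters, while `update` afterwards commits `u_`, `t_`, and the threshold `theta_`.

import numpy as np
from skactiveml.stream.budgetmanager import DensityBasedSplitBudgetManager

# Hypothetical utilities produced by a stream query strategy for one batch.
utilities = np.array([0.2, 0.9, 0.55, 0.7, 0.1])
candidates = np.zeros((len(utilities), 2))  # placeholder feature vectors

bm = DensityBasedSplitBudgetManager(
    budget=0.1, theta=1.0, s=0.01, delta=1.0, random_state=0
)

# Ask which candidates are worth querying under the budget constraint ...
queried_indices = bm.query_by_utility(utilities)

# ... and report what was actually queried so that the internal counters
# and the threshold are updated.
bm.update(candidates, queried_indices)
print(queried_indices)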
from collections import deque from copy import copy import numpy as np from ...base import BudgetManager from ...utils import check_scalar class BalancedIncrementalQuantileFilter(BudgetManager): """ The Balanced Incremental Quantile Filter has been proposed together with Probabilistic Active Learning for Datastreams [1]. It assesses whether a given spatial utility (i.e., obtained via ProbabilisticAL) warrants to query the label in question. The spatial ultilities are compared against a threshold that is derived from a quantile (budget) of the last w observed utilities. To balance the number of queries, w_tol is used to increase or decrease the threshold based on the number of available acquisitions. Parameters ---------- w : int, optional (default=100) The number of observed utilities that are used to infer the threshold. w should be higher than 0. w_tol : int, optional (default=50) The window in which the number of acquisitions should stay within the budget. w_tol should be higher than 0. budget : float, optional (default=None) Specifies the ratio of instances which are allowed to be queried, with 0 <= budget <= 1. See Also :class:`BudgetManager`. References ---------- [1] Kottke D., Krempl G., Spiliopoulou M. (2015) Probabilistic Active Learning in Datastreams. In: Fromont E., De Bie T., van Leeuwen M. (eds) Advances in Intelligent Data Analysis XIV. IDA 2015. Lecture Notes in Computer Science, vol 9385. Springer, Cham. """ def __init__(self, w=100, w_tol=50, budget=None): super().__init__(budget) self.w = w self.w_tol = w_tol def query_by_utility(self, utilities): """Ask the budget manager which utilities are sufficient to query the corresponding instance. Parameters ---------- utilities : ndarray of shape (n_samples,) The utilities provided by the stream-based active learning strategy, which are used to determine whether sampling an instance is worth it given the budgeting constraint. Returns ------- queried_indices : ndarray of shape (n_queried_instances,) The indices of instances represented by utilities which should be queried, with 0 <= n_queried_instances <= n_samples. """ utilities = self._validate_data(utilities) # intialize return parameters queried_indices = [] tmp_queried_instances_ = self.queried_instances_ tmp_observed_instances_ = self.observed_instances_ tmp_history_sorted_ = copy(self.history_sorted_) for i, u in enumerate(utilities): tmp_observed_instances_ += 1 tmp_history_sorted_.append(u) theta = np.quantile(tmp_history_sorted_, (1 - self.budget_)) min_ranking = np.min(tmp_history_sorted_) max_ranking = np.max(tmp_history_sorted_) range_ranking = max_ranking - min_ranking acq_left = ( self.budget_ * tmp_observed_instances_ - tmp_queried_instances_ ) theta_bal = theta - (range_ranking * (acq_left / self.w_tol)) sample = u >= theta_bal if sample: tmp_queried_instances_ += 1 queried_indices.append(i) return queried_indices def update(self, candidates, queried_indices, utilities): """Updates the budget manager. Parameters ---------- candidates : {array-like, sparse matrix} of shape (n_samples, n_features) The instances which could be queried. Sparse matrices are accepted only if they are supported by the base query strategy. queried_indices : array-like Indicates which instances from candidates have been queried. utilities : ndarray of shape (n_samples,) The utilities based on the query strategy. Returns ------- self : EstimatedBudget The EstimatedBudget returns itself, after it is updated. 
""" self._validate_data(np.array([0])) queried = np.zeros(len(candidates)) queried[queried_indices] = 1 self.observed_instances_ += len(queried) self.queried_instances_ += np.sum(queried) self.history_sorted_.extend(utilities) return self def _validate_data(self, utilities): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- utilities : ndarray of shape (n_samples,) candidate samples Returns ------- utilities : ndarray of shape (n_samples,) Checked candidate samples """ utilities = super()._validate_data(utilities) check_scalar(self.w, "w", int, min_val=0, min_inclusive=False) check_scalar( self.w_tol, "w_tol", (float, int), min_val=0, min_inclusive=False ) # check if counting of instances has begun if not hasattr(self, "observed_instances_"): self.observed_instances_ = 0 if not hasattr(self, "queried_instances_"): self.queried_instances_ = 0 if not hasattr(self, "history_sorted_"): self.history_sorted_ = deque(maxlen=self.w) return utilities
The code above is from skactiveml/stream/budgetmanager/_balanced_incremental_quantile_filter.py (scikit-activeml 0.4.1).
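Usage of the filter follows the same pattern (again with made-up utilities, and the import location assumed as above). Note that `update` additionally needs the utilities themselves so that the window `history_sorted_` of the last `w` observed utilities can be extended.

import numpy as np
from skactiveml.stream.budgetmanager import BalancedIncrementalQuantileFilter

rng = np.random.default_rng(0)
bm = BalancedIncrementalQuantileFilter(w=100, w_tol=50, budget=0.1)

# Process a hypothetical stream in small batches.
for _ in range(5):
    utilities = rng.random(10)        # utilities from some query strategy
    candidates = rng.random((10, 2))  # corresponding candidate samples
    queried_indices = bm.query_by_utility(utilities)
    # The utilities are passed again to extend the sliding window.
    bm.update(candidates, queried_indices, utilities)
    print(len(queried_indices), "queried in this batch")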
import copy import warnings from collections.abc import Iterable from inspect import Parameter, signature import numpy as np from sklearn.utils.validation import ( check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn, ) from ._label import MISSING_LABEL, check_missing_label, is_unlabeled def check_scalar( x, name, target_type, min_inclusive=True, max_inclusive=True, min_val=None, max_val=None, ): """Validate scalar parameters type and value. Parameters ---------- x : object The scalar parameter to validate. name : str The name of the parameter to be printed in error messages. target_type : type or tuple Acceptable data types for the parameter. min_val : float or int, optional (default=None) The minimum valid value the parameter can take. If None (default) it is implied that the parameter does not have a lower bound. min_inclusive : bool, optional (default=True) If true, the minimum valid value is inclusive, otherwise exclusive. max_val : float or int, optional (default=None) The maximum valid value the parameter can take. If None (default) it is implied that the parameter does not have an upper bound. max_inclusive : bool, optional (default=True) If true, the maximum valid value is inclusive, otherwise exclusive. Raises ------- TypeError If the parameter's type does not match the desired type. ValueError If the parameter's value violates the given bounds. """ if not isinstance(x, target_type): raise TypeError( "`{}` must be an instance of {}, not {}.".format( name, target_type, type(x) ) ) if min_inclusive: if min_val is not None and (x < min_val or np.isnan(x)): raise ValueError( "`{}`= {}, must be >= " "{}.".format(name, x, min_val) ) else: if min_val is not None and (x <= min_val or np.isnan(x)): raise ValueError( "`{}`= {}, must be > " "{}.".format(name, x, min_val) ) if max_inclusive: if max_val is not None and (x > max_val or np.isnan(x)): raise ValueError( "`{}`= {}, must be <= " "{}.".format(name, x, max_val) ) else: if max_val is not None and (x >= max_val or np.isnan(x)): raise ValueError( "`{}`= {}, must be < " "{}.".format(name, x, max_val) ) def check_classifier_params(classes, missing_label, cost_matrix=None): """Check whether the parameters are compatible to each other (only if `classes` is not None). Parameters ---------- classes : array-like, shape (n_classes) Array of class labels. missing_label : {number, str, None, np.nan} Symbol to represent a missing label. cost_matrix : array-like, shape (n_classes, n_classes), default=None Cost matrix. If None, cost matrix will be not checked. """ check_missing_label(missing_label) if classes is not None: check_classes(classes) dtype = np.array(classes).dtype check_missing_label(missing_label, target_type=dtype, name="classes") n_labeled = is_unlabeled(y=classes, missing_label=missing_label).sum() if n_labeled > 0: raise ValueError( f"`classes={classes}` contains " f"`missing_label={missing_label}.`" ) if cost_matrix is not None: check_cost_matrix(cost_matrix=cost_matrix, n_classes=len(classes)) else: if cost_matrix is not None: raise ValueError( "You cannot specify 'cost_matrix' without " "specifying 'classes'." ) def check_classes(classes): """Check whether class labels are uniformly strings or numbers. Parameters ---------- classes : array-like, shape (n_classes) Array of class labels. """ if not isinstance(classes, Iterable): raise TypeError( "'classes' is not iterable. 
Got {}".format(type(classes)) ) try: classes_sorted = np.array(sorted(set(classes))) if len(classes) != len(classes_sorted): raise ValueError("Duplicate entries in 'classes'.") except TypeError: types = sorted(t.__qualname__ for t in set(type(v) for v in classes)) raise TypeError( "'classes' must be uniformly strings or numbers. Got {}".format( types ) ) def check_class_prior(class_prior, n_classes): """Check if the class_prior is a valid prior. Parameters ---------- class_prior : numeric | array_like, shape (n_classes) A class prior. n_classes : int The number of classes. Returns ------- class_prior : np.ndarray, shape (n_classes) Numpy array as prior. """ if class_prior is None: raise TypeError("'class_prior' must not be None.") check_scalar(n_classes, name="n_classes", target_type=int, min_val=1) if np.isscalar(class_prior): check_scalar( class_prior, name="class_prior", target_type=(int, float), min_val=0, ) class_prior = np.array([class_prior] * n_classes) else: class_prior = check_array(class_prior, ensure_2d=False) is_negative = np.sum(class_prior < 0) if class_prior.shape != (n_classes,) or is_negative: raise ValueError( "`class_prior` must be either a non-negative" "float or a list of `n_classes` non-negative " "floats." ) return class_prior.reshape(-1) def check_cost_matrix( cost_matrix, n_classes, only_non_negative=False, contains_non_zero=False, diagonal_is_zero=False, ): """Check whether cost matrix has shape `(n_classes, n_classes)`. Parameters ---------- cost_matrix : array-like, shape (n_classes, n_classes) Cost matrix. n_classes : int Number of classes. only_non_negative : bool, optional (default=True) This parameter determines whether the matrix must contain only non negative cost entries. contains_non_zero : bool, optional (default=True) This parameter determines whether the matrix must contain at least on non-zero cost entry. diagonal_is_zero : bool, optional (default=True) This parameter determines whether the diagonal cost entries must be zero. Returns ------- cost_matrix_new : np.ndarray, shape (n_classes, n_classes) Numpy array as cost matrix. """ check_scalar(n_classes, target_type=int, name="n_classes", min_val=1) cost_matrix_new = check_array( np.array(cost_matrix, dtype=float), ensure_2d=True ) if cost_matrix_new.shape != (n_classes, n_classes): raise ValueError( "'cost_matrix' must have shape ({}, {}). " "Got {}.".format(n_classes, n_classes, cost_matrix_new.shape) ) if np.sum(cost_matrix_new < 0) > 0: if only_non_negative: raise ValueError( "'cost_matrix' must contain only non-negative cost entries." ) else: warnings.warn("'cost_matrix' contains negative cost entries.") if n_classes != 1 and np.sum(cost_matrix_new != 0) == 0: if contains_non_zero: raise ValueError( "'cost_matrix' must contain at least one non-zero cost " "entry." ) else: warnings.warn( "'cost_matrix' contains contains no non-zero cost entry." ) if np.sum(np.diag(cost_matrix_new) != 0) > 0: if diagonal_is_zero: raise ValueError( "'cost_matrix' must contain only cost entries being zero on " "its diagonal." ) else: warnings.warn( "'cost_matrix' contains non-zero cost entries on its diagonal." 
) return cost_matrix_new def check_X_y( X=None, y=None, X_cand=None, sample_weight=None, sample_weight_cand=None, accept_sparse=False, *, accept_large_sparse=True, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, multi_output=False, allow_nan=None, ensure_min_samples=1, ensure_min_features=1, y_numeric=False, estimator=None, missing_label=MISSING_LABEL, ): """Input validation for standard estimators. Checks X and y for consistent length, enforces X to be 2D and y 1D. By default, X is checked to be non-empty and containing only finite values. Standard input checks are also applied to y, such as checking that y does not have np.nan or np.inf targets. For multi-label y, set multi_output=True to allow 2D and sparse y. If the dtype of X is object, attempt converting to float, raising on failure. Parameters ---------- X : nd-array, list or sparse matrix Labeled input data. y : nd-array, list or sparse matrix Labels for X. X_cand : nd-array, list or sparse matrix (default=None) Unlabeled input data sample_weight : array-like of shape (n_samples,) (default=None) Sample weights. sample_weight_cand : array-like of shape (n_candidates,) (default=None) Sample weights of the candidates. accept_sparse : string, boolean or list of string (default=False) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. If the input is sparse but not in the allowed format, it will be converted to the first listed format. True allows the input to be any format. False means that a sparse matrix input will raise an error. accept_large_sparse : bool (default=True) If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by accept_sparse, accept_large_sparse will cause it to be accepted only if its indices are stored with a 32-bit dtype. .. versionadded:: 0.20 dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean or 'allow-nan', (default=True) Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter does not influence whether y can have np.inf, np.nan, pd.NA values. The possibilities are: - True: Force all values of X to be finite. - False: accepts np.inf, np.nan, pd.NA in X. - 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. .. versionchanged:: 0.23 Accepts `pd.NA` and converts it into `np.nan` ensure_2d : boolean (default=True) Whether to raise a value error if X is not 2D. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. multi_output : boolean (default=False) Whether to allow 2D y (array or sparse matrix). If false, y will be validated as a vector. y cannot have np.nan or np.inf values if multi_output=True. allow_nan : boolean (default=None) Whether to allow np.nan in y. ensure_min_samples : int (default=1) Make sure that X has a minimum number of samples in its first axis (rows for a 2D array). 
ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when X has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. y_numeric : boolean (default=False) Whether to ensure that y has a numeric type. If dtype of y is object, it is converted to float64. Should only be used for regression algorithms. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. missing_label : {scalar, string, np.nan, None}, (default=np.nan) Value to represent a missing label. Returns ------- X_converted : object The converted and validated X. y_converted : object The converted and validated y. candidates : object The converted and validated candidates Only returned if candidates is not None. sample_weight : np.ndarray The converted and validated sample_weight. sample_weight_cand : np.ndarray The converted and validated sample_weight_cand. Only returned if candidates is not None. """ if allow_nan is None: allow_nan = True if missing_label is np.nan else False if X is not None: X = check_array( X, accept_sparse=accept_sparse, accept_large_sparse=accept_large_sparse, dtype=dtype, order=order, copy=copy, force_all_finite=force_all_finite, ensure_2d=ensure_2d, allow_nd=allow_nd, ensure_min_samples=ensure_min_samples, ensure_min_features=ensure_min_features, estimator=estimator, ) if y is not None: if multi_output: y = check_array( y, accept_sparse="csr", force_all_finite=True, ensure_2d=False, dtype=None, ) else: y = column_or_1d(y, warn=True) assert_all_finite(y, allow_nan=allow_nan) if y_numeric and y.dtype.kind == "O": y = y.astype(np.float64) if X is not None and y is not None: check_consistent_length(X, y) if sample_weight is None: sample_weight = np.ones(y.shape) sample_weight = check_array(sample_weight, ensure_2d=False) check_consistent_length(y, sample_weight) if ( y.ndim > 1 and y.shape[1] > 1 or sample_weight.ndim > 1 and sample_weight.shape[1] > 1 ): check_consistent_length(y.T, sample_weight.T) if X_cand is not None: X_cand = check_array( X_cand, accept_sparse=accept_sparse, accept_large_sparse=accept_large_sparse, dtype=dtype, order=order, copy=copy, force_all_finite=force_all_finite, ensure_2d=ensure_2d, allow_nd=allow_nd, ensure_min_samples=ensure_min_samples, ensure_min_features=ensure_min_features, estimator=estimator, ) if X is not None and X_cand.shape[1] != X.shape[1]: raise ValueError( "The number of features of candidates does not match" "the number of features of X" ) if sample_weight_cand is None: sample_weight_cand = np.ones(len(X_cand)) sample_weight_cand = check_array(sample_weight_cand, ensure_2d=False) check_consistent_length(X_cand, sample_weight_cand) if X_cand is None: return X, y, sample_weight else: return X, y, X_cand, sample_weight, sample_weight_cand def check_random_state(random_state, seed_multiplier=None): """Check validity of the given random state. Parameters ---------- random_state : None | int | instance of RandomState If random_state is None, return the RandomState singleton used by np.random. If random_state is an int, return a new RandomState. If random_state is already a RandomState instance, return it. Otherwise raise ValueError. 
seed_multiplier : None | int, optional (default=None) If the random_state and seed_multiplier are not None, draw a new int from the random state, multiply it with the multiplier, and use the product as the seed of a new random state. Returns ------- random_state: instance of RandomState The validated random state. """ if random_state is None or seed_multiplier is None: return check_random_state_sklearn(random_state) check_scalar( seed_multiplier, name="seed_multiplier", target_type=int, min_val=1 ) random_state = copy.deepcopy(random_state) random_state = check_random_state_sklearn(random_state) seed = (random_state.randint(1, 2**31) * seed_multiplier) % (2**31) return np.random.RandomState(seed) def check_indices(indices, A, dim="adaptive", unique=True): """Check if indices fit to array. Parameters ---------- indices : array-like of shape (n_indices, n_dim) or (n_indices,) The considered indices, where for every `i = 0, ..., n_indices - 1` `indices[i]` is interpreted as an index to the array `A`. A : array-like The array that is indexed. dim : int or tuple of ints The dimensions of the array that are indexed. If `dim` equals `'adaptive'`, `dim` is set to first indices corresponding to the shape of `indices`. E.g., if `indices` is of shape (n_indices,), `dim` is set `0`. unique: bool or `check_unique` If `unique` is `True` unique indices are returned. If `unique` is `'check_unique'` an exception is raised if the indices are not unique. Returns ------- indices: tuple of np.ndarrays or np.ndarray The validated indices. """ indices = check_array(indices, dtype=int, ensure_2d=False) A = check_array(A, allow_nd=True, force_all_finite=False, ensure_2d=False) if unique == "check_unique": if indices.ndim == 1: n_unique_indices = len(np.unique(indices)) else: n_unique_indices = len(np.unique(indices, axis=0)) if n_unique_indices < len(indices): raise ValueError( f"`indices` contains two different indices of the " f"same value." ) elif unique: if indices.ndim == 1: indices = np.unique(indices) else: indices = np.unique(indices, axis=0) check_type(dim, "dim", int, tuple, target_vals=["adaptive"]) if dim == "adaptive": if indices.ndim == 1: dim = 0 else: dim = tuple(range(indices.shape[1])) if isinstance(dim, tuple): for n in dim: check_type(n, "entry of `dim`", int) if A.ndim <= max(dim): raise ValueError( f"`dim` contains entry of value {max(dim)}, but all" f"entries of dim must be smaller than {A.ndim}." ) if len(dim) != indices.shape[1]: raise ValueError( f"shape of `indices` along dimension 1 is " f"{indices.shape[0]}, but must be {len(dim)}" ) indices = tuple(indices.T) for (i, n) in enumerate(indices): if np.any(indices[i] >= A.shape[dim[i]]): raise ValueError( f"`indices[{i}]` contains index of value " f"{np.max(indices[i])} but all indices must be" f" less than {A.shape[dim[i]]}." ) return indices else: if A.ndim <= dim: raise ValueError( f"`dim` has value {dim}, but must be smaller than " f"{A.ndim}." ) if np.any(indices >= A.shape[dim]): raise ValueError( f"`indices` contains index of value " f"{np.max(indices)} but all indices must be" f" less than {A.shape[dim]}." ) return indices def check_type( obj, name, *target_types, target_vals=None, indicator_funcs=None ): """Check if obj is one of the given types. It is also possible to allow specific values. Further it is possible to pass indicator functions that can also accept obj. Thereby obj must either have a correct type a correct value or be accepted by an indicator function. Parameters ---------- obj: object The object to be checked. 
name: str The variable name of the object. target_types : iterable The possible types. target_vals : iterable, optional (default=None) Possible further values that the object is allowed to equal. indicator_funcs : iterable, optional (default=None) Possible further custom indicator (boolean) functions that accept the object by returning `True` if the object is passed as a parameter. """ target_vals = target_vals if target_vals is not None else [] indicator_funcs = indicator_funcs if indicator_funcs is not None else [] wrong_type = not isinstance(obj, target_types) wrong_value = obj not in target_vals wrong_index = all(not i_func(obj) for i_func in indicator_funcs) if wrong_type and wrong_value and wrong_index: error_str = f"`{name}` " if len(target_types) == 0 and len(target_vals) == 0: error_str += f" must" if len(target_vals) == 0 and len(target_types) > 0: error_str += f" has type `{type(obj)}`, but must" elif len(target_vals) > 0 and len(target_types) == 0: error_str += f" has value `{obj}`, but must" else: error_str += f" has type `{type(obj)}` and value `{obj}`, but must" if len(target_types) == 1: error_str += f" have type `{target_types[0]}`" elif 1 <= len(target_types) <= 3: error_str += " have type" for i in range(len(target_types) - 1): error_str += f" `{target_types[i]}`," error_str += f" or `{target_types[len(target_types) - 1]}`" elif len(target_types) > 3: error_str += ( f" have one of the following types: {set(target_types)}" ) if len(target_vals) > 0: if len(target_types) > 0 and len(indicator_funcs) == 0: error_str += " or" elif len(target_types) > 0 and len(indicator_funcs) > 0: error_str += "," error_str += ( f" equal one of the following values: {set(target_vals)}" ) if len(indicator_funcs) > 0: if len(target_types) > 0 or len(target_vals) > 0: error_str += " or" error_str += ( f" be accepted by one of the following custom boolean " f"functions: {set(i_f.__name__ for i_f in indicator_funcs)}" ) raise TypeError(error_str + ".") def _check_callable(func, name, n_positional_parameters=None): """Checks if function is a callable and if the number of free parameters is correct. Parameters ---------- func: callable The functions to be validated. name: str The name of the function n_positional_parameters: int, optional (default=None) The number of free parameters. If `n_free_parameters` is `None`, `n_free_parameters` is set to `1`. """ if n_positional_parameters is None: n_positional_parameters = 1 if not callable(func): raise TypeError( f"`{name}` must be callable. " f"`{name}` is of type {type(func)}" ) # count the number of arguments that have no default value n_actual_positional_parameters = len( list( filter( lambda x: x.default == Parameter.empty, signature(func).parameters.values(), ) ) ) if n_actual_positional_parameters != n_positional_parameters: raise ValueError( f"The number of positional parameters of the callable has to " f"equal {n_positional_parameters}. " f"The number of positional parameters is " f"{n_actual_positional_parameters}." ) def check_bound( bound=None, X=None, ndim=2, epsilon=0, bound_must_be_given=False ): """Validates bound and returns the bound of X if bound is None. `bound` or `X` must not be None. Parameters ---------- bound: array-like, shape (2, ndim), optional (default=None) The given bound of shape [[x1_min, x2_min, ..., xndim_min], [x1_max, x2_max, ..., xndim_max]] X: matrix-like, shape (n_samples, ndim), optional (default=None) The sample matrix X is the feature matrix representing samples. 
ndim: int, optional (default=2) The number of dimensions. epsilon: float, optional (default=0) The minimal distance between the returned bound and the values of `X`, if `bound` is not specified. bound_must_be_given: bool, optional (default=False) Whether it is allowed for the bound to be `None` and to be inferred by `X`. Returns ------- bound: array-like, shape (2, ndim), optional (default=None) The given bound or bound of X. """ if X is not None: X = check_array(X) if X.shape[1] != ndim: raise ValueError( f"`X` along axis 1 must be of length {ndim}. " f"`X` along axis 1 is of length {X.shape[1]}." ) if bound is not None: bound = check_array(bound) if bound.shape != (2, ndim): raise ValueError( f"Shape of `bound` must be (2, {ndim}). " f"Shape of `bound` is {bound.shape}." ) elif bound_must_be_given: raise ValueError("`bound` must not be `None`.") if bound is None and X is not None: minima = np.nanmin(X, axis=0) - epsilon maxima = np.nanmax(X, axis=0) + epsilon bound = np.append(minima.reshape(1, -1), maxima.reshape(1, -1), axis=0) return bound elif bound is not None and X is not None: if np.any(np.logical_or(bound[0] > X, X > bound[1])): warnings.warn("`X` contains values not within range of `bound`.") return bound elif bound is not None: return bound else: raise ValueError("`X` or `bound` must not be None.") def check_budget_manager( budget, budget_manager, default_budget_manager_class, default_budget_manager_dict=None, ): """Validate if budget manager is a budgetmanager class and create a copy 'budget_manager_'. """ if default_budget_manager_dict is None: default_budget_manager_dict = {} if budget_manager is None: budget_manager_ = default_budget_manager_class( budget=budget, **default_budget_manager_dict ) else: if budget is not None and budget != budget_manager.budget: warnings.warn( "budgetmanager is already given such that the budget " "is not used. The given budget differs from the " "budget_managers budget." ) budget_manager_ = copy.deepcopy(budget_manager) return budget_manager_
The code above is from skactiveml/utils/_validation.py (scikit-activeml 0.4.1).
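To illustrate how the validation helpers above are typically used (all values are made up): `check_scalar` enforces a type and a value range, while `check_type` additionally accepts explicitly whitelisted values.

from skactiveml.utils import check_scalar, check_type

# Passes silently: a float within the inclusive bounds [0, 1].
check_scalar(0.5, name="budget", target_type=float, min_val=0, max_val=1)

try:
    # Fails: 1 is an int, not a float, so a TypeError is raised.
    check_scalar(1, name="budget", target_type=float, min_val=0, max_val=1)
except TypeError as e:
    print(e)

try:
    # Fails: the value violates the upper bound, so a ValueError is raised.
    check_scalar(1.5, name="budget", target_type=float, min_val=0, max_val=1)
except ValueError as e:
    print(e)

# `check_type` accepts either a matching type or an explicitly allowed value,
# mirroring its use for the `dim` argument of `check_indices`.
check_type((0, 1), "dim", int, tuple, target_vals=["adaptive"])
check_type("adaptive", "dim", int, tuple, target_vals=["adaptive"])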
    ndim: int, optional (default=2)
        The number of dimensions.
    epsilon: float, optional (default=0)
        The minimal distance between the returned bound and the values of
        `X`, if `bound` is not specified.
    bound_must_be_given: bool, optional (default=False)
        Whether `bound` must be given explicitly. If True, a `None` bound
        raises an error; if False, the bound may be inferred from `X`.

    Returns
    -------
    bound: array-like, shape (2, ndim)
        The given bound or the bound inferred from `X`.
    """
    if X is not None:
        X = check_array(X)
        if X.shape[1] != ndim:
            raise ValueError(
                f"`X` along axis 1 must be of length {ndim}. "
                f"`X` along axis 1 is of length {X.shape[1]}."
            )
    if bound is not None:
        bound = check_array(bound)
        if bound.shape != (2, ndim):
            raise ValueError(
                f"Shape of `bound` must be (2, {ndim}). "
                f"Shape of `bound` is {bound.shape}."
            )
    elif bound_must_be_given:
        raise ValueError("`bound` must not be `None`.")

    if bound is None and X is not None:
        minima = np.nanmin(X, axis=0) - epsilon
        maxima = np.nanmax(X, axis=0) + epsilon
        bound = np.append(minima.reshape(1, -1), maxima.reshape(1, -1), axis=0)
        return bound
    elif bound is not None and X is not None:
        if np.any(np.logical_or(bound[0] > X, X > bound[1])):
            warnings.warn("`X` contains values not within range of `bound`.")
        return bound
    elif bound is not None:
        return bound
    else:
        raise ValueError("`X` or `bound` must not be None.")


def check_budget_manager(
    budget,
    budget_manager,
    default_budget_manager_class,
    default_budget_manager_dict=None,
):
    """Validate if the given budget manager is a budget manager class and
    create a copy `budget_manager_`.
    """
    if default_budget_manager_dict is None:
        default_budget_manager_dict = {}
    if budget_manager is None:
        budget_manager_ = default_budget_manager_class(
            budget=budget, **default_budget_manager_dict
        )
    else:
        if budget is not None and budget != budget_manager.budget:
            warnings.warn(
                "A budget manager is already given, so the passed budget "
                "is not used. The given budget differs from the budget "
                "manager's budget."
            )
        budget_manager_ = copy.deepcopy(budget_manager)
    return budget_manager_
0.897243
0.548734
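A minimal usage sketch for the validation helpers above (not part of the package source; the toy data, variable names, and the private import path are assumptions based on the module shown):

import numpy as np
from skactiveml.utils._validation import check_X_y, check_indices, check_type

# Hypothetical toy data; np.nan encodes a missing label.
X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
y = np.array([0, np.nan, 1])

# Validate data, labels, and candidates in one call; sample weights
# default to vectors of ones and are returned alongside the data.
X, y, X_cand, w, w_cand = check_X_y(X, y, X_cand=np.array([[7.0, 8.0]]))

# Validate that the query indices fit the first axis of X
# (duplicates are removed because unique=True by default).
idx = check_indices(np.array([2, 0]), X, dim=0)

# Raise a descriptive TypeError if a parameter has the wrong type.
check_type(1, "batch_size", int)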
import operator import warnings from functools import reduce import numpy as np from scipy.stats import rankdata from sklearn.utils import check_array from ._validation import check_random_state, check_scalar, check_type def rand_argmin(a, random_state=None, **argmin_kwargs): """Returns index of minimum value. In case of ties, a randomly selected index of the minimum elements is returned. Parameters ---------- a: array-like Indexable data-structure of whose minimum element's index is to be determined. random_state: int, RandomState instance or None, optional (default=None) Determines random number generation for shuffling the data. Pass an int for reproducible results across multiple function calls. argmin_kwargs: dict-like Keyword argument passed to numpy function argmin. Returns ------- index_array: ndarray of ints Array of indices into the array. It has the same shape as a.shape with the dimension along axis removed. """ random_state = check_random_state(random_state) a = np.asarray(a) index_array = np.argmax( random_state.random(a.shape) * (a == np.nanmin(a, **argmin_kwargs, keepdims=True)), **argmin_kwargs, ) if np.isscalar(index_array) and a.ndim > 1: index_array = np.unravel_index(index_array, a.shape) index_array = np.atleast_1d(index_array) return index_array def rand_argmax(a, random_state=None, **argmax_kwargs): """Returns index of maximum value. In case of ties, a randomly selected index of the maximum elements is returned. Parameters ---------- a: array-like Indexable data-structure of whose maximum element's index is to be determined. random_state: int, RandomState instance or None, optional (default=None) Determines random number generation for shuffling the data. Pass an int for reproducible results across multiple function calls. argmax_kwargs: dict-like Keyword argument passed to numpy function argmax. Returns ------- index_array: ndarray of ints Array of indices into the array. It has the same shape as a.shape with the dimension along axis removed. """ random_state = check_random_state(random_state) a = np.asarray(a) index_array = np.argmax( random_state.random(a.shape) * (a == np.nanmax(a, **argmax_kwargs, keepdims=True)), **argmax_kwargs, ) if np.isscalar(index_array) and a.ndim > 1: index_array = np.unravel_index(index_array, a.shape) index_array = np.atleast_1d(index_array) return index_array def simple_batch( utilities, random_state=None, batch_size=1, return_utilities=False ): """Generates a batch by selecting the highest values in the 'utilities'. If utilities is an ND-array, the returned utilities will be an (N+1)D-array, with the shape batch_size x utilities.shape, filled the given utilities but set the n-th highest values in the n-th row to np.nan. Parameters ---------- utilities : np.ndarray The utilities to be used to create the batch. random_state : int | np.random.RandomState (default=None) The random state to use. If `random_state is None` random `random_state` is used. batch_size : int, optional (default=1) The number of samples to be selected in one AL cycle. return_utilities : bool (default=False) If True, the utilities are returned. Returns ------- best_indices : np.ndarray, shape (batch_size) if ndim == 1 (batch_size, ndim) else The index of the batch instance. batch_utilities : np.ndarray, shape (batch_size, len(utilities)) The utilities of the batch (if return_utilities=True). 
""" # validation utilities = check_array( utilities, ensure_2d=False, dtype=float, force_all_finite="allow-nan", allow_nd=True, ) check_scalar(batch_size, target_type=int, name="batch_size", min_val=1) max_batch_size = np.sum(~np.isnan(utilities), dtype=int) if max_batch_size < batch_size: warnings.warn( "'batch_size={}' is larger than number of candidate samples " "in 'utilities'. Instead, 'batch_size={}' was set.".format( batch_size, max_batch_size ) ) batch_size = max_batch_size # generate batch batch_utilities = np.empty((batch_size,) + utilities.shape) best_indices = np.empty((batch_size, utilities.ndim), dtype=int) for i in range(batch_size): best_indices[i] = rand_argmax(utilities, random_state=random_state) batch_utilities[i] = utilities utilities[tuple(best_indices[i])] = np.nan # Check whether utilities are to be returned. if utilities.ndim == 1: best_indices = best_indices.flatten() if return_utilities: return best_indices, batch_utilities else: return best_indices def combine_ranking(*iter_ranking, rank_method=None, rank_per_batch=False): """Combine different rankings hierarchically to one ranking assignment. A ranking index i is ranked higher than index j iff ranking[i] > ranking[j]. For the combined ranking it will hold that the first ranking of iter_ranking always determines the ranking position at an index, and only when two ranking assignments are equal the second ranking will determine the ranking position and so forth. Parameters ---------- iter_ranking : iterable of array-like The different rankings. They must share a common shape in the sense that they have the same number of dimensions and are broadcastable by numpy. rank_method : string, optional (default = None) The method by which the utilities are ranked. See `scipy.rankdata`s argument `method` for details. rank_per_batch : bool, optional (default = False) Whether the first index determines the batch and is not used for ranking Returns ------- combined_ranking : np.ndarray The combined ranking. """ if rank_method is None: rank_method = "dense" check_type(rank_method, "rank_method", str) check_type(rank_per_batch, "rank_per_batch", bool) iter_ranking = list(iter_ranking) for idx, ranking in enumerate(iter_ranking): iter_ranking[idx] = check_array( ranking, allow_nd=True, ensure_2d=False, force_all_finite=False ).astype(float) if idx != 0 and iter_ranking[idx - 1].ndim != ranking.ndim: raise ValueError( f"The number of dimensions of the `ranking` in " f"`iter_ranking` must be the same, but " f"`iter_ranking[{idx}].ndim == {ranking.ndim}" f" and `iter_ranking[{idx-1}].ndim == " f"{iter_ranking[idx - 1].ndim}`." ) np.broadcast_shapes(*(u.shape for u in iter_ranking)) combined_ranking = iter_ranking[0] for idx in range(1, len(iter_ranking)): next_ranking = iter_ranking[idx] cr_shape = combined_ranking.shape if rank_per_batch: rank_shape = ( cr_shape[0], max(reduce(operator.mul, cr_shape[1:], 1), 1), ) rank_dict = {"method": rank_method, "axis": 1} else: rank_shape = reduce(operator.mul, cr_shape, 1) rank_dict = {"method": rank_method} combined_ranking = combined_ranking.reshape(rank_shape) # exchange nan values to make rankdata work. nan_values = np.isnan(combined_ranking) combined_ranking[nan_values] = -np.inf combined_ranking = rankdata(combined_ranking, **rank_dict).astype( float ) combined_ranking[nan_values] = np.nan combined_ranking = combined_ranking.reshape(cr_shape) combined_ranking = combined_ranking + 1 / ( 1 + np.exp(-next_ranking) ) # sigmoid return combined_ranking
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/utils/_selection.py
_selection.py
0.902655
0.599397
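A minimal usage sketch for the selection utilities above (not part of the package source; the utility values and the private import path are assumptions):

import numpy as np
from skactiveml.utils._selection import (
    rand_argmax,
    simple_batch,
    combine_ranking,
)

utilities = np.array([0.2, 0.9, 0.9, 0.1])

# Ties on the maximum are broken at random but reproducibly.
best = rand_argmax(utilities, random_state=0)

# Select a batch of the two highest-utility candidates; the returned
# utilities show which values were still available at each step.
# A copy is passed because already-selected entries are set to np.nan.
indices, batch_utils = simple_batch(
    utilities.copy(), random_state=0, batch_size=2, return_utilities=True
)

# Rank primarily by the first array and break ties with the second one.
ranking = combine_ranking(np.array([1.0, 1.0, 2.0]), np.array([0.3, 0.7, 0.1]))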
import numpy as np from sklearn.base import BaseEstimator from sklearn.preprocessing import LabelEncoder from sklearn.utils import check_array from sklearn.utils.validation import check_is_fitted from ._label import MISSING_LABEL, is_labeled, check_missing_label from ._validation import check_classifier_params class ExtLabelEncoder(BaseEstimator): """Encode class labels with value between 0 and classes-1 and uses -1 for unlabeled samples. This transformer should be used to encode class labels, *i.e.* `y`, and not the input `X`. Parameters ---------- classes: array-like, shape (n_classes), default=None Holds the label for each class. missing_label: scalar|string|np.nan|None, default=np.nan Value to represent a missing label. Attributes ---------- classes_: array-like, shape (n_classes) Holds the label for each class. """ def __init__(self, classes=None, missing_label=MISSING_LABEL): self.classes = classes self.missing_label = missing_label def fit(self, y): """Fit label encoder. Parameters ---------- y: array-like, shape (n_samples) or (n_samples, n_outputs) Class labels. Returns ------- self: returns an instance of self. """ check_classifier_params( classes=self.classes, missing_label=self.missing_label ) y = check_array(y, ensure_2d=False, force_all_finite=False, dtype=None) check_missing_label( missing_label=self.missing_label, target_type=y.dtype ) self._le = LabelEncoder() if self.classes is None: is_lbld = is_labeled(y, missing_label=self.missing_label) self._dtype = np.append(y, self.missing_label).dtype self._le.fit(y[is_lbld]) else: self._dtype = np.append(self.classes, self.missing_label).dtype self._le.fit(self.classes) self.classes_ = self._le.classes_ self.classes_ = self._le.classes_ return self def fit_transform(self, y): """Fit label encoder and return encoded labels. Parameters ---------- y: array-like, shape (n_samples) or (n_samples, n_outputs) Class labels. Returns ------- y: array-like, shape (n_samples) or (n_samples, n_outputs) Class labels. """ return self.fit(y).transform(y) def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : array-like of shape (n_samples) Target values. Returns ------- y_enc : array-like of shape (n_samples """ check_is_fitted(self, attributes=["classes_"]) y = check_array( y, ensure_2d=False, force_all_finite=False, ensure_min_samples=0, dtype=None, ) is_lbld = is_labeled(y, missing_label=self.missing_label) y = np.asarray(y) y_enc = np.empty_like(y, dtype=int) y_enc[is_lbld] = self._le.transform(y[is_lbld].ravel()) y_enc[~is_lbld] = -1 return y_enc def inverse_transform(self, y): """Transform labels back to original encoding. Parameters ---------- y : numpy array of shape [n_samples] Target values. Returns ------- y_dec : numpy array of shape [n_samples] """ check_is_fitted(self, attributes=["classes_"]) y = check_array( y, ensure_2d=False, force_all_finite=False, ensure_min_samples=0, dtype=None, ) is_lbld = is_labeled(y, missing_label=-1) y = np.asarray(y) y_dec = np.empty_like(y, dtype=self._dtype) y_dec[is_lbld] = self._le.inverse_transform( np.array(y[is_lbld].ravel()) ) y_dec[~is_lbld] = self.missing_label return y_dec
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/utils/_label_encoder.py
_label_encoder.py
0.937861
0.638751
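A minimal usage sketch for ExtLabelEncoder (not part of the package source; the label values and the private import path are assumptions):

import numpy as np
from skactiveml.utils._label_encoder import ExtLabelEncoder

# Hypothetical labels; np.nan marks an unlabeled sample.
y = np.array([2.0, 3.0, np.nan, 2.0])

le = ExtLabelEncoder(missing_label=np.nan)
y_enc = le.fit_transform(y)          # -> array([ 0,  1, -1,  0]); -1 means unlabeled
y_dec = le.inverse_transform(y_enc)  # round-trips back to [2.0, 3.0, nan, 2.0]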
import inspect from functools import update_wrapper from operator import attrgetter def call_func( f_callable, only_mandatory=False, ignore_var_keyword=False, **kwargs ): """Calls a function with the given parameters given in kwargs if they exist as parameters in f_callable. Parameters ---------- f_callable : callable The function or object that is to be called only_mandatory : boolean If True only mandatory parameters are set. ignore_var_keyword : boolean If False all kwargs are passed when f_callable uses a parameter that is of kind Parameter.VAR_KEYWORD, i.e., **kwargs. For further reference see inspect package. kwargs : kwargs All parameters that could be used for calling f_callable. Returns ------- called object """ params = inspect.signature(f_callable).parameters param_keys = params.keys() if only_mandatory: param_keys = list( filter(lambda k: params[k].default == params[k].empty, param_keys) ) has_var_keyword = any( filter(lambda p: p.kind == p.VAR_KEYWORD, params.values()) ) if has_var_keyword and not ignore_var_keyword and not only_mandatory: vars = kwargs else: vars = dict(filter(lambda e: e[0] in param_keys, kwargs.items())) return f_callable(**vars) def _available_if(method_name, has_available_if): if has_available_if: from sklearn.utils.metaestimators import available_if decorator = available_if( lambda self: _hasattr_array_like(self.estimator, method_name) ) else: from sklearn.utils.metaestimators import if_delegate_has_method if not isinstance(method_name, (list, tuple)): decorator = if_delegate_has_method(delegate="estimator") else: decorator = _if_delegate_has_methods( delegate="estimator", method_names=method_name ) return decorator def _hasattr_array_like(obj, attribute_names): if not isinstance(attribute_names, (list, tuple)): attribute_names = [attribute_names] return any(hasattr(obj, attr) for attr in attribute_names) class _IffHasAMethod: def __init__(self, fn, delegate_name, method_names): self.fn = fn self.delegate_name = delegate_name self.method_names = method_names # update the docstring of the descriptor update_wrapper(self, fn) def __get__(self, obj, owner=None): delegate = attrgetter(self.delegate_name)(obj) if not _hasattr_array_like( delegate, attribute_names=self.method_names ): raise AttributeError def out(*args, **kwargs): return self.fn(obj, *args, **kwargs) # update the docstring of the returned function update_wrapper(out, self.fn) return out def _if_delegate_has_methods(delegate, method_names): return lambda fn: _IffHasAMethod( fn, delegate_name=delegate, method_names=method_names )
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/utils/_functions.py
_functions.py
0.826852
0.171685
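A minimal usage sketch for call_func (not part of the package source; the fit function, its arguments, and the private import path are assumptions):

from skactiveml.utils._functions import call_func


def fit(X, y, sample_weight=None):
    # Hypothetical callable used only to illustrate argument filtering.
    return "fitted"


# `verbose` is dropped silently because `fit` has no such parameter;
# with only_mandatory=True, `sample_weight` would be dropped as well.
result = call_func(fit, X=[[0.0]], y=[1], sample_weight=None, verbose=True)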
import numpy as np from sklearn.metrics import confusion_matrix from sklearn.utils.validation import ( check_consistent_length, column_or_1d, check_array, ) from ._label import MISSING_LABEL, is_labeled, is_unlabeled from ._label_encoder import ExtLabelEncoder def ext_confusion_matrix( y_true, y_pred, classes=None, missing_label=MISSING_LABEL, normalize=None ): """Compute confusion matrix to evaluate the accuracy of a classification. This is an extension of the 'sklearn.metric.confusion_matrix function' by allowing missing labels and labels predicted by multiple annotators. By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}` is equal to the number of observations known to be in group :math:`i` and predicted to be in group :math:`j`. Thus in binary classification, the count of true negatives is :math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is :math:`C_{1,1}` and false positives is :math:`C_{0,1}`. Parameters ---------- y_true: array-like, shape (n_samples) Array of true labels. Is not allowed to contain any missing labels. y_pred: array-like, shape (n_samples) or (n_samples, n_annotators) Estimated targets as returned by multiple annotators. classes : array-like of shape (n_classes), default=None List of class labels to index the matrix. This may be used to reorder or select a subset of labels. If ``None`` is given, those that appear at least once in ``y_true`` or ``y_pred`` are used in sorted order. missing_label : {scalar, string, np.nan, None}, default=np.nan Value to represent a missing label. normalize : {'true', 'pred', 'all'}, default=None Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. Returns ------- conf_matrices : numpy.ndarray, shape (n_annotators, n_classes, n_classes) Confusion matrix whose i-th row and j-th column entry indicates the number of samples with true label being i-th class and prediced label being j-th class. References ---------- [1] `Wikipedia entry for the Confusion matrix <https://en.wikipedia.org/wiki/Confusion_matrix>`_ (Wikipedia and other references may use a different convention for axes) [2] `Scikit-learn Confusion Matrix <https://scikit-learn.org/stable/modules/generated/sklearn.metrics. confusion_matrix.html>`_ """ # Check input. y_true = column_or_1d(y_true) y_pred = check_array( y_pred, force_all_finite=False, ensure_2d=False, dtype=None ) if y_pred.ndim == 1: y_pred = y_pred.reshape(-1, 1) check_consistent_length(y_true, y_pred) if normalize not in ["true", "pred", "all", None]: raise ValueError( "'normalize' must be one of {'true', 'pred', 'all', " "None}." ) le = ExtLabelEncoder(classes=classes, missing_label=missing_label) y = np.column_stack((y_true, y_pred)) y = le.fit_transform(y) if np.sum(is_unlabeled(y[:, 0], missing_label=-1)): raise ValueError("'y_true' is not allowed to contain missing labels.") n_classes = len(le.classes_) n_annotators = y_pred.shape[1] # Determine confusion matrix for each annotator. 
    conf_matrices = np.zeros((n_annotators, n_classes, n_classes))
    for a in range(n_annotators):
        is_not_nan_a = is_labeled(y[:, a + 1], missing_label=-1)
        if np.sum(is_not_nan_a) > 0:
            cm = confusion_matrix(
                y_true=y[is_not_nan_a, 0],
                y_pred=y[is_not_nan_a, a + 1],
                labels=np.arange(n_classes),
            )
        else:
            cm = np.zeros((n_classes, n_classes))
        with np.errstate(all="ignore"):
            if normalize == "true":
                cm = cm / cm.sum(axis=1, keepdims=True)
                conf_matrices[a] = np.nan_to_num(cm, nan=1 / n_classes)
            elif normalize == "pred":
                cm = cm / cm.sum(axis=0, keepdims=True)
                conf_matrices[a] = np.nan_to_num(cm, nan=1 / n_classes)
            elif normalize == "all":
                cm = cm / cm.sum()
                conf_matrices[a] = np.nan_to_num(cm, nan=1 / cm.size)
            else:
                # Without normalization, store the raw count matrix.
                conf_matrices[a] = cm
    return conf_matrices
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/utils/_multi_annot.py
_multi_annot.py
0.912054
0.665686
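A minimal usage sketch for ext_confusion_matrix (not part of the package source; the toy annotations and the private import path are assumptions):

import numpy as np
from skactiveml.utils._multi_annot import ext_confusion_matrix

y_true = np.array([0, 0, 1, 1])
# Two annotators; the second one skipped the last sample (np.nan).
y_pred = np.array([[0, 1], [0, 0], [1, 1], [1, np.nan]])

# One row-normalized confusion matrix per annotator,
# shape (n_annotators, n_classes, n_classes) = (2, 2, 2).
conf = ext_confusion_matrix(y_true, y_pred, normalize="true")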
import numpy as np from iteration_utilities import deepflatten # Define constant for missing label used throughout the package. MISSING_LABEL = np.nan def is_unlabeled(y, missing_label=MISSING_LABEL): """Creates a boolean mask indicating missing labels. Parameters ---------- y : array-like, shape (n_samples) or (n_samples, n_outputs) Class labels to be checked w.r.t. to missing labels. missing_label : number | str | None | np.nan, optional (default=np.nan) Symbol to represent a missing label. Returns ------- is_unlabeled : numpy.ndarray, shape (n_samples) or (n_samples, n_outputs) Boolean mask indicating missing labels in y. """ check_missing_label(missing_label) if len(y) == 0: return np.array(y, dtype=bool) if not isinstance(y, np.ndarray): types = set( t.__qualname__ for t in set(type(v) for v in deepflatten(y)) ) types.add(type(missing_label).__qualname__) is_number = False is_character = False for t in types: t = object if t == "NoneType" else t is_character = ( True if np.issubdtype(t, np.character) else is_character ) is_number = True if np.issubdtype(t, np.number) else is_number if is_character and is_number: raise TypeError( "'y' must be uniformly strings or numbers. " "'NoneType' is allowed. Got {}".format(types) ) y = np.asarray(y) target_type = np.append(y.ravel(), missing_label).dtype check_missing_label(missing_label, target_type=target_type, name="y") if (y.ndim == 2 and np.size(y, axis=1) == 0) or y.ndim > 2: raise ValueError( "'y' must be of shape (n_samples) or '(n_samples, " "n_features)' with 'n_samples > 0' and " "'n_features > 0'." ) if missing_label is np.nan: return np.isnan(y) else: # Todo check if solution is appropriate (see line 46) # y = np.hstack([[1.1, 2.1], np.full(8, np.nan)]) # is_unlabeled(y, 'sdhu') # Fails return y.astype(target_type) == missing_label def is_labeled(y, missing_label=MISSING_LABEL): """Creates a boolean mask indicating present labels. Parameters ---------- y : array-like, shape (n_samples) or (n_samples, n_outputs) Class labels to be checked w.r.t. to present labels. missing_label : number | str | None | np.nan, optional (default=np.nan) Symbol to represent a missing label. Returns ------- is_unlabeled : numpy.ndarray, shape (n_samples) or (n_samples, n_outputs) Boolean mask indicating present labels in y. """ return ~is_unlabeled(y, missing_label) def unlabeled_indices(y, missing_label=MISSING_LABEL): """Return an array of indices indicating missing labels. Parameters ---------- y : array-like, shape (n_samples) or (n_samples, n_outputs) Class labels to be checked w.r.t. to present labels. missing_label : number | str | None | np.nan, optional (default=np.nan) Symbol to represent a missing label. Returns ------- unlbld_indices : numpy.ndarray, shape (n_samples) or (n_samples, 2) Index array of missing labels. If y is a 2D-array, the indices have shape `(n_samples, 2), otherwise it has the shape `(n_samples)`. """ is_unlbld = is_unlabeled(y, missing_label) unlbld_indices = np.argwhere(is_unlbld) return unlbld_indices[:, 0] if is_unlbld.ndim == 1 else unlbld_indices def labeled_indices(y, missing_label=MISSING_LABEL): """Return an array of indices indicating present labels. Parameters ---------- y : array-like, shape (n_samples) or (n_samples, n_outputs) Class labels to be checked w.r.t. to present labels. missing_label : number | str | None | np.nan, optional (default=np.nan) Symbol to represent a missing label. Returns ------- lbld_indices : numpy.ndarray, shape (n_samples) or (n_samples, 2) Index array of present labels. 
        If y is a 2D-array, the indices have shape `(n_samples, 2)`,
        otherwise it has the shape `(n_samples)`.
    """
    is_lbld = is_labeled(y, missing_label)
    lbld_indices = np.argwhere(is_lbld)
    return lbld_indices[:, 0] if is_lbld.ndim == 1 else lbld_indices


def check_missing_label(missing_label, target_type=None, name=None):
    """Check whether a missing label is compatible with a given target type.

    Parameters
    ----------
    missing_label : number | str | None | np.nan
        Symbol to represent a missing label.
    target_type : type or tuple
        Acceptable data types for the parameter 'missing_label'.
    name : str
        The name of the variable to which 'missing_label' is not compatible.
        The name will be printed in error messages.
    """
    is_None = missing_label is None
    is_character = np.issubdtype(type(missing_label), np.character)
    is_number = np.issubdtype(type(missing_label), np.number)
    if not is_number and not is_character and not is_None:
        raise TypeError(
            "'missing_label' has type '{}', but must be either a number, "
            "a string, np.nan, or None.".format(type(missing_label))
        )
    if target_type is not None:
        is_object_type = np.issubdtype(target_type, np.object_)
        is_character_type = np.issubdtype(target_type, np.character)
        is_number_type = np.issubdtype(target_type, np.number)
        if (
            (is_character_type and is_number)
            or (is_number_type and is_character)
            or (is_object_type and not is_None)
        ):
            name = "target object" if name is None else str(name)
            raise TypeError(
                "'missing_label' has type '{}' and is not compatible to the "
                "type '{}' of '{}'.".format(
                    type(missing_label), target_type, name
                )
            )


def check_equal_missing_label(missing_label1, missing_label2):
    """Check whether two missing label values are equal to each other.

    Parameters
    ----------
    missing_label1 : number | str | None | np.nan
        Symbol to represent a missing label.
    missing_label2 : number | str | None | np.nan
        Other symbol to represent a missing label.

    Raises
    ------
    ValueError
        If `missing_label1` and `missing_label2` are not equal.
    """
    if not is_unlabeled([missing_label1], missing_label=missing_label2)[0]:
        raise ValueError(
            f"missing_label1={missing_label1} and "
            f"missing_label2={missing_label2} must be equal."
        )
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/utils/_label.py
_label.py
0.902791
0.686423
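A minimal usage sketch for the label helpers above (not part of the package source; the label vector and the private import path are assumptions):

import numpy as np
from skactiveml.utils._label import (
    is_unlabeled,
    labeled_indices,
    check_equal_missing_label,
)

# Hypothetical labels; np.nan marks missing labels.
y = np.array([1.0, np.nan, 0.0, np.nan])

mask = is_unlabeled(y)    # -> array([False,  True, False,  True])
idx = labeled_indices(y)  # -> array([0, 2])

# Passes silently here; raises a ValueError if the two symbols differ.
check_equal_missing_label(np.nan, np.nan)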
import numpy as np from sklearn.utils import check_array, check_consistent_length from ._label import is_labeled, is_unlabeled from ._label_encoder import ExtLabelEncoder from ._selection import rand_argmax def compute_vote_vectors(y, w=None, classes=None, missing_label=np.nan): """Counts number of votes per class label for each sample. Parameters ---------- y : array-like, shape (n_samples) or (n_samples, n_annotators) Class labels. w : array-like, shape (n_samples) or (n_samples, n_annotators), default=np.ones_like(y) Class label weights. classes : array-like, shape (n_classes), default=None Holds the label for each class. missing_label : scalar|string|np.nan|None, default=np.nan Value to represent a missing label. Returns ------- v : array-like, shape (n_samples, n_classes) V[i,j] counts number of votes per class j for sample i. """ # check input parameters le = ExtLabelEncoder(classes=classes, missing_label=missing_label) y = le.fit_transform(y) n_classes = len(le.classes_) y = y if y.ndim == 2 else y.reshape((-1, 1)) is_unlabeled_y = is_unlabeled(y, missing_label=-1) y[is_unlabeled_y] = 0 y = y.astype(int) if n_classes == 0: raise ValueError( "Number of classes can not be inferred. " "There must be at least one assigned label or classes must not be" "None. " ) w = ( np.ones_like(y) if w is None else check_array( w, ensure_2d=False, force_all_finite=False, dtype=float, copy=True ) ) w = w if w.ndim == 2 else w.reshape((-1, 1)) check_consistent_length(y, w) check_consistent_length(y.T, w.T) w[is_unlabeled_y] = 1 # count class labels per class and weight by confidence scores w[np.logical_or(np.isnan(w), is_unlabeled_y)] = 0 y_off = y + np.arange(y.shape[0])[:, None] * n_classes v = np.bincount( y_off.ravel(), minlength=y.shape[0] * n_classes, weights=w.ravel() ) v = v.reshape(-1, n_classes) return v def majority_vote( y, w=None, classes=None, missing_label=np.nan, random_state=None ): """Assigns a label to each sample based on weighted voting. Samples with no labels are assigned with `missing_label`. Parameters ---------- y : array-like, shape (n_samples) or (n_samples, n_annotators) Class labels. w : array-like, shape (n_samples) or (n_samples, n_annotators), default=np.ones_like(y) Class label weights. classes : array-like, shape (n_classes), default=None Holds the label for each class. missing_label : scalar|string|np.nan|None, default=np.nan Value to represent a missing label. random_state : int, RandomState instance or None, optional (default=None) Determines random number generation for shuffling the data. Pass an int for reproducible results across multiple function calls. Returns ------- y_aggregated : array-like, shape (n_samples) Assigned labels for each sample. 
""" # check input parameters y = check_array(y, ensure_2d=False, dtype=None, force_all_finite=False) y = y if y.ndim == 2 else y.reshape((-1, 1)) n_samples = y.shape[0] w = ( np.ones_like(y) if w is None else check_array( w, ensure_2d=False, force_all_finite=False, dtype=None, copy=True ) ) # extract labeled samples is_labeled_y = np.any(is_labeled(y, missing_label), axis=1) y_labeled = y[is_labeled_y] # infer encoding le = ExtLabelEncoder(classes=classes, missing_label=missing_label) le.fit(y) y_aggregated = np.full((n_samples,), missing_label, dtype=le._dtype) if np.any(is_labeled_y): # transform labels y_labeled_transformed = le.transform(y_labeled) # perform voting vote_matrix = compute_vote_vectors( y_labeled_transformed, w=w[is_labeled_y], missing_label=-1, classes=np.arange(len(le.classes_)), ) vote_vector = rand_argmax(vote_matrix, random_state, axis=1) # inverse transform labels y_labeled_inverse_transformed = le.inverse_transform(vote_vector) # assign labels y_aggregated[is_labeled_y] = y_labeled_inverse_transformed return y_aggregated
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/utils/_aggregation.py
_aggregation.py
0.933043
0.599573
import matplotlib.pyplot as plt import numpy as np from matplotlib.colors import Colormap from ..utils import check_scalar, check_type, check_bound def mesh(bound, res): """ Function to get instances of a mesh grid as well as x-mesh and y-mesh with given resolution in the specified bounds. Parameters ---------- bound: array-like, [[xmin, ymin], [xmax, ymax]] The bounds of the mesh grid. res: int, optional (default=21) The resolution of the plot. Returns ------- X_mesh: np.ndarray, shape (res, res) mesh grid over x Y_mesh: np.ndarray, shape (res, res) mesh grid over y mesh_instances: np.ndarray, shape (res*res,) instances of the mesh grid """ check_scalar(res, "res", int, min_val=1) check_bound(bound=bound, bound_must_be_given=True) xmin, ymin, xmax, ymax = np.ravel(bound) x_vec = np.linspace(xmin, xmax, res) y_vec = np.linspace(ymin, ymax, res) X_mesh, Y_mesh = np.meshgrid(x_vec, y_vec) mesh_instances = np.array([X_mesh.reshape(-1), Y_mesh.reshape(-1)]).T return X_mesh, Y_mesh, mesh_instances def _get_cmap(cmap): if isinstance(cmap, str): cmap = plt.cm.get_cmap(cmap) check_type(cmap, "cmap", Colormap, str) return cmap def _get_boundary_args(boundary_dict): boundary_args = {"colors": "k", "linewidths": [2], "zorder": 1} if boundary_dict is not None: check_type(boundary_dict, "boundary_dict", dict) boundary_args.update(boundary_dict) return boundary_args def _get_confidence_args(confidence_dict): confidence_args = { "linewidths": [2, 2], "linestyles": "--", "alpha": 0.9, "vmin": 0.2, "vmax": 0.8, "zorder": 1, } if confidence_dict is not None: check_type(confidence_dict, "confidence_dict", dict) confidence_args.update(confidence_dict) return confidence_args def _get_contour_args(contour_dict): contour_args = {"cmap": "Greens", "alpha": 0.75} if contour_dict is not None: check_type(contour_dict, "contour_dict", dict) contour_args.update(contour_dict) return contour_args
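A short illustrative sketch of how `mesh` can be used to rasterize a 2D region for plotting; the bounds and resolution are arbitrary example values, and the import uses the private module path shown for this file.

# Illustrative example: build a 5x5 grid over the box [0, 1] x [0, 2].
import numpy as np

from skactiveml.visualization._misc import mesh

bound = np.array([[0.0, 0.0], [1.0, 2.0]])  # [[xmin, ymin], [xmax, ymax]]
X_mesh, Y_mesh, mesh_instances = mesh(bound, 5)

print(X_mesh.shape)          # (5, 5)
print(mesh_instances.shape)  # (25, 2), one (x, y) pair per grid point
# mesh_instances can be scored, e.g. by clf.predict_proba, and the result
# reshaped to X_mesh.shape for ax.contourf(X_mesh, Y_mesh, ...).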
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/visualization/_misc.py
_misc.py
0.866698
0.725393
import warnings import numpy as np from matplotlib import lines, pyplot as plt from matplotlib.axes import Axes from sklearn.base import ClassifierMixin from sklearn.neighbors import KNeighborsRegressor from sklearn.utils.validation import ( check_array, check_consistent_length, column_or_1d, ) from ._misc import ( mesh, check_bound, _get_boundary_args, _get_confidence_args, _get_contour_args, _get_cmap, ) from ..base import ( QueryStrategy, SingleAnnotatorPoolQueryStrategy, MultiAnnotatorPoolQueryStrategy, ) from ..exceptions import MappingError from ..utils import ( check_scalar, unlabeled_indices, call_func, check_type, check_indices, ) def plot_utilities(qs, X, y, candidates=None, **kwargs): """Plot the utility for the given single-annotator query strategy. Parameters ---------- qs : skactiveml.base.SingleAnnotatorPoolQueryStrategy The query strategy for which the utility is plotted. X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples, ) or (n_samples, n_annotators) Labels of the training data set (possibly including unlabeled ones indicated by self.MISSING_LABEL). candidates : None or array-like of shape (n_candidates,), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If `candidates` is None, the unlabeled samples from (X,y) are considered as candidates. If `candidates` is of shape (n_candidates,) and of type int, candidates is considered as the indices of the samples in (X,y). If `candidates` is of shape (n_candidates, n_features), the candidates are directly given in candidates (not necessarily contained in X). This is not supported by all query strategies. Other Parameters ---------------- replace_nan : numeric or None, optional (default=0.0) Only used if plotting with mesh instances is not possible. If numeric, the utility of labeled instances will be plotted with value `replace_nan`. If None, these samples will be ignored. ignore_undefined_query_params : bool, optional (default=False) If True, query parameters that are not defined in the query function are ignored and will not raise an exception. feature_bound : array-like of shape [[xmin, ymin], [xmax, ymax]], optional (default=None) Determines the area in which the boundary is plotted. If candidates is not given, bound must not be None. Otherwise, the bound is determined based on the data. ax : matplotlib.axes.Axes, optional (default=None) The axis on which the utility is plotted. Only if y.ndim = 1 (single annotator). res : int, optional (default=21) The resolution of the plot. contour_dict : dict, optional (default=None) Additional parameters for the utility contour. **kwargs Remaining keyword arguments are passed the query function of the query strategy. Returns ------- ax : matplotlib.axes.Axes The axis on which the utilities were plotted. """ check_type(qs, "qs", SingleAnnotatorPoolQueryStrategy) return _general_plot_utilities( qs=qs, X=X, y=y, candidates=candidates, **kwargs ) def plot_annotator_utilities(qs, X, y, candidates=None, **kwargs): """Plot the utility for the given query strategy. Parameters ---------- qs : skactiveml.base.QueryStrategy The query strategy for which the utility is plotted. X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. 
y : array-like of shape (n_samples, ) or (n_samples, n_annotators) Labels of the training data set (possibly including unlabeled ones indicated by self.MISSING_LABEL). candidates : None or array-like of shape (n_candidates,), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If `candidates` is None, the unlabeled samples from (X,y) are considered as candidates. If `candidates` is of shape (n_candidates,) and of type int, candidates is considered as the indices of the samples in (X,y). If `candidates` is of shape (n_candidates, n_features), the candidates are directly given in candidates (not necessarily contained in X). This is not supported by all query strategies. Other Parameters ---------------- replace_nan : numeric or None, optional (default=0.0) Only used if plotting with mesh instances is not possible. If numeric, the utility of labeled instances will be plotted with value `replace_nan`. If None, these samples will be ignored. ignore_undefined_query_params : bool, optional (default=False) If True, query parameters that are not defined in the query function are ignored and will not raise an exception. feature_bound : array-like of shape [[xmin, ymin], [xmax, ymax]], optional (default=None) Determines the area in which the boundary is plotted. If candidates is not given, bound must not be None. Otherwise, the bound is determined based on the data. axes : array-like of matplotlib.axes.Axes, optional (default=None) The axes on which the utilities for the annotators are plotted. Only supported for y.ndim = 2 (multi annotator). res : int, optional (default=21) The resolution of the plot. contour_dict : dict, optional (default=None) Additional parameters for the utility contour. plot_annotators : None or array-like of shape (n_annotators_to_plot,), optional (default=None) Contains the indices of the annotators to be plotted. If it is None, all annotators are plotted. Only supported for y.ndim = 2 (multi annotator). **kwargs Remaining keyword arguments are passed the query function of the query strategy. Returns ------- axes : array-like of shape (n_annotators_to_plot,) The axes on which the utilities were plotted. """ check_type(qs, "qs", MultiAnnotatorPoolQueryStrategy) return _general_plot_utilities( qs=qs, X=X, y=y, candidates=candidates, **kwargs ) def plot_decision_boundary( clf, feature_bound, ax=None, res=21, boundary_dict=None, confidence=0.75, cmap="coolwarm", confidence_dict=None, ): """Plot the decision boundary of the given classifier. Parameters ---------- clf: Sklearn classifier The fitted classifier whose decision boundary is plotted. If confidence is not None, the classifier must implement the predict_proba function. feature_bound: array-like, [[xmin, ymin], [xmax, ymax]] Determines the area in which the boundary is plotted. ax: matplotlib.axes.Axes or List, optional (default=None) The axis on which the decision boundary is plotted. If ax is a List, each entry has to be an `matplotlib.axes.Axes`. res: int, optional (default=21) The resolution of the plot. boundary_dict: dict, optional (default=None) Additional parameters for the boundary contour. confidence: scalar | None, optional (default=0.5) The confidence interval plotted with dashed lines. It is not plotted if confidence is None. Must be in the open interval (0.5, 1). The value stands for the ratio best class / second best class. cmap: str | matplotlib.colors.Colormap, optional (default='coolwarm_r') The colormap for the confidence levels. 
confidence_dict: dict, optional (default=None) Additional parameters for the confidence contour. Must not contain a colormap because cmap is used. Returns ------- ax: matplotlib.axes.Axes or List The axis on which the boundary was plotted or the list of axis if ax was a list. """ check_type(clf, "clf", ClassifierMixin) check_scalar(res, "res", int, min_val=1) if ax is None: ax = plt.gca() check_type(ax, "ax", Axes) feature_bound = check_bound(bound=feature_bound) # Check and convert the colormap cmap = _get_cmap(cmap) if confidence is not None: check_scalar( confidence, "confidence", float, min_inclusive=False, max_inclusive=False, min_val=0.5, max_val=1, ) # Update additional arguments boundary_args = _get_boundary_args(boundary_dict) confidence_args = _get_confidence_args(confidence_dict) # Create mesh for plotting X_mesh, Y_mesh, mesh_instances = mesh(feature_bound, res) # Calculate predictions if hasattr(clf, "predict_proba"): predictions = clf.predict_proba(mesh_instances) classes = np.arange(predictions.shape[1]) elif hasattr(clf, "predict"): if confidence is not None: warnings.warn( "The given classifier does not implement " "'predict_proba'. Thus, the confidence cannot be " "plotted." ) confidence = None predicted_classes = clf.predict(mesh_instances) classes = np.arange(len(np.unique(predicted_classes))) predictions = np.zeros((len(predicted_classes), len(classes))) for idx, y in enumerate(predicted_classes): predictions[idx, y] = 1 else: raise AttributeError( "'clf' must implement 'predict' or " "'predict_proba'" ) posterior_list = [] for y in classes: posteriors = predictions[:, y].reshape(X_mesh.shape) posterior_list.append(posteriors) norm = plt.Normalize(vmin=min(classes), vmax=max(classes)) for y in classes: posteriors = posterior_list[y] posteriors_best_alternative = np.zeros_like(posteriors) for y2 in np.setdiff1d(classes, [y]): posteriors_best_alternative = np.max( [posteriors_best_alternative, posterior_list[y2]], axis=0 ) posteriors = posteriors / (posteriors + posteriors_best_alternative) ax.contour(X_mesh, Y_mesh, posteriors, [0.5], **boundary_args) if confidence is not None: ax.contour( X_mesh, Y_mesh, posteriors, [confidence], colors=[cmap(norm(y))], **confidence_args, ) return ax def plot_contour_for_samples( X, values, replace_nan=0.0, feature_bound=None, ax=None, res=21, contour_dict=None, ): """Plot the utility for the given query strategy. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. values : array-like of shape (n_samples) Values to plot for samples `X` (may contain np.nan, can be replaced or ignored, see `replace_nan`). replace_nan : numeric or None, optional (default=0.0) If numeric, nan-values in `values` will be replaced by this number. If None, these samples will be ignored. feature_bound : array-like, [[xmin, ymin], [xmax, ymax]] Determines the area in which the boundary is plotted. If candidates is not given, bound must not be None. Otherwise, the bound is determined based on the data. ax : matplotlib.axes.Axes, optional (default=None) The axis on which the utility is plotted. res : int, optional (default=21) The resolution of the plot. contour_dict : dict, optional (default=None) Additional parameters for the utility contour. Returns ------- matplotlib.axes.Axes: The axis on which the utility was plotted. 
""" check_array(X, ensure_2d=True) check_array(values, ensure_2d=False, force_all_finite="allow-nan") feature_bound = check_bound(bound=feature_bound, X=X) X_mesh, Y_mesh, mesh_instances = mesh(feature_bound, res) if ax is None: ax = plt.gca() if replace_nan is None: valid_idx = ~np.isnan(values) X = X[valid_idx] values = values[valid_idx] else: values = np.nan_to_num(values, nan=replace_nan) contour_args = _get_contour_args(contour_dict) neighbors = KNeighborsRegressor(n_neighbors=1) neighbors.fit(X, values) scores = neighbors.predict(mesh_instances).reshape(X_mesh.shape) ax.contourf(X_mesh, Y_mesh, scores, **contour_args) return ax def plot_stream_training_data( ax, X, y, queried_indices, classes, feature_bound, unlabeled_color="grey", cmap="coolwarm", alpha=0.2, linewidth=3, plot_cand_highlight=True, ): """Plot the utility for the given query strategy. Parameters ---------- ax : matplotlib.axes.Axes The axis on which the utility is plotted. Only if y.ndim = 1 (single annotator). X : array-like of shape (n_samples, 1) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples, ) Labels of the training data set (possibly including unlabeled ones indicated by self.MISSING_LABEL). queried_indices : array-like of shape (n_samples,) Indicates which instances from candidates have been queried. classes : array-like of shape (n_classes) Holds the label for each class. feature_bound : array-like of shape [[xmin, ymin], [xmax, ymax]] Determines the area in which the boundary is plotted. If candidates is not given, bound must not be None. Otherwise, the bound is determined based on the data. unlabeled_color: str | matplotlib.colors.Colormap, optional (default='grey') The color for the unlabled samples. cmap: str | matplotlib.colors.Colormap, optional (default='coolwarm_r') The colormap for the confidence levels. alpha: scalar Set the alpha value used for blending - not supported on all backends. linewidth: float Set the line width in points. plot_cand_highlight: bool The indicator to higlight the current candidate. Returns ------- axes : array-like of shape (n_annotators_to_plot,) The axes on which the utilities were plotted. """ column_or_1d(X) check_array(y, ensure_2d=False, force_all_finite="allow-nan") check_consistent_length(X, y) check_array(queried_indices, ensure_2d=False) check_array(classes, ensure_2d=False) check_type(unlabeled_color, "unlabeled_color", str) check_type(plot_cand_highlight, "plot_cand_highlight", bool) check_type(ax, "ax", Axes) data_lines = [] cmap = _get_cmap(cmap) norm = plt.Normalize(vmin=min(classes), vmax=max(classes)) highlight_color = ( cmap(norm(y[-1])) if queried_indices[-1] else unlabeled_color ) if plot_cand_highlight: data_lines.append( lines.Line2D( [0, feature_bound[0][1]], [X[-1], X[-1]], c=highlight_color, alpha=alpha, linewidth=linewidth * 2, ) ) for t, (x_t, a, y_t) in enumerate(zip(X, queried_indices, y)): line_color = cmap(norm(y_t)) if a else unlabeled_color zorder = 3 if a else 2 alpha_tmp = alpha * 2 if a else alpha data_lines.append( lines.Line2D( [t, len(X) - 1], [x_t, x_t], zorder=zorder, color=line_color, alpha=alpha_tmp, linewidth=linewidth, ) ) for d_line in data_lines: ax.add_line(d_line) return data_lines def plot_stream_decision_boundary( ax, t_x, plot_step, clf, X, pred_list, color="k", res=25, ): """Plot the decision boundary of the given classifier. Parameters ---------- ax: matplotlib.axes.Axes or List The axis on which the decision boundary is plotted. 
If ax is a List, each entry has to be an `matplotlib.axes.Axes`. t_x: int The position of the newest instance for the x axies. plot_step: int The interval in which the clf should predict new samples. clf: Sklearn classifier The fitted classifier whose decision boundary is plotted. X : array-like of shape (n_samples, 1) Training data set, usually complete, i.e. including the labeled and unlabeled samples. pred_list: array-like of shape (n_samples, ) The list containing classifier prediction for the last steps. color: str | matplotlib.colors.Colormap, optional (default='k') The color for the decision boundary. res : int, optional (default=25) The resolution of the plot. Returns ------- ax: matplotlib.axes.Axes or List The axis on which the boundary was plotted or the list of axis if ax was a list. pred_list: array-like of shape (n_samples, ) The list containing classifier prediction for the last steps. """ X = column_or_1d(X) check_array(pred_list, ensure_2d=False, ensure_min_samples=0) check_scalar(t_x, "t_x", int, min_val=0) check_scalar(plot_step, "plot_step", int, min_val=1) check_type(ax, "ax", Axes) check_type(clf, "clf", ClassifierMixin) x_vec = np.linspace(np.min(X), np.max(X), res) t_vec = np.arange(1, t_x // plot_step + 1) * plot_step t_mesh, x_mesh = np.meshgrid(t_vec, x_vec) predictions = np.array([clf.predict(x_vec.reshape([-1, 1]))]) pred_list.extend(predictions) if len(pred_list) > 2 and np.sum(pred_list) > 0: ax.contour( t_mesh, x_mesh, np.array(pred_list[1:]).T, levels=[0.5], colors=color, ) return ax, pred_list def _general_plot_utilities(qs, X, y, candidates=None, **kwargs): """Plot the utility for the given query strategy. Parameters ---------- qs : skactiveml.base.QueryStrategy The query strategy for which the utility is plotted. X : array-like of shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like of shape (n_samples, ) or (n_samples, n_annotators) Labels of the training data set (possibly including unlabeled ones indicated by self.MISSING_LABEL). candidates : None or array-like of shape (n_candidates,), dtype=int or array-like of shape (n_candidates, n_features), optional (default=None) If `candidates` is None, a mesh with the specified resolution is generated and considered as candidates. If `candidates` is of shape (n_candidates,) and of type int, candidates is considered as the indices of the samples in (X,y). If `candidates` is of shape (n_candidates, n_features), the candidates are directly given in candidates (not necessarily contained in X). This is not supported by all query strategies. Other Parameters ---------------- replace_nan : numeric or None, optional (default=0.0) Only used if plotting with mesh instances is not possible. If numeric, the utility of labeled instances will be plotted with value `replace_nan`. If None, these samples will be ignored. ignore_undefined_query_params : bool, optional (default=False) If True, query parameters that are not defined in the query function are ignored and will not raise an exception. feature_bound : array-like of shape [[xmin, ymin], [xmax, ymax]], optional (default=None) Determines the area in which the boundary is plotted. If candidates is not given, bound must not be None. Otherwise, the bound is determined based on the data. ax : matplotlib.axes.Axes, optional (default=None) The axis on which the utility is plotted. Only if y.ndim = 1 (single annotator). 
axes : array-like of matplotlib.axes.Axes, optional (default=None) The axes on which the utilities for the annotators are plotted. Only supported for y.ndim = 2 (multi annotator). res : int, optional (default=21) The resolution of the plot. contour_dict : dict, optional (default=None) Additional parameters for the utility contour. plot_annotators : None or array-like of shape (n_annotators_to_plot,), optional (default=None) Contains the indices of the annotators to be plotted. If it is None, all annotators are plotted. Only supported for y.ndim = 2 (multi annotator). **kwargs Remaining keyword arguments are passed the query function of the query strategy. Returns ------- axes : array-like of shape (n_annotators_to_plot,) The axes on which the utilities were plotted. """ replace_nan = kwargs.pop("replace_nan", 0.0) ignore_undefined_query_params = kwargs.pop( "ignore_undefined_query_params", False ) feature_bound = kwargs.pop("feature_bound", None) ax = kwargs.pop("ax", None) axes = kwargs.pop("axes", None) res = kwargs.pop("res", 21) contour_dict = kwargs.pop("contour_dict", None) plot_annotators = kwargs.pop("plot_annotators", None) check_type(qs, "qs", QueryStrategy) X = check_array(X, allow_nd=False, ensure_2d=True) if X.shape[1] != 2: raise ValueError("Samples in `X` must have 2 features.") # Check labels y = check_array(y, ensure_2d=False, force_all_finite="allow-nan") check_consistent_length(X, y) if y.ndim == 2: if plot_annotators is None: n_annotators = y.shape[1] plot_annotators = np.arange(n_annotators) else: plot_annotators = column_or_1d(plot_annotators) check_indices(plot_annotators, y, dim=1) n_annotators = len(plot_annotators) else: n_annotators = None if plot_annotators is not None: raise TypeError( "`plot_annotator` can be only used in the multi-annotator " "setting." ) else: plot_annotators = np.arange(1) if n_annotators is None: if axes is not None: raise TypeError( "`axes` can be only used in the multi-annotator setting. " "Use `ax` instead." ) if ax is None: axes = np.array([plt.subplots(1, 1)[1]]) else: check_type(ax, "ax", Axes) axes = np.array([ax]) else: if ax is not None: raise ValueError( "`ax` can be only used in the single-annotator setting. " "Use `axes` instead." ) if axes is None: axes = plt.subplots(1, n_annotators)[1] else: [check_type(ax_, "ax", Axes) for ax_ in axes] if n_annotators is not None and len(axes) != n_annotators: raise ValueError( "`axes` must contain one `Axes` object for each " "annotator to be plotted (indicated by `plot_annotators`)." 
) # ensure that utilities are returned kwargs["return_utilities"] = True if candidates is None: # plot mesh try: check_scalar(res, "res", int, min_val=1) feature_bound = check_bound(bound=feature_bound, X=X) X_mesh, Y_mesh, mesh_instances = mesh(feature_bound, res) contour_args = _get_contour_args(contour_dict) if ignore_undefined_query_params: _, utilities = call_func( qs.query, X=X, y=y, candidates=mesh_instances, **kwargs ) else: _, utilities = qs.query( X=X, y=y, candidates=mesh_instances, **kwargs ) for a_idx, ax_ in zip(plot_annotators, axes): if n_annotators is not None: utilities_a_idx = utilities[0, :, a_idx] else: utilities_a_idx = utilities[0, :] utilities_a_idx = utilities_a_idx.reshape(X_mesh.shape) ax_.contourf(X_mesh, Y_mesh, utilities_a_idx, **contour_args) if n_annotators is None: return axes[0] else: return axes except MappingError: candidates = unlabeled_indices(y, missing_label=qs.missing_label) except BaseException as err: warnings.warn( f"Unable to create utility plot with mesh because " f"of the following error. Trying plotting over " f"candidates. \n\n Unexpected {err.__repr__()}" ) candidates = unlabeled_indices(y, missing_label=qs.missing_label) candidates = check_array( candidates, allow_nd=False, ensure_2d=False, force_all_finite="allow-nan", ) if candidates.ndim == 1: X_utils = X candidates = check_indices(candidates, X) else: X_utils = candidates if ignore_undefined_query_params: _, utilities = call_func( qs.query, X=X, y=y, candidates=candidates, **kwargs ) else: _, utilities = qs.query(X=X, y=y, candidates=candidates, **kwargs) for a_idx, ax_ in zip(plot_annotators, axes): if n_annotators is not None: utilities_a_idx = utilities[0, :, a_idx] else: utilities_a_idx = utilities[0, :] plot_contour_for_samples( X_utils, utilities_a_idx, replace_nan=replace_nan, feature_bound=feature_bound, ax=ax_, res=res, contour_dict=contour_dict, ) if n_annotators is None: return axes[0] else: return axes
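A hedged end-to-end sketch of how the plotting helpers above are typically combined. The toy data set, query strategy, and classifier are example choices rather than anything prescribed by this module, and the public imports from `skactiveml.visualization`, `skactiveml.pool`, and `skactiveml.classifier` are assumptions about the package layout.

# Illustrative example: plot utilities of an uncertainty sampling strategy
# and the decision boundary of a Parzen window classifier on toy 2D data.
import numpy as np
from sklearn.datasets import make_blobs

from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.pool import UncertaintySampling
from skactiveml.utils import MISSING_LABEL
from skactiveml.visualization import plot_utilities, plot_decision_boundary

X, y_true = make_blobs(n_samples=100, centers=2, random_state=0)
y = np.full(shape=y_true.shape, fill_value=MISSING_LABEL)  # fully unlabeled
y[:10] = y_true[:10]  # pretend the first ten labels are already known

clf = ParzenWindowClassifier(
    classes=np.unique(y_true), missing_label=MISSING_LABEL
).fit(X, y)
qs = UncertaintySampling(missing_label=MISSING_LABEL, random_state=0)

feature_bound = [
    [X[:, 0].min(), X[:, 1].min()],
    [X[:, 0].max(), X[:, 1].max()],
]
# `clf` is forwarded to `qs.query` via **kwargs; `feature_bound` and `res`
# control the plotted mesh.
ax = plot_utilities(qs, X=X, y=y, clf=clf, feature_bound=feature_bound, res=31)
plot_decision_boundary(clf, feature_bound=feature_bound, ax=ax)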
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/visualization/_feature_space.py
_feature_space.py
0.856512
0.632233
import numpy as np
import warnings

from sklearn.metrics.pairwise import pairwise_kernels, KERNEL_PARAMS
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_scalar

from ..base import ClassFrequencyEstimator
from ..utils import MISSING_LABEL, compute_vote_vectors, is_labeled


class ParzenWindowClassifier(ClassFrequencyEstimator):
    """ParzenWindowClassifier

    The Parzen window classifier (PWC) is a simple and probabilistic
    classifier. This classifier is based on a non-parametric density
    estimation obtained by applying a kernel function.

    Parameters
    ----------
    classes : array-like of shape (n_classes), default=None
        Holds the label for each class. If None, the classes are determined
        during the fit.
    missing_label : scalar or string or np.nan or None, default=np.nan
        Value to represent a missing label.
    cost_matrix : array-like of shape (n_classes, n_classes)
        Cost matrix with `cost_matrix[i,j]` indicating cost of predicting
        class `classes[j]` for a sample of class `classes[i]`. Can only be
        set if `classes` is not None.
    class_prior : float or array-like of shape (n_classes,), default=0.0
        Prior observations of the class frequency estimates. If `class_prior`
        is an array, the entry `class_prior[i]` indicates the non-negative
        prior number of samples belonging to class `classes_[i]`. If
        `class_prior` is a float, `class_prior` indicates the non-negative
        prior number of samples per class.
    metric : str or callable, default='rbf'
        The metric must be a valid kernel defined by the function
        `sklearn.metrics.pairwise.pairwise_kernels`.
    n_neighbors : int or None, default=None
        Number of nearest neighbours. Default is None, which means all
        available samples are considered.
    metric_dict : dict, optional (default=None)
        Any further parameters are passed directly to the kernel function.
        For the kernel 'rbf', the mean bandwidth heuristic [2] can be used by
        setting gamma to 'mean' (i.e., {'gamma': 'mean'}). Here, N is the
        number of labeled samples, while the variance is calculated over all
        samples in X.

    Attributes
    ----------
    classes_ : array-like of shape (n_classes,)
        Holds the label for each class after fitting.
    class_prior : np.ndarray of shape (n_classes)
        Prior observations of the class frequency estimates. The entry
        `class_prior_[i]` indicates the non-negative prior number of samples
        belonging to class `classes_[i]`.
    cost_matrix_ : np.ndarray of shape (n_classes, n_classes)
        Cost matrix with `cost_matrix_[i,j]` indicating cost of predicting
        class `classes_[j]` for a sample of class `classes_[i]`.
    X_ : np.ndarray of shape (n_samples, n_features)
        The sample matrix `X` is the feature matrix representing the samples.
    V_ : np.ndarray of shape (n_samples, n_classes)
        The class labels are represented by counting vectors. An entry
        `V[i,j]` indicates how many class labels of `classes[j]` were provided
        for training sample `X_[i]`.

    References
    ----------
    .. [1] O. Chapelle, "Active Learning for Parzen Window Classifier",
       Proceedings of the Tenth International Workshop Artificial Intelligence
       and Statistics, 2005.
    .. [2] Chaudhuri, A., Kakde, D., Sadek, C., Gonzalez, L., & Kong, S., "The
       Mean and Median Criteria for Kernel Bandwidth Selection for Support
       Vector Data Description", IEEE International Conference on Data Mining
       Workshops (ICDMW), 2017.
""" METRICS = list(KERNEL_PARAMS.keys()) + ["precomputed"] def __init__( self, n_neighbors=None, metric="rbf", metric_dict=None, classes=None, missing_label=MISSING_LABEL, cost_matrix=None, class_prior=0.0, random_state=None, ): super().__init__( classes=classes, class_prior=class_prior, missing_label=missing_label, cost_matrix=cost_matrix, random_state=random_state, ) self.metric = metric self.n_neighbors = n_neighbors self.metric_dict = metric_dict def fit(self, X, y, sample_weight=None): """Fit the model using X as training data and y as class labels. Parameters ---------- X : array-like of shape (n_samples, n_features) The sample matrix `X` is the feature matrix representing the samples. y : array-like of shape (n_samples) It contains the class labels of the training samples. sample_weight : array-like of shape (n_samples) It contains the weights of the training samples' class labels. It must have the same shape as y. Returns ------- self: ParzenWindowClassifier, The ParzenWindowClassifier is fitted on the training data. """ # Check input parameters. X, y, sample_weight = self._validate_data(X, y, sample_weight) # Check whether metric is available. if self.metric not in ParzenWindowClassifier.METRICS and not callable( self.metric ): raise ValueError( "The parameter 'metric' must be callable or " "in {}".format(KERNEL_PARAMS.keys()) ) # Check number of neighbors which must be a positive integer. if self.n_neighbors is not None: check_scalar( self.n_neighbors, name="n_neighbors", min_val=1, target_type=int, ) # Ensure that metric_dict is a Python dictionary. self.metric_dict_ = ( self.metric_dict if self.metric_dict is not None else {} ) if ( "gamma" in self.metric_dict_ and self.metric_dict["gamma"] == "mean" and self.metric == "rbf" ): is_lbld = is_labeled(y, missing_label=1) N = np.max([2, np.sum(is_lbld)]) variance = np.var(X, axis=0) n_features = X.shape[1] self.metric_dict_[ "gamma" ] = ParzenWindowClassifier._calculate_mean_gamma( N, variance, n_features ) if not isinstance(self.metric_dict_, dict): raise TypeError("'metric_dict' must be a Python dictionary.") self._check_n_features(X, reset=True) # Store train samples. self.X_ = X.copy() # Convert labels to count vectors. if self.n_features_in_ is None: self.V_ = 0 else: self.V_ = compute_vote_vectors( y=y, w=sample_weight, classes=np.arange(len(self.classes_)), missing_label=-1, ) return self def predict_freq(self, X): """Return class frequency estimates for the input samples 'X'. Parameters ---------- X: array-like or shape (n_samples, n_features) or shape (n_samples, m_samples) if metric == 'precomputed' Input samples. Returns ------- F: array-like of shape (n_samples, classes) The class frequency estimates of the input samples. Classes are ordered according to `classes_`. """ check_is_fitted(self) X = check_array(X, force_all_finite=(self.metric != "precomputed")) # Predict zeros because of missing training data. if self.n_features_in_ is None: return np.zeros((len(X), len(self.classes_))) # Compute kernel (metric) matrix. if self.metric == "precomputed": K = X if np.size(K, 0) != np.size(X, 0) or np.size(K, 1) != np.size( self.X_, 0 ): raise ValueError( "The kernel matrix 'X' must have the shape " "(n_test_samples, n_train_samples)." 
) else: self._check_n_features(X, reset=False) K = pairwise_kernels( X, self.X_, metric=self.metric, **self.metric_dict_ ) # computing class frequency estimates if self.n_neighbors is None or np.size(self.X_, 0) <= self.n_neighbors: F = K @ self.V_ else: indices = np.argpartition(K, -self.n_neighbors, axis=1) indices = indices[:, -self.n_neighbors :] F = np.empty((np.size(X, 0), len(self.classes_))) for i in range(np.size(X, 0)): F[i, :] = K[i, indices[i]] @ self.V_[indices[i], :] return F def _calculate_mean_gamma( N, variance, n_features, delta=(np.sqrt(2) * 1e-6) ): denominator = 2 * N * np.sum(variance) numerator = (N - 1) * np.log((N - 1) / delta**2) if denominator <= 0: gamma = 1 / n_features warnings.warn( "The variance of the provided data is 0. Bandwidth of " + f"1/n_features={gamma} is used instead." ) else: gamma = 0.5 * numerator / denominator return gamma
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/classifier/_parzen_window_classifier.py
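Below is a minimal usage sketch for the ParzenWindowClassifier defined above. The toy data, the label vector, and the choice of `metric_dict={'gamma': 'mean'}` are illustrative assumptions rather than part of the library file; the import paths follow the package layout shown in the path above.

import numpy as np

from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.utils import MISSING_LABEL

# Partially labeled 1D toy data: np.nan marks missing labels.
X = np.array([[0.0], [0.4], [1.0], [3.0], [3.6]])
y = np.array([0, MISSING_LABEL, 0, 1, MISSING_LABEL])

# 'gamma'='mean' triggers the mean bandwidth heuristic of reference [2], i.e.,
# gamma = 0.5 * (N - 1) * ln((N - 1) / delta**2) / (2 * N * sum(variance)).
pwc = ParzenWindowClassifier(
    classes=[0, 1], metric="rbf", metric_dict={"gamma": "mean"}
)
pwc.fit(X, y)

print(pwc.predict_freq(X))   # kernel-weighted class counts, shape (5, 2)
print(pwc.predict_proba(X))  # frequencies normalized with the class prior
print(pwc.predict(X))        # class predictions derived from the probabilities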
from copy import deepcopy import numpy as np from scipy.spatial.distance import cdist from sklearn.mixture import GaussianMixture, BayesianGaussianMixture from sklearn.utils.validation import ( check_array, check_is_fitted, NotFittedError, ) from ..base import ClassFrequencyEstimator from ..utils import MISSING_LABEL, compute_vote_vectors class MixtureModelClassifier(ClassFrequencyEstimator): """MixtureModelClassifier The classifier based on a mixture model (MixtureModelClassifier) is a generative classifier based on a (Bayesian) Gaussian mixture model (GMM). Parameters ---------- mixture_model : sklearn.mixture.GaussianMixture or sklearn.mixture.BayesianGaussianMixture or None, default=None (Bayesian) Gaussian Mixture model that is trained with unsupervised algorithm on train data. If the initial mixture model is not fitted, it will be refitted in each call of the 'fit' method. If None, mixture_model=BayesianMixtureModel(n_components=n_classes) will be used. weight_mode : {'responsibilities', 'similarities'}, default='responsibilities' Determines whether the responsibilities outputted by the `mixture_model` or the exponentials of the Mahalanobis distances as similarities are used to compute the class frequency estimates. classes : array-like, shape (n_classes), default=None Holds the label for each class. If none, the classes are determined during the fit. missing_label : scalar or str or np.nan or None, default=np.nan Value to represent a missing label. cost_matrix : array-like, shape (n_classes, n_classes) Cost matrix with `cost_matrix[i,j]` indicating cost of predicting class `classes[j]` for a sample of class `classes[i]`. Can be only set, if `classes` is not none. class_prior : float or array-like of shape (n_classes), default=0 Prior observations of the class frequency estimates. If `class_prior` is an array, the entry `class_prior[i]` indicates the non-negative prior number of samples belonging to class `classes_[i]`. If `class_prior` is a float, `class_prior` indicates the non-negative prior number of samples per class. random_state : int or RandomState instance or None, default=None Determines random number for 'predict' method. Pass an int for reproducible results across multiple method calls. Attributes ---------- classes_ : array-like, shape (n_classes) Holds the label for each class after fitting. class_prior : np.ndarray, shape (n_classes) Prior observations of the class frequency estimates. The entry `class_prior_[i]` indicates the non-negative prior number of samples belonging to class `classes_[i]`. cost_matrix_ : np.ndarray, shape (classes, classes) Cost matrix with `cost_matrix_[i,j]` indicating cost of predicting class `classes_[j]` for a sample of class `classes_[i]`. F_components_ : numpy.ndarray, shape (n_components, n_classes) `F[j,c]` is a proxy for the number of sample of class c belonging to component j. mixture_model_ : sklearn.mixture.GaussianMixture or sklearn.mixture.BayesianGaussianMixture (Bayesian) Gaussian Mixture model that is trained with unsupervised algorithm on train data. 
""" def __init__( self, mixture_model=None, weight_mode="responsibilities", classes=None, missing_label=MISSING_LABEL, cost_matrix=None, class_prior=0.0, random_state=None, ): super().__init__( classes=classes, class_prior=class_prior, missing_label=missing_label, cost_matrix=cost_matrix, random_state=random_state, ) self.mixture_model = mixture_model self.weight_mode = weight_mode def fit(self, X, y, sample_weight=None): """Fit the model using `X` as training samples and `y` as class labels. Parameters ---------- X : matrix-like of shape (n_samples, n_features) The samples matrix `X` is the feature matrix representing the samples. y : array-like of shape (n_samples,) It contains the class labels of the training samples. sample_weight : array-like, shape (n_samples,) It contains the weights of the training samples' class labels. It must have the same shape as `y`. Returns ------- self: skactiveml.classifier.MixtureModelClassifier, `skactiveml.classifier.MixtureModelClassifier` object fitted on the training data. """ # Check input parameters. X, y, sample_weight = self._validate_data(X, y, sample_weight) self._check_n_features(X, reset=True) # Check mixture model. if self.mixture_model is None: bgm = BayesianGaussianMixture( n_components=len(self.classes_), random_state=self.random_state_, ) self.mixture_model_ = bgm else: if not isinstance( self.mixture_model, (GaussianMixture, BayesianGaussianMixture) ): raise TypeError( f"`mixture_model` is of the type `{self.mixture_model}` " f"but must be of the type " f"`sklearn.mixture.GaussianMixture` or " f"'sklearn.mixture.BayesianGaussianMixture'." ) self.mixture_model_ = deepcopy(self.mixture_model) # Check weight mode. if self.weight_mode not in ["responsibilities", "similarities"]: raise ValueError( f"`weight_mode` must be either 'responsibilities' or " f"'similarities', got {self.weight_mode} instead." ) if self.n_features_in_ is None: self.F_components_ = 0 else: # Refit model if desired. try: check_is_fitted(self.mixture_model_) except NotFittedError: self.mixture_model_ = self.mixture_model_.fit(X) # Counts number of votes per class label for each sample. V = compute_vote_vectors( y=y, w=sample_weight, classes=np.arange(len(self.classes_)), missing_label=-1, ) # Stores responsibility for every given sample of training set. R = self.mixture_model_.predict_proba(X) # Stores class frequency estimates per component. self.F_components_ = R.T @ V return self def predict_freq(self, X): """Return class frequency estimates for the input data `X`. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples. Returns ------- F : array-like of shape (n_samples, classes) The class frequency estimates of the input samples. Classes are ordered according to `classes_`. """ check_is_fitted(self) X = check_array(X) self._check_n_features(X, reset=False) if np.sum(self.F_components_) > 0: if self.weight_mode == "similarities": S = np.exp( -np.array( [ cdist( X, [self.mixture_model_.means_[j]], metric="mahalanobis", VI=self.mixture_model_.precisions_[j], ).ravel() for j in range(self.mixture_model_.n_components) ] ) ).T else: S = self.mixture_model_.predict_proba(X) F = S @ self.F_components_ else: F = np.zeros((len(X), len(self.classes_))) return F
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/classifier/_mixture_model_classifier.py
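A short usage sketch for the MixtureModelClassifier above. The unfitted GaussianMixture, the toy data, and the parameter choices are illustrative assumptions; as the code above shows, an unfitted mixture model is refitted inside `fit`. The import of `MixtureModelClassifier` from `skactiveml.classifier` assumes the package exports it there.

import numpy as np

from sklearn.mixture import GaussianMixture
from skactiveml.classifier import MixtureModelClassifier
from skactiveml.utils import MISSING_LABEL

# Two well-separated clusters; one sample is unlabeled.
X = np.array([[0.0, 0.1], [0.2, 0.0], [0.1, 0.3], [3.0, 3.1], [3.2, 2.9]])
y = np.array([0, 0, MISSING_LABEL, 1, 1])

mmc = MixtureModelClassifier(
    mixture_model=GaussianMixture(n_components=2, random_state=0),
    weight_mode="responsibilities",
    classes=[0, 1],
    random_state=0,
)
mmc.fit(X, y)  # fits the unfitted GMM on X, then aggregates the label votes

print(mmc.predict_freq(X))   # component-weighted class frequency estimates
print(mmc.predict_proba(X))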
from copy import deepcopy import numpy as np from sklearn.ensemble._base import _BaseHeterogeneousEnsemble from sklearn.utils.validation import check_array, check_is_fitted from ...base import SkactivemlClassifier from ...utils import MISSING_LABEL, is_labeled, compute_vote_vectors class AnnotatorEnsembleClassifier( _BaseHeterogeneousEnsemble, SkactivemlClassifier ): """AnnotatorEnsembleClassifier This strategy consists of fitting one classifier per annotator. Parameters ---------- estimators : list of (str, estimator) tuples The ensemble of estimators to use in the ensemble. Each element of the list is defined as a tuple of string (i.e. name of the estimator) and an estimator instance. voting : 'hard' or 'soft', default='hard' If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. classes : array-like of shape (n_classes,), default=None Holds the label for each class. If none, the classes are determined during the fit. missing_label : scalar or str or np.nan or None, default=np.nan Value to represent a missing label. cost_matrix : array-like of shape (n_classes, n_classes) Cost matrix with `cost_matrix[i,j]` indicating cost of predicting class `classes[j]` for a sample of class `classes[i]. Can be only set, if classes is not none. random_state : int or RandomState instance or None, default=None Determines random number for 'predict' method. Pass an int for reproducible results across multiple method calls. Attributes ---------- classes_ : np.ndarray of shape (n_classes,) Holds the label for each class after fitting. cost_matrix_ : np.ndarray of shape (classes, classes) Cost matrix with `cost_matrix_[i,j]` indicating cost of predicting class `classes_[j]` for a sample of class `classes_[i]. estimators_ : list of estimators The elements of the estimators parameter, having been fitted on the training data. If an estimator has been set to `'drop'`, it will not appear in `estimators_`. """ def __init__( self, estimators, voting="hard", classes=None, missing_label=MISSING_LABEL, cost_matrix=None, random_state=None, ): _BaseHeterogeneousEnsemble.__init__(self, estimators=estimators) SkactivemlClassifier.__init__( self, classes=classes, missing_label=missing_label, cost_matrix=cost_matrix, random_state=random_state, ) self.voting = voting def fit(self, X, y, sample_weight=None): """Fit the model using X as training data and y as class labels. Parameters ---------- X : array-like of shape (n_samples, n_features) The sample matrix X is the feature matrix representing the samples. y : array-like of shape (n_samples, n_estimators) It contains the class labels of the training samples. The number of class labels may be variable for the samples, where missing labels are represented the attribute `missing_label`. sample_weight : array-like of shape (n_samples, n_estimators) It contains the weights of the training samples' class labels. It must have the same shape as `y`. Returns ------- self: skactiveml.classifier.multiannotator.AnnotatorEnsembleClassifier, The `AnnotatorEnsembleClassifier` object fitted on the training data. """ # Check estimators. self._validate_estimators() # Check input parameters. 
self.check_X_dict_ = { "ensure_min_samples": 0, "ensure_min_features": 0, "allow_nd": True, "dtype": None, } X, y, sample_weight = self._validate_data( X=X, y=y, sample_weight=sample_weight, check_X_dict=self.check_X_dict_, y_ensure_1d=False, ) self._check_n_features(X, reset=True) # Copy estimators self.estimators_ = deepcopy(self.estimators) # Check for empty training data. if self.n_features_in_ is None: return self # Check number of estimators. error_msg = ( f"'y' must have shape (n_samples={len(y)}, n_estimators=" f"{len(self.estimators)}) but has shape {y.shape}." ) if ( self.named_estimators is not None and y.ndim <= 1 or y.shape[1] != len(self.estimators) ): raise ValueError(error_msg) # Check voting scheme. if self.voting not in ("soft", "hard"): raise ValueError( f"Voting must be 'soft' or 'hard'; " f"got `voting='{self.voting}'`)" ) # Fit each estimator for i, est in enumerate(self.estimators_): est[1].set_params(missing_label=-1) if self.classes is None or est[1].classes is None: est[1].set_params(classes=np.arange(len(self.classes_))) if sample_weight is None: est[1].fit(X=X, y=y[:, i]) else: est[1].fit(X=X, y=y[:, i], sample_weight=sample_weight[:, i]) return self def predict_proba(self, X): """Return probability estimates for the test data X. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. Returns ------- P : np.ndarray of shape (n_samples, classes) The class probabilities of the test samples. Classes are ordered according to `classes_`. """ check_is_fitted(self) X = check_array(X, **self.check_X_dict_) self._check_n_features(X, reset=False) if self.n_features_in_ is None: return np.ones((len(X), len(self.classes_))) / len(self.classes_) elif self.voting == "hard": y_pred = np.array( [est.predict(X) for _, est in self.estimators_] ).T V = compute_vote_vectors(y=y_pred, classes=self.classes_) P = V / np.sum(V, axis=1, keepdims=True) elif self.voting == "soft": P = np.array([est.predict_proba(X) for _, est in self.estimators_]) P = np.sum(P, axis=0) P /= np.sum(P, axis=1, keepdims=True) return P def _validate_estimators(self): _BaseHeterogeneousEnsemble._validate_estimators(self) for name, est in self.estimators: if not isinstance(est, SkactivemlClassifier): raise TypeError(f"'{est}' is not a 'SkactivemlClassifier'.") if self.voting == "soft" and not hasattr(est, "predict_proba"): raise ValueError( f"If 'voting' is soft, each classifier must " f"implement 'predict_proba' method. However, " f"{est} does not do so." ) error_msg = ( f"{est} of 'estimators' has 'missing_label=" f"{est.missing_label}' as attribute being unequal " f"to the given 'missing_label={self.missing_label}' " f"as parameter." ) try: if is_labeled([self.missing_label], est.missing_label)[0]: raise TypeError(error_msg) except TypeError: raise TypeError(error_msg) error_msg = ( f"{est} of 'estimators' has 'classes={est.classes}' " f"as attribute being unequal to the given 'classes=" f"{self.classes}' as parameter." ) classes_none = self.classes is None est_classes_none = est.classes is None if classes_none and not est_classes_none: raise ValueError(error_msg) if ( not classes_none and not est_classes_none and not np.array_equal(self.classes, est.classes) ): raise ValueError(error_msg)
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/classifier/multiannotator/_annotator_ensemble_classifier.py
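The sketch below illustrates the ensemble with one ParzenWindowClassifier per annotator. The two-annotator label matrix, the estimator names, and the 'soft' voting choice are assumptions for demonstration; the import path mirrors the module path given above.

import numpy as np

from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.classifier.multiannotator import AnnotatorEnsembleClassifier
from skactiveml.utils import MISSING_LABEL

X = np.array([[0.0], [0.5], [3.0], [3.5]])
# One label column per annotator; annotators may disagree or abstain.
y = np.array(
    [
        [0, 0],
        [0, MISSING_LABEL],
        [1, 1],
        [MISSING_LABEL, 1],
    ]
)

ensemble = AnnotatorEnsembleClassifier(
    estimators=[
        ("pwc_1", ParzenWindowClassifier(classes=[0, 1])),
        ("pwc_2", ParzenWindowClassifier(classes=[0, 1])),
    ],
    voting="soft",
    classes=[0, 1],
)
ensemble.fit(X, y)  # fits one estimator per annotator column of y

print(ensemble.predict_proba(X))  # averaged probabilities of both estimators
print(ensemble.predict(X))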
import warnings import numpy as np from scipy.optimize import minimize from scipy.special import softmax from scipy.stats import dirichlet from scipy.stats import multivariate_normal as multi_normal from sklearn.utils.validation import check_array, check_is_fitted, column_or_1d from ...base import SkactivemlClassifier, AnnotatorModelMixin from ...utils import ( MISSING_LABEL, compute_vote_vectors, rand_argmax, ext_confusion_matrix, ) class AnnotatorLogisticRegression(SkactivemlClassifier, AnnotatorModelMixin): """AnnotatorLogisticRegression Logistic Regression based on Raykar [1] is a classification algorithm that learns from multiple annotators. Besides, building a model for the classification task, the algorithm estimates the performance of the annotators. The performance of an annotator is assumed to only depend on the true label of a sample and not on the sample itself. Each annotator is assigned a confusion matrix, where each row is normalized. This contains the bias of the annotators decisions. These estimated biases are then used to refine the classifier itself. The classifier also supports a bayesian view on the problem, for this a prior distribution over an annotator's confusion matrix is assumed. It also assumes a prior distribution over the classifiers weight vectors corresponding to a regularization. Parameters ---------- tol : float, default=1.e-2 Threshold for stopping the EM-Algorithm, if the change of the expectation value between two steps is smaller than tol, the fit algorithm stops. max_iter : int, default=100 The maximum number of iterations of the EM-algorithm to be performed. fit_intercept : bool, default=True Specifies if a constant (a.k.a. bias or intercept) should be added to input samples. annot_prior_full : int or float or array-like, default=1 This parameter determines A as the Dirichlet prior for each annotator l (i.e., A[l] = annot_prior_full * np.ones(n_classes, n_classes) for numeric or A[l] = annot_prior_full[l] * np.ones(n_classes, n_classes) for array-like parameter). A[l,i,j] is the estimated number of times. annotator l has provided label j for an instance of true label i. annot_prior_diag : int or float or array-like, default=0 This parameter adds a value to the diagonal of A[l] being the Dirichlet prior for annotator l (i.e., A[l] += annot_prior_diag * np.eye(n_classes) for numeric or A[l] += annot_prior_diag[l] * np.ones(n_classes) for array-like parameter). A[l,i,j] is the estimated number of times annotator l has provided label j for an instance of true label i. weights_prior : int or float, default=1 Determines Gamma as the inverse covariance matrix of the prior distribution for every weight vector (i.e., Gamma=weights_prior * np.eye(n_features)). As default, the identity matrix is used for each weight vector. solver : str or callable, default='Newton-CG' Type of solver. Should be 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', or custom - a callable object. See scipy.optimize.minimize for more information. solver_dict : dictionary, default=None Additional solver options passed to scipy.optimize.minimize. If None, {'maxiter': 5} is passed. classes : array-like of shape (n_classes), default=None Holds the label for each class. If none, the classes are determined during the fit. missing_label : scalar or string or np.nan or None, default=np.nan Value to represent a missing label. 
cost_matrix : array-like of shape (n_classes, n_classes) Cost matrix with cost_matrix[i,j] indicating cost of predicting class classes[j] for a sample of class classes[i]. Can be only set, if classes is not none. random_state : int or RandomState instance or None, optional (default=None) Determines random number for 'predict' method. Pass an int for reproducible results across multiple method calls. Attributes ---------- n_annotators_ : int Number of annotators. W_ : numpy.ndarray of shape (n_features, n_classes) The weight vectors of the logistic regression model. Alpha_ : numpy.ndarray of shape (n_annotators, n_classes, n_classes) This is a confusion matrix for each annotator, where each row is normalized. `Alpha_[l,k,c]` describes the probability that annotator l provides the class label c for a sample belonging to class k. classes_ : array-like of shape (n_classes) Holds the label for each class after fitting. cost_matrix_ : array-like of shape (classes, classes) Cost matrix with C[i,j] indicating cost of predicting class classes_[j] for a sample of class classes_[i]. References ---------- .. [1] `Raykar, V. C., Yu, S., Zhao, L. H., Valadez, G. H., Florin, C., Bogoni, L., & Moy, L. (2010). Learning from crowds. Journal of Machine Learning Research, 11(4).`_ """ def __init__( self, tol=1.0e-2, max_iter=100, fit_intercept=True, annot_prior_full=1, annot_prior_diag=0, weights_prior=1, solver="Newton-CG", solver_dict=None, classes=None, cost_matrix=None, missing_label=MISSING_LABEL, random_state=None, ): super().__init__( classes=classes, missing_label=missing_label, cost_matrix=cost_matrix, random_state=random_state, ) self.tol = tol self.max_iter = max_iter self.fit_intercept = fit_intercept self.annot_prior_full = annot_prior_full self.annot_prior_diag = annot_prior_diag self.weights_prior = weights_prior self.solver = solver self.solver_dict = solver_dict def fit(self, X, y, sample_weight=None): """Fit the model using X as training data and y as class labels. Parameters ---------- X : matrix-like, shape (n_samples, n_features) The sample matrix X is the feature matrix representing the samples. y : array-like, shape (n_samples) or (n_samples, n_outputs) It contains the class labels of the training samples. The number of class labels may be variable for the samples, where missing labels are represented the attribute 'missing_label'. sample_weight : array-like, shape (n_samples) or (n_samples, n_outputs) It contains the weights of the training samples' class labels. It must have the same shape as y. Returns ------- self: AnnotatorLogisticRegression, The AnnotatorLogisticRegression is fitted on the training data. """ # Check input data. X, y, sample_weight = self._validate_data( X=X, y=y, sample_weight=sample_weight, y_ensure_1d=False ) self._check_n_features(X, reset=True) # Ensure value of 'tol' to be positive. if not isinstance(self.tol, float): raise TypeError( "`tol` must be an instance of float, not {}.".format( type(self.tol) ) ) if self.tol <= 0: raise ValueError("`tol`= {}, must be > 0.".format(self.tol)) # Ensure value of 'max_iter' to be positive. 
        if not isinstance(self.max_iter, int):
            raise TypeError(
                "`max_iter` must be an instance of int, not {}.".format(
                    type(self.max_iter)
                )
            )
        if self.max_iter <= 0:
            raise ValueError(
                "`max_iter`= {}, must be an integer >= 1.".format(
                    self.max_iter
                )
            )
        if not isinstance(self.fit_intercept, bool):
            raise TypeError(
                "'fit_intercept' must be of type 'bool', got {}".format(
                    type(self.fit_intercept)
                )
            )
        solver_dict = (
            {"maxiter": 5} if self.solver_dict is None else self.solver_dict
        )
        # Check weights prior.
        if not isinstance(self.weights_prior, (int, float)):
            raise TypeError(
                "'weights_prior' must be of a positive 'int' or "
                "'float', got {}".format(type(self.weights_prior))
            )
        if self.weights_prior < 0:
            raise ValueError(
                "'weights_prior' must be of a positive 'int' or "
                "'float', got {}".format(self.weights_prior)
            )

        # Check for empty training data.
        if self.n_features_in_ is None:
            return self

        if len(y.shape) != 2:
            raise ValueError(
                "`y` must be an array-like of shape "
                "`(n_samples, n_annotators)`."
            )

        # Insert bias, if 'fit_intercept' is set to 'True'.
        if self.fit_intercept:
            X = np.insert(X, 0, values=1, axis=1)

        # Ensure sample weights form a 2D array.
        if sample_weight is None:
            sample_weight = np.ones_like(y)

        # Set auxiliary variables.
        n_samples = X.shape[0]
        n_features = X.shape[1]
        n_classes = len(self.classes_)
        self.n_annotators_ = y.shape[1]
        I = np.eye(n_classes)

        # Convert Gamma to a matrix, if it is a number:
        Gamma = self.weights_prior * np.eye(n_features)
        all_zeroes = not np.any(Gamma)
        Gamma_tmp = Gamma if all_zeroes else np.linalg.inv(Gamma)

        # Check input 'annot_prior_full' and 'annot_prior_diag'.
        annot_prior = []
        for name, prior in [
            ("annot_prior_full", self.annot_prior_full),
            ("annot_prior_diag", self.annot_prior_diag),
        ]:
            if isinstance(prior, (int, float)):
                prior_array = np.ones(self.n_annotators_) * prior
            else:
                prior_array = column_or_1d(prior)
            if name == "annot_prior_full":
                is_invalid_prior = np.sum(prior_array <= 0)
            else:
                is_invalid_prior = np.sum(prior_array < 0)
            if len(prior_array) != self.n_annotators_ or is_invalid_prior:
                raise ValueError(
                    "'{}' must be either 'int', 'float' or "
                    "array-like with positive values and shape "
                    "(n_annotators), got {}".format(name, prior)
                )
            annot_prior.append(prior_array)

        # Set up prior matrix for each annotator.
        A = np.ones((self.n_annotators_, n_classes, n_classes))
        for a in range(self.n_annotators_):
            A[a] *= annot_prior[0][a]
            A[a] += np.eye(n_classes) * annot_prior[1][a]

        # Init Mu (i.e., estimates of true labels) with (weighted) majority
        # voting.
        Mu = compute_vote_vectors(
            y=y,
            classes=np.arange(n_classes),
            missing_label=-1,
            w=sample_weight,
        )
        Mu_sum = np.sum(Mu, axis=1)
        is_zero = Mu_sum == 0
        Mu[~is_zero] /= Mu_sum[~is_zero, np.newaxis]
        Mu[is_zero] = 1 / n_classes

        # Set initial weights.
        self.W_ = np.zeros((n_features, n_classes))

        # Use majority vote to initialize alpha, alpha_j is the confusion
        # matrix of annotator j.
        y_majority = rand_argmax(Mu, random_state=self.random_state, axis=1)
        self.Alpha_ = ext_confusion_matrix(
            y_true=y_majority,
            y_pred=y,
            normalize="true",
            missing_label=-1,
            classes=np.arange(n_classes),
        )

        # Initialize the first expectation to negative infinity such that
        # |current - new| < tol is False.
        current_expectation = -np.inf

        # Execute expectation maximization (EM) algorithm.
self.n_iter_ = 0 while self.n_iter_ < self.max_iter: # E-step: P = softmax(X @ self.W_, axis=1) V = self._calc_V(y, self.Alpha_) Mu = self._calc_Mu(V, P) new_expectation = self._calc_expectation( Mu, P, V, Gamma, A, self.Alpha_, self.W_ ) # Stop EM, if it converges (to a local maximum). if ( current_expectation == new_expectation or (new_expectation - current_expectation) < self.tol ): break # Update expectation value. current_expectation = new_expectation # M-Step: self._Alpha = self._calc_Alpha(y, Mu, A, sample_weight) def error(w): """ Evaluate cross-entropy error of weights for scipy.minimize. Parameters ---------- w : ndarray, shape (n_features * n_classes) Weights for which cross-entropy error is to be computed. Returns ------- G : flaot Computed cross-entropy error. """ W = w.reshape(n_features, n_classes) P_W = softmax(X @ W, axis=1) prior_W = 0 for c_idx in range(n_classes): prior_W += multi_normal.logpdf( x=W[:, c_idx], cov=Gamma_tmp, allow_singular=True ) log = np.sum(Mu * np.log(P_W * V + np.finfo(float).eps)) log += prior_W return -log / n_samples def grad(w): """ Compute gradient of error function for scipy.minimize. Parameters ---------- w : ndarray, shape (n_features * n_classes) Weights whose gradient is to be computed. Returns ------- G : narray, shape (n_features * n_classes) Computed gradient of weights. """ W = w.reshape(n_features, n_classes) P_W = softmax(X @ W, axis=1) G = (X.T @ (P_W - Mu) + Gamma @ W).ravel() return G / n_samples def hessian(w): """ Compute Hessian matrix of error function for scipy.minimize. Parameters ---------- w : numpy.ndarray, shape (n_features * n_classes) Weights whose Hessian matrix is to be computed. Returns ------- H : numpy.narray, shape (n_features * n_classes, n_features * n_classes) Computed Hessian matrix of weights. """ W = w.reshape(n_features, n_classes) H = np.empty((n_classes * n_features, n_classes * n_features)) P_W = softmax(X @ W, axis=1) for k in range(n_classes): for j in range(n_classes): diagonal = P_W[:, j] * (I[k, j] - P_W[:, k]) D = np.diag(diagonal) H_kj = X.T @ D @ X + Gamma H[ k * n_features : (k + 1) * n_features, j * n_features : (j + 1) * n_features, ] = H_kj return H / n_samples with warnings.catch_warnings(): warning_msg = ".*Method .* does not use Hessian information.*" warnings.filterwarnings("ignore", message=warning_msg) warning_msg = ".*Method .* does not use gradient information.*" warnings.filterwarnings("ignore", message=warning_msg) res = minimize( error, x0=self.W_.ravel(), method=self.solver, tol=self.tol, jac=grad, hess=hessian, options=solver_dict, ) self.W_ = res.x.reshape((n_features, n_classes)) self.n_iter_ += 1 return self def predict_proba(self, X): """Return probability estimates for the test data `X`. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. Returns ------- P : numpy.ndarray of shape (n_samples, classes) The class probabilities of the test samples. Classes are ordered according to `classes_`. """ # Check test samples. check_is_fitted(self) X = check_array(X) self._check_n_features(X, reset=False) # Prediction without training data. if self.n_features_in_ is None: return np.ones((len(X), len(self.classes_))) / len(self.classes_) # Check whether a bias feature is missing. if self.fit_intercept: X = np.insert(X, 0, values=1, axis=1) # Compute and normalize probabilities. P = softmax(X @ self.W_, axis=1) return P def predict_annotator_perf(self, X): """Calculates the probability that an annotator provides the true label for a given sample. 
The true label is hereby provided by the classification model. The label provided by an annotator l is based on his/her confusion matrix (i.e., attribute `Alpha_[l]`). Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. Returns ------- P_annot : numpy.ndarray of shape (n_samples, classes) `P_annot[i,l]` is the probability, that annotator l provides the correct class label for sample `X[i]`. """ # Prediction without training data. if self.n_features_in_ is None: return np.ones((len(X), 1)) / len(self.classes_) # Compute class probabilities. P = self.predict_proba(X) # Get correctness probabilities for each annotator per class. diag_Alpha = np.array( [np.diagonal(self._Alpha[j]) for j in range(self._Alpha.shape[0])] ) # Compute correctness probabilities for each annotator per sample. P_annot = P @ diag_Alpha.T return P_annot @staticmethod def _calc_V(y, Alpha): """Calculates a value used for updating Mu and the expectation. Parameters ---------- y: numpy.ndarray of shape (n_samples, n_annotators) The class labels provided by the annotators for all samples. Alpha: numpy.ndarray of shape (n_annotators, n_classes, n_classes) annot_prior vector (n_annotators, n_classes, n_classes) containing the new estimates for Alpha. This is effectively a confusion matrix for each annotator, where each row is normalized. Returns ------- out: numpy.ndarray Vector of shape (n_samples, n_classes). """ n_samples, _, n_classes = ( y.shape[0], y.shape[1], Alpha.shape[1], ) V = np.ones((n_samples, n_classes)) for c in range(n_classes): for k in range(n_classes): y_is_k = y == k V[:, c] *= np.prod(Alpha[:, c, k] ** y_is_k, axis=1) return V @staticmethod def _calc_Alpha(y, Mu, A, sample_weight): """Calculates the class-dependent performance estimates of the annotators. Parameters ---------- y : numpy.ndarray of shape (n_samples, n_annotators) The class labels provided by the annotators for all samples. Mu : numpy.ndarray of shape (n_samples, n_classes) Mu[i,k] contains the probability of a sample X[i] to be of class classes_[k] estimated according to the EM-algorithm. A : numpy.ndarray of shape (n_annotators, n_classes, n_classes) A[l,i,j] is the estimated number of times. annotator l has provided label j for an instance of true label i. sample_weight : numpy.ndarray of shape (n_samples, n_annotators) It contains the weights of the training samples' class labels. It must have the same shape as y. Returns ---------- new_Alpha : numpy.ndarray of shape (n_annotators, n_classes, n_classes) This is a confusion matrix for each annotator, where each row is normalized. `new_Alpha[l,k,c]` describes the probability that annotator l provides the class label c for a sample belonging to class k. """ n_annotators, n_classes = y.shape[1], Mu.shape[1] new_Alpha = np.zeros((n_annotators, n_classes, n_classes)) not_nan_y = ~np.isnan(y) for j in range(n_annotators): # Only take those rows from Y, where Y is not NaN: y_j = np.eye(n_classes)[y[not_nan_y[:, j], j].astype(int)] w_j = sample_weight[not_nan_y[:, j], j].reshape(-1, 1) new_Alpha[j] = (Mu[not_nan_y[:, j]].T @ (w_j * y_j)) + A[j] - 1 # Lazy normalization: (The real normalization factor # (sum_i=1^N mu_i,c + sum_k=0^K-1 A_j,c,k - K) is omitted here) with np.errstate(all="ignore"): new_Alpha = new_Alpha / new_Alpha.sum(axis=2, keepdims=True) new_Alpha = np.nan_to_num(new_Alpha, nan=1.0 / n_classes) return new_Alpha @staticmethod def _calc_Mu(V, P): """Calculates the new estimate for Mu, using Bayes' theorem. 
Parameters ---------- V : numpy.ndarray, shape (n_samples, n_classes) Describes an intermediate result. P : numpy.ndarray, shape (n_samples, n_classes) P[i,k] contains the probabilities of sample X[i] belonging to class classes_[k], as estimated by the classifier (i.e., sigmoid(W.T, X[i])). Returns ------- new_Mu : numpy.ndarray new_Mu[i,k] contains the probability of a sample X[i] to be of class classes_[k] estimated according to the EM-algorithm. """ new_Mu = P * V new_Mu_sum = np.sum(new_Mu, axis=1) is_zero = new_Mu_sum == 0 new_Mu[~is_zero] /= new_Mu_sum[~is_zero, np.newaxis] new_Mu[is_zero] = 1 / P.shape[1] return new_Mu @staticmethod def _calc_expectation(Mu, P, V, Gamma, A, Alpha, W): """Calculates the conditional expectation in the E-step of the EM-Algorithm, given the observations and the current estimates of the classifier. Parameters ---------- Mu : numpy.ndarray, shape (n_samples, n_classes) Mu[i,k] contains the probability of a sample X[i] to be of class classes_[k] estimated according to the EM-algorithm. V : numpy.ndarray, shape (n_samples, n_classes) Describes an intermediate result. P : numpy.ndarray, shape (n_samples, n_classes) P[i,k] contains the probabilities of sample X[i] belonging to class classes_[k], as estimated by the classifier (i.e., sigmoid(W.T, X[i])). Returns ------- expectation : float The conditional expectation. """ # Evaluate prior of weight vectors. all_zeroes = not np.any(Gamma) Gamma = Gamma if all_zeroes else np.linalg.inv(Gamma) prior_W = np.sum( [ multi_normal.logpdf(x=W[:, k], cov=Gamma, allow_singular=True) for k in range(W.shape[1]) ] ) # Evaluate prior of alpha matrices. prior_Alpha = np.sum( [ [ dirichlet.logpdf(x=Alpha[j, k, :], alpha=A[j, k, :]) for k in range(Alpha.shape[1]) ] for j in range(Alpha.shape[0]) ] ) # Evaluate log-likelihood for data. log_likelihood = np.sum(Mu * np.log(P * V + np.finfo(float).eps)) expectation = log_likelihood + prior_W + prior_Alpha return expectation
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/classifier/multiannotator/_annotator_logistic_regression.py
self.n_iter_ = 0 while self.n_iter_ < self.max_iter: # E-step: P = softmax(X @ self.W_, axis=1) V = self._calc_V(y, self.Alpha_) Mu = self._calc_Mu(V, P) new_expectation = self._calc_expectation( Mu, P, V, Gamma, A, self.Alpha_, self.W_ ) # Stop EM, if it converges (to a local maximum). if ( current_expectation == new_expectation or (new_expectation - current_expectation) < self.tol ): break # Update expectation value. current_expectation = new_expectation # M-Step: self._Alpha = self._calc_Alpha(y, Mu, A, sample_weight) def error(w): """ Evaluate cross-entropy error of weights for scipy.minimize. Parameters ---------- w : ndarray, shape (n_features * n_classes) Weights for which cross-entropy error is to be computed. Returns ------- G : flaot Computed cross-entropy error. """ W = w.reshape(n_features, n_classes) P_W = softmax(X @ W, axis=1) prior_W = 0 for c_idx in range(n_classes): prior_W += multi_normal.logpdf( x=W[:, c_idx], cov=Gamma_tmp, allow_singular=True ) log = np.sum(Mu * np.log(P_W * V + np.finfo(float).eps)) log += prior_W return -log / n_samples def grad(w): """ Compute gradient of error function for scipy.minimize. Parameters ---------- w : ndarray, shape (n_features * n_classes) Weights whose gradient is to be computed. Returns ------- G : narray, shape (n_features * n_classes) Computed gradient of weights. """ W = w.reshape(n_features, n_classes) P_W = softmax(X @ W, axis=1) G = (X.T @ (P_W - Mu) + Gamma @ W).ravel() return G / n_samples def hessian(w): """ Compute Hessian matrix of error function for scipy.minimize. Parameters ---------- w : numpy.ndarray, shape (n_features * n_classes) Weights whose Hessian matrix is to be computed. Returns ------- H : numpy.narray, shape (n_features * n_classes, n_features * n_classes) Computed Hessian matrix of weights. """ W = w.reshape(n_features, n_classes) H = np.empty((n_classes * n_features, n_classes * n_features)) P_W = softmax(X @ W, axis=1) for k in range(n_classes): for j in range(n_classes): diagonal = P_W[:, j] * (I[k, j] - P_W[:, k]) D = np.diag(diagonal) H_kj = X.T @ D @ X + Gamma H[ k * n_features : (k + 1) * n_features, j * n_features : (j + 1) * n_features, ] = H_kj return H / n_samples with warnings.catch_warnings(): warning_msg = ".*Method .* does not use Hessian information.*" warnings.filterwarnings("ignore", message=warning_msg) warning_msg = ".*Method .* does not use gradient information.*" warnings.filterwarnings("ignore", message=warning_msg) res = minimize( error, x0=self.W_.ravel(), method=self.solver, tol=self.tol, jac=grad, hess=hessian, options=solver_dict, ) self.W_ = res.x.reshape((n_features, n_classes)) self.n_iter_ += 1 return self def predict_proba(self, X): """Return probability estimates for the test data `X`. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. Returns ------- P : numpy.ndarray of shape (n_samples, classes) The class probabilities of the test samples. Classes are ordered according to `classes_`. """ # Check test samples. check_is_fitted(self) X = check_array(X) self._check_n_features(X, reset=False) # Prediction without training data. if self.n_features_in_ is None: return np.ones((len(X), len(self.classes_))) / len(self.classes_) # Check whether a bias feature is missing. if self.fit_intercept: X = np.insert(X, 0, values=1, axis=1) # Compute and normalize probabilities. P = softmax(X @ self.W_, axis=1) return P def predict_annotator_perf(self, X): """Calculates the probability that an annotator provides the true label for a given sample. 
The true label is hereby provided by the classification model. The label provided by an annotator l is based on his/her confusion matrix (i.e., attribute `Alpha_[l]`). Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. Returns ------- P_annot : numpy.ndarray of shape (n_samples, classes) `P_annot[i,l]` is the probability, that annotator l provides the correct class label for sample `X[i]`. """ # Prediction without training data. if self.n_features_in_ is None: return np.ones((len(X), 1)) / len(self.classes_) # Compute class probabilities. P = self.predict_proba(X) # Get correctness probabilities for each annotator per class. diag_Alpha = np.array( [np.diagonal(self._Alpha[j]) for j in range(self._Alpha.shape[0])] ) # Compute correctness probabilities for each annotator per sample. P_annot = P @ diag_Alpha.T return P_annot @staticmethod def _calc_V(y, Alpha): """Calculates a value used for updating Mu and the expectation. Parameters ---------- y: numpy.ndarray of shape (n_samples, n_annotators) The class labels provided by the annotators for all samples. Alpha: numpy.ndarray of shape (n_annotators, n_classes, n_classes) annot_prior vector (n_annotators, n_classes, n_classes) containing the new estimates for Alpha. This is effectively a confusion matrix for each annotator, where each row is normalized. Returns ------- out: numpy.ndarray Vector of shape (n_samples, n_classes). """ n_samples, _, n_classes = ( y.shape[0], y.shape[1], Alpha.shape[1], ) V = np.ones((n_samples, n_classes)) for c in range(n_classes): for k in range(n_classes): y_is_k = y == k V[:, c] *= np.prod(Alpha[:, c, k] ** y_is_k, axis=1) return V @staticmethod def _calc_Alpha(y, Mu, A, sample_weight): """Calculates the class-dependent performance estimates of the annotators. Parameters ---------- y : numpy.ndarray of shape (n_samples, n_annotators) The class labels provided by the annotators for all samples. Mu : numpy.ndarray of shape (n_samples, n_classes) Mu[i,k] contains the probability of a sample X[i] to be of class classes_[k] estimated according to the EM-algorithm. A : numpy.ndarray of shape (n_annotators, n_classes, n_classes) A[l,i,j] is the estimated number of times. annotator l has provided label j for an instance of true label i. sample_weight : numpy.ndarray of shape (n_samples, n_annotators) It contains the weights of the training samples' class labels. It must have the same shape as y. Returns ---------- new_Alpha : numpy.ndarray of shape (n_annotators, n_classes, n_classes) This is a confusion matrix for each annotator, where each row is normalized. `new_Alpha[l,k,c]` describes the probability that annotator l provides the class label c for a sample belonging to class k. """ n_annotators, n_classes = y.shape[1], Mu.shape[1] new_Alpha = np.zeros((n_annotators, n_classes, n_classes)) not_nan_y = ~np.isnan(y) for j in range(n_annotators): # Only take those rows from Y, where Y is not NaN: y_j = np.eye(n_classes)[y[not_nan_y[:, j], j].astype(int)] w_j = sample_weight[not_nan_y[:, j], j].reshape(-1, 1) new_Alpha[j] = (Mu[not_nan_y[:, j]].T @ (w_j * y_j)) + A[j] - 1 # Lazy normalization: (The real normalization factor # (sum_i=1^N mu_i,c + sum_k=0^K-1 A_j,c,k - K) is omitted here) with np.errstate(all="ignore"): new_Alpha = new_Alpha / new_Alpha.sum(axis=2, keepdims=True) new_Alpha = np.nan_to_num(new_Alpha, nan=1.0 / n_classes) return new_Alpha @staticmethod def _calc_Mu(V, P): """Calculates the new estimate for Mu, using Bayes' theorem. 
Parameters ---------- V : numpy.ndarray, shape (n_samples, n_classes) Describes an intermediate result. P : numpy.ndarray, shape (n_samples, n_classes) P[i,k] contains the probabilities of sample X[i] belonging to class classes_[k], as estimated by the classifier (i.e., sigmoid(W.T, X[i])). Returns ------- new_Mu : numpy.ndarray new_Mu[i,k] contains the probability of a sample X[i] to be of class classes_[k] estimated according to the EM-algorithm. """ new_Mu = P * V new_Mu_sum = np.sum(new_Mu, axis=1) is_zero = new_Mu_sum == 0 new_Mu[~is_zero] /= new_Mu_sum[~is_zero, np.newaxis] new_Mu[is_zero] = 1 / P.shape[1] return new_Mu @staticmethod def _calc_expectation(Mu, P, V, Gamma, A, Alpha, W): """Calculates the conditional expectation in the E-step of the EM-Algorithm, given the observations and the current estimates of the classifier. Parameters ---------- Mu : numpy.ndarray, shape (n_samples, n_classes) Mu[i,k] contains the probability of a sample X[i] to be of class classes_[k] estimated according to the EM-algorithm. V : numpy.ndarray, shape (n_samples, n_classes) Describes an intermediate result. P : numpy.ndarray, shape (n_samples, n_classes) P[i,k] contains the probabilities of sample X[i] belonging to class classes_[k], as estimated by the classifier (i.e., sigmoid(W.T, X[i])). Returns ------- expectation : float The conditional expectation. """ # Evaluate prior of weight vectors. all_zeroes = not np.any(Gamma) Gamma = Gamma if all_zeroes else np.linalg.inv(Gamma) prior_W = np.sum( [ multi_normal.logpdf(x=W[:, k], cov=Gamma, allow_singular=True) for k in range(W.shape[1]) ] ) # Evaluate prior of alpha matrices. prior_Alpha = np.sum( [ [ dirichlet.logpdf(x=Alpha[j, k, :], alpha=A[j, k, :]) for k in range(Alpha.shape[1]) ] for j in range(Alpha.shape[0]) ] ) # Evaluate log-likelihood for data. log_likelihood = np.sum(Mu * np.log(P * V + np.finfo(float).eps)) expectation = log_likelihood + prior_W + prior_Alpha return expectation
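The EM scheme above alternates between estimating the latent true labels (`Mu`) and re-estimating the annotator confusion matrices and the logistic-regression weights. The following usage sketch is illustrative only: the synthetic data, the annotator noise levels, and the hyperparameters are assumptions, and the public import path is assumed to mirror the module location shown above.

# Minimal usage sketch for AnnotatorLogisticRegression (synthetic data, assumed import path).
import numpy as np
from skactiveml.classifier.multiannotator import AnnotatorLogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))                      # 100 samples, 2 features
y_true = (X[:, 0] + X[:, 1] > 0).astype(float)     # hidden ground-truth labels
# Three annotators with different reliabilities; np.nan marks missing annotations.
y = np.column_stack(
    [np.where(rng.random(100) < acc, y_true, 1 - y_true) for acc in (0.9, 0.8, 0.6)]
)
y[rng.random(y.shape) < 0.3] = np.nan

clf = AnnotatorLogisticRegression(classes=[0, 1], missing_label=np.nan, max_iter=50)
clf.fit(X, y)
proba = clf.predict_proba(X)          # class probabilities per sample
perf = clf.predict_annotator_perf(X)  # estimated per-annotator correctness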
import numpy as np from scipy.stats import t from sklearn.metrics.pairwise import pairwise_kernels, KERNEL_PARAMS from sklearn.utils import check_array from sklearn.utils.validation import check_is_fitted from skactiveml.base import ProbabilisticRegressor from skactiveml.utils import ( is_labeled, MISSING_LABEL, check_scalar, check_type, ) class NICKernelRegressor(ProbabilisticRegressor): """NICKernelRegressor The NICKernelRegressor (Normal inverse chi kernel regressor) locally fits a t-distribution using the training data, weighting the samples by a kernel. Parameters __________ metric : str or callable, default='rbf' The metric must a be a valid kernel defined by the function `sklearn.metrics.pairwise.pairwise_kernels`. metric_dict : dict, optional (default=None) Any further parameters are passed directly to the kernel function. mu_0 : int or float, optional (default=0) The prior mean. kappa_0 : int or float, optional (default=0.1) The weight of the prior mean. sigma_sq_0: int or float, optional (default=1.0) The prior variance. nu_0 : int or float, optional (default=2.5) The weight of the prior variance. missing_label : scalar, string, np.nan, or None, default=np.nan Value to represent a missing label. random_state : int, RandomState instance or None, optional (default=None) Determines random number for 'predict' method. Pass an int for reproducible results across multiple method calls. """ METRICS = list(KERNEL_PARAMS.keys()) + ["precomputed"] def __init__( self, metric="rbf", metric_dict=None, mu_0=0, kappa_0=0.1, sigma_sq_0=1.0, nu_0=2.5, missing_label=MISSING_LABEL, random_state=None, ): super().__init__( random_state=random_state, missing_label=missing_label ) self.kappa_0 = kappa_0 self.nu_0 = nu_0 self.mu_0 = mu_0 self.sigma_sq_0 = sigma_sq_0 self.metric = metric self.metric_dict = metric_dict def fit(self, X, y, sample_weight=None): """Fit the model using X as training data and y as class labels. Parameters ---------- X : matrix-like, shape (n_samples, n_features) Training data set, usually complete, i.e. including the labeled and unlabeled samples. y : array-like, shape (n_samples) or (n_samples, n_targets) Labels of the training data set (possibly including unlabeled ones indicated by `self.missing_label`). sample_weight : array-like, shape (n_samples) It contains the weights of the training samples' values. Returns ------- self: SkactivemlRegressor, The SkactivemlRegressor is fitted on the training data. """ X, y, sample_weight = self._validate_data(X, y, sample_weight) is_lbld = is_labeled(y, missing_label=self.missing_label_) for (value, name) in [ (self.kappa_0, "self.kappa_0"), (self.nu_0, "self.nu_0"), (self.sigma_sq_0, "self.sigma_sq_0"), ]: check_scalar(value, name, (int, float), min_val=0) check_scalar(self.mu_0, "self.mu_0", (int, float)) self.X_ = X[is_lbld] self.y_ = y[is_lbld] self.prior_params_ = ( self.kappa_0, self.nu_0, self.mu_0, self.sigma_sq_0, ) if sample_weight is not None: self.weights_ = sample_weight[is_lbld] if np.sum(self.weights_) == 0: raise ValueError( "The sample weights of the labeled samples " "must not be all zero." 
) else: self.weights_ = None check_type(self.metric, "self.metric", target_vals=self.METRICS) self.metric_dict = {} if self.metric_dict is None else self.metric_dict check_type( self.metric_dict, "self.metric_dict", dict, target_vals=[None] ) return self def _estimate_ml_params(self, X): K = pairwise_kernels( X, self.X_, metric=self.metric, **self.metric_dict ) if self.weights_ is not None: K = self.weights_.reshape(1, -1) * K N = np.sum(K, axis=1) mu_ml = K @ self.y_ / N scatter = np.sum( K * (self.y_[np.newaxis, :] - mu_ml[:, np.newaxis]) ** 2, axis=1 ) var_ml = 1 / N * scatter return N, mu_ml, var_ml def _estimate_update_params(self, X): if len(self.X_) != 0: N, mu_ml, var_ml = self._estimate_ml_params(X) update_params = (N, N, mu_ml, var_ml) return update_params else: neutral_params = (np.zeros(len(X)),) * 4 return neutral_params def predict_target_distribution(self, X): """Returns the estimated target distribution conditioned on the test samples `X`. Parameters ---------- X : array-like, shape (n_samples, n_features) Input samples. Returns ------- dist : scipy.stats._distn_infrastructure.rv_frozen The distribution of the targets at the test samples. """ check_is_fitted(self) X = check_array(X) prior_params = self.prior_params_ update_params = self._estimate_update_params(X) post_params = _combine_params(prior_params, update_params) kappa_post, nu_post, mu_post, sigma_sq_post = post_params df = nu_post loc = mu_post scale = np.sqrt((1 + kappa_post) / kappa_post * sigma_sq_post) return t(df=df, loc=loc, scale=scale) def _combine_params(prior_params, update_params): kappa_1, nu_1, mu_1, sigma_sq_1 = prior_params kappa_2, nu_2, mu_2, sigma_sq_2 = update_params kappa_com = kappa_1 + kappa_2 nu_com = nu_1 + nu_2 mu_com = (kappa_1 * mu_1 + kappa_2 * mu_2) / kappa_com scatter_com = ( nu_1 * sigma_sq_1 + nu_2 * sigma_sq_2 + kappa_1 * kappa_2 * (mu_1 - mu_2) ** 2 / kappa_com ) sigma_sq_com = scatter_com / nu_com return kappa_com, nu_com, mu_com, sigma_sq_com class NadarayaWatsonRegressor(NICKernelRegressor): """NadarayaWatsonRegressor The Nadaraya Watson Regressor predicts the target value by taking a weighted average based on a kernel. It is implemented asa `NICKernelRegressor` with different prior values. Parameters __________ metric : str or callable, default='rbf' The metric must a be a valid kernel defined by the function `sklearn.metrics.pairwise.pairwise_kernels`. metric_dict : dict, optional (default=None) Any further parameters are passed directly to the kernel function. missing_label : scalar, string, np.nan, or None, default=np.nan Value to represent a missing label. random_state : int, RandomState instance or None, optional (default=None) Determines random number for 'predict' method. Pass an int for reproducible results across multiple method calls. """ def __init__( self, metric="rbf", metric_dict=None, missing_label=MISSING_LABEL, random_state=None, ): super().__init__( random_state=random_state, missing_label=missing_label, metric=metric, metric_dict=metric_dict, kappa_0=0, nu_0=3, sigma_sq_0=1, )
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/regressor/_nic_kernel_regressor.py
_nic_kernel_regressor.py
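The `_combine_params` helper above performs the conjugate update of a normal-inverse-chi-squared prior, with the kernel-weighted sample count acting as both the mean weight and the variance weight of the update. A brief usage sketch follows; the toy data, the RBF bandwidth, and the public import path are assumptions for illustration.

# Minimal usage sketch for NICKernelRegressor (synthetic data, assumed import path).
import numpy as np
from skactiveml.regressor import NICKernelRegressor

rng = np.random.default_rng(0)
X = rng.uniform(-3, 3, size=(50, 1))
y = np.sin(X).ravel() + rng.normal(scale=0.1, size=50)
y[::5] = np.nan                                   # every fifth target is unlabeled

reg = NICKernelRegressor(metric="rbf", metric_dict={"gamma": 2.0}, missing_label=np.nan)
reg.fit(X, y)
X_test = np.linspace(-3, 3, 5).reshape(-1, 1)
dist = reg.predict_target_distribution(X_test)    # frozen scipy t-distribution
print(dist.mean(), dist.std())                    # posterior predictive moments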
import inspect import warnings from copy import deepcopy from operator import attrgetter import numpy as np from scipy.stats import norm from sklearn.base import MetaEstimatorMixin, is_regressor from sklearn.exceptions import NotFittedError from sklearn.utils import metaestimators from sklearn.utils.validation import ( has_fit_parameter, check_array, check_is_fitted, ) from skactiveml.base import SkactivemlRegressor, ProbabilisticRegressor from skactiveml.utils._functions import _available_if from skactiveml.utils._label import is_labeled, MISSING_LABEL class SklearnRegressor(SkactivemlRegressor, MetaEstimatorMixin): """SklearnRegressor Implementation of a wrapper class for scikit-learn regressors such that missing labels can be handled. Therefore, samples with missing values are filtered. Parameters ---------- estimator : sklearn.base.RegressorMixin with predict method scikit-learn regressor. missing_label : scalar or string or np.nan or None, default=np.nan Value to represent a missing label. random_state : int or RandomState instance or None, default=None Determines random number for 'predict' method. Pass an int for reproducible results across multiple method calls. """ def __init__( self, estimator, missing_label=MISSING_LABEL, random_state=None ): super().__init__( random_state=random_state, missing_label=missing_label ) self.estimator = estimator def fit(self, X, y, sample_weight=None, **fit_kwargs): """Fit the model using X as training data and y as labels. Parameters ---------- X : matrix-like, shape (n_samples, n_features) The sample matrix X is the feature matrix representing the samples. y : array-like, shape (n_samples) It contains the values of the training samples. Missing labels are represented as 'np.nan'. sample_weight : array-like, shape (n_samples), optional (default=None) It contains the weights of the training samples´ labels. It must have the same shape as y. fit_kwargs : dict-like Further parameters are passed as input to the 'fit' method of the 'estimator'. Returns ------- self: SklearnRegressor, The SklearnRegressor is fitted on the training data. """ return self._fit( fit_function="fit", X=X, y=y, sample_weight=sample_weight, **fit_kwargs, ) @_available_if("partial_fit", hasattr(metaestimators, "available_if")) def partial_fit(self, X, y, sample_weight=None, **fit_kwargs): """Partially fitting the model using X as training data and y as class labels. Parameters ---------- X : matrix-like, shape (n_samples, n_features) The sample matrix X is the feature matrix representing the samples. y : array-like, shape (n_samples) or (n_samples, n_outputs) It contains the numeric labels of the training samples. Missing labels are represented the attribute 'missing_label'. In case of multiple labels per sample (i.e., n_outputs > 1), the samples are duplicated. sample_weight : array-like, shape (n_samples) or (n_samples, n_outputs) It contains the weights of the training samples' numeric labels. It must have the same shape as y. fit_kwargs : dict-like Further parameters as input to the 'fit' method of the 'estimator'. Returns ------- self : SklearnRegressor, The SklearnRegressor is fitted on the training data. 
""" return self._fit( fit_function="partial_fit", X=X, y=y, sample_weight=sample_weight, **fit_kwargs, ) def _fit(self, fit_function, X, y, sample_weight, **fit_kwargs): if not is_regressor(estimator=self.estimator): raise TypeError( "'{}' must be a scikit-learn " "regressor.".format(self.estimator) ) self.estimator_ = deepcopy(self.estimator) self._label_mean = 0 self._label_std = 1 self.check_X_dict_ = { "ensure_min_samples": 0, "ensure_min_features": 0, "allow_nd": True, "dtype": None, } X, y, sample_weight = self._validate_data( X, y, sample_weight, check_X_dict=self.check_X_dict_ ) is_lbld = is_labeled(y, missing_label=self.missing_label_) X_labeled = X[is_lbld] y_labeled = y[is_lbld] estimator_params = dict(fit_kwargs) if fit_kwargs is not None else {} if ( has_fit_parameter(self.estimator_, "sample_weight") and sample_weight is not None ): sample_weight_labeled = sample_weight[is_lbld] estimator_params["sample_weight"] = sample_weight_labeled if np.sum(is_lbld) != 0: self._label_mean = np.mean(y[is_lbld]) self._label_std = np.std(y[is_lbld]) if np.sum(is_lbld) > 1 else 1 try: attrgetter(fit_function)(self.estimator_)( X_labeled, y_labeled, **estimator_params ) except Exception as e: warnings.warn( f"The 'estimator' could not be fitted because of" f" '{e}'. Therefore, the empirical label mean " f"`_label_mean={self._label_mean}` and the " f"empirical label standard deviation " f"`_label_std={self._label_std}` will be used to make " f"predictions." ) return self def predict(self, X, **predict_kwargs): """Return label predictions for the input data X. Parameters ---------- X : array-like, shape (n_samples, n_features) Input samples. predict_kwargs : dict-like Further parameters are passed as input to the 'predict' method of the 'estimator'. If the estimator could not be fitted, only `return_std` is supported as keyword argument. Returns ------- y : array-like, shape (n_samples) Predicted labels of the input samples. """ check_is_fitted(self) X = check_array(X, **self.check_X_dict_) self._check_n_features(X, reset=False) try: return self.estimator_.predict(X, **predict_kwargs) except NotFittedError: warnings.warn( f"Since the 'estimator' could not be fitted when" f" calling the `fit` method, the label " f"mean `_label_mean={self._label_mean}` and optionally the " f"label standard deviation `_label_std={self._label_std}` is " f"used to make the predictions." ) has_std = predict_kwargs.pop("return_std", False) if has_std: return ( np.full(len(X), self._label_mean), np.full(len(X), self._label_std), ) else: return np.full(len(X), self._label_mean) @_available_if( ("sample_y", "sample"), hasattr(metaestimators, "available_if") ) def sample_y(self, X, n_samples=1, random_state=None): """Assumes a probabilistic regressor. Samples are drawn from a predicted target distribution. Parameters ---------- X : array-like, shape (n_samples_X, n_features) Input samples, where the target values are drawn from. n_samples: int, optional (default=1) Number of random samples to be drawn. random_state : int, RandomState instance or None, optional (default=None) Determines random number generation to randomly draw samples. Pass an int for reproducible results across multiple method calls. Returns ------- y_samples : ndarray of shape (n_samples_X, n_samples) Drawn random target samples. 
""" check_is_fitted(self) if hasattr(self.estimator_, "sample_y"): return self.estimator_.sample_y(X, n_samples, random_state) else: return self.estimator_.sample(X, n_samples) def __sklearn_is_fitted__(self): return hasattr(self, "_label_mean") def __getattr__(self, item): if "estimator_" in self.__dict__: return getattr(self.estimator_, item) else: return getattr(self.estimator, item) class SklearnNormalRegressor(ProbabilisticRegressor, SklearnRegressor): """SklearnNormalRegressor Implementation of a wrapper class for scikit-learn probabilistic regressors such that missing labels can be handled and the target distribution can be estimated. Therefore, samples with missing values are filtered and a normal distribution is fitted to the predicted standard deviation. The wrapped regressor of sklearn needs `return_std` as a key_word argument for `predict`. Parameters ---------- estimator : sklearn.base.RegressorMixin with predict method scikit-learn regressor. missing_label : scalar or string or np.nan or None, default=np.nan Value to represent a missing label. random_state : int or RandomState instance or None, default=None Determines random number for 'predict' method. Pass an int for reproducible results across multiple method calls. """ def __init__( self, estimator, missing_label=MISSING_LABEL, random_state=None ): super().__init__( estimator, missing_label=missing_label, random_state=random_state ) def predict_target_distribution(self, X): """Returns the estimated target normal distribution conditioned on the test samples `X`. Parameters ---------- X : array-like, shape (n_samples, n_features) Input samples. Returns ------- dist : scipy.stats._distn_infrastructure.rv_frozen The distribution of the targets at the test samples. """ check_is_fitted(self) if ( "return_std" not in inspect.signature(self.estimator.predict).parameters.keys() ): raise ValueError( f"`{self.estimator}` must have key_word argument" f"`return_std` for predict." ) X = check_array(X) loc, scale = SklearnRegressor.predict(self, X, return_std=True) return norm(loc=loc, scale=scale)
scikit-activeml
/scikit_activeml-0.4.1-py3-none-any.whl/skactiveml/regressor/_wrapper.py
_wrapper.py
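The wrapper above filters unlabeled samples before delegating to the scikit-learn estimator and falls back to the empirical label mean and standard deviation if fitting fails. The sketch below wraps a Gaussian process regressor; the toy data and the public import path are assumptions for illustration.

# Minimal usage sketch for SklearnNormalRegressor (synthetic data, assumed import path).
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from skactiveml.regressor import SklearnNormalRegressor

rng = np.random.default_rng(0)
X = rng.uniform(0, 1, size=(30, 1))
y = 3 * X.ravel() + rng.normal(scale=0.05, size=30)
y[:10] = np.nan                                    # unlabeled samples are filtered out

reg = SklearnNormalRegressor(GaussianProcessRegressor(), missing_label=np.nan)
reg.fit(X, y)
y_pred = reg.predict(X[:5])                        # point predictions
dist = reg.predict_target_distribution(X[:5])      # frozen scipy normal distribution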
scikit-aero
===========

:Name: scikit-aero
:Website: https://github.com/Juanlu001/scikit-aero
:Author: Juan Luis Cano <[email protected]>
:Version: 0.1

scikit-aero is a Python package for various aeronautical engineering
calculations. It is based on several existing Python packages in the field,
but intends to provide pythonic syntax, use of SI units and full NumPy array
support among other things. scikit-aero is licensed under the BSD license.

It was started by Juan Luis Cano in 2012 and it is currently developed and
maintained by him. The source code and issue tracker are both hosted on
GitHub

https://github.com/Juanlu001/scikit-aero

**Notice**: This package is under heavy development and the API might change
at any time until a 1.0 version is reached. It is stable but not feature
complete yet, and it might contain bugs.

Features
--------

* Pythonic interface.
* Use of SI units.
* Full support of NumPy arrays.
* Support for both Python 2 and 3.
* Fully tested and documented.
* Standard atmosphere properties up to 11 kilometers (troposphere).
* Gas dynamics calculations.

Future
------

* Full COESA model.
* Airspeed conversions.
* Coordinate systems.
* Most of the PDAS.

Usage
=====

Atmosphere properties::

    >>> from skaero.atmosphere import coesa
    >>> h, T, p, rho = coesa.table(1000)  # Altitude by default, 1 km

Inverse computations are allowed with density and pressure, which are
monotonic::

    >>> h, T, p, rho = coesa.table(p=101325)  # Pressure of 1 atm

Gas dynamics calculations::

    >>> from skaero.gasdynamics import isentropic, shocks
    >>> fl = isentropic.IsentropicFlow(gamma=1.4)
    >>> p = 101325 * fl.p_p0(M=0.8)  # Static pressure given total pressure of 1 atm
    >>> ns = shocks.NormalShock(M_1=2.5, gamma=1.4)
    >>> M_2 = ns.M_2  # Mach number behind a normal shock wave

Dependencies
============

This package depends on Python, NumPy and SciPy and is usually tested on
Linux with the following versions:

* Python 2.7, NumPy 1.6, SciPy 0.11
* Python 3.3, NumPy 1.7.0b2, SciPy 0.11.0

but there is no reason it shouldn't work on Windows or Mac OS X. If you are
willing to provide testing on these platforms, please
`contact me <mailto:[email protected]>`_ and if you find any bugs file them on
the `issue tracker`_.

Install
=======

This package uses distutils. To install, execute as usual::

    $ python setup.py install

It is recommended that you **never ever use sudo** with distutils, pip,
setuptools and friends in Linux because you might seriously break your
system [1_][2_][3_][4_]. I recommend using `virtualenv`_, `per user
directories`_ or `local installations`_.

.. _1: http://wiki.python.org/moin/CheeseShopTutorial#Distutils_Installation
.. _2: http://stackoverflow.com/questions/4314376/how-can-i-install-a-python-egg-file/4314446#comment4690673_4314446
.. _3: http://workaround.org/easy-install-debian
.. _4: http://matplotlib.1069221.n5.nabble.com/Why-is-pip-not-mentioned-in-the-Installation-Documentation-tp39779p39812.html
.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
.. _`per user directories`: http://stackoverflow.com/a/7143496/554319
.. _`local installations`: http://stackoverflow.com/a/4325047/554319

Testing
=======

scikit-aero recommends py.test for running the test suite. Running from the
top directory::

    $ py.test

Bug reporting
=============

I am pretty sure I never introduce bugs in my code, but if you want to prove
me wrong please refer to the `issue tracker`_ on GitHub.

.. _`issue tracker`: https://github.com/Juanlu001/scikit-aero/issues

Citing
======

If you use scikit-aero on your project, please
`drop me a line <mailto:[email protected]>`_.

License
=======

scikit-aero is released under a 2-clause BSD license, hence allowing
commercial use of the library. Please refer to the COPYING file.

See also
========

* `AeroCalc`_, package written by Kevin Horton which inspired scikit-aero.
* `MATLAB Aerospace Toolbox`_.
* `PDAS`_, the Public Domain Aeronautical Software.

.. _Aerocalc: http://pypi.python.org/pypi/AeroCalc/0.11
.. _`MATLAB Aerospace Toolbox`: http://www.mathworks.com/help/aerotbx/index.html
.. _PDAS: http://www.pdas.com/index.html
scikit-aero
/scikit-aero-v0.1.0.tar.gz/Pybonacci-scikit-aero-cc233f6/README.rst
README.rst
from __future__ import division

import numpy as np
import scipy as sp
import scipy.optimize


def mach_from_area_ratio(fl, A_ratio):
    """Computes the Mach number given an area ratio assuming isentropic flow.

    Uses the relation between Mach number and area ratio for isentropic flow,
    and returns both the subsonic and the supersonic solution.

    Parameters
    ----------
    fl : IsentropicFlow
        Isentropic flow object.
    A_ratio : float
        Cross sectional area.

    Returns
    -------
    out : tuple of floats
        Subsonic and supersonic Mach number solution of the equation.

    Raises
    ------
    ValueError
        If the area ratio is less than 1.0 (the critical area is always the
        minimum).

    """
    def eq(M, fl, A_ratio):
        result = fl.A_Astar(M) - A_ratio
        return result

    if A_ratio < 1.0:
        raise ValueError("Area ratio must be greater than 1.")
    elif A_ratio == 1.0:
        M_sub = M_sup = 1.0
    else:
        M_sub = sp.optimize.bisect(eq, 0.0, 1.0, args=(fl, A_ratio))
        M_sup = sp.optimize.newton(eq, 2.0, args=(fl, A_ratio))

    return M_sub, M_sup


class IsentropicFlow(object):
    """Class representing an isentropic flow.

    Parameters
    ----------
    gamma : float, optional
        Specific heat ratio, default 7 / 5.

    """
    def __init__(self, gamma=1.4):
        self.gamma = gamma

    def T_T0(self, M):
        """Temperature ratio from Mach number.

        Static temperature divided by stagnation temperature at the point
        with given Mach number.

        Arguments
        ---------
        M : array_like
            Mach number.

        Returns
        -------
        T_T0 : array_like
            Temperature ratio.

        """
        M_ = np.asanyarray(M)
        if np.any(M_ < 0.0):
            raise ValueError("Mach number must be positive.")
        T_T0 = 1 / (1 + (self.gamma - 1) * M_ * M_ / 2)
        return T_T0

    def p_p0(self, M):
        """Pressure ratio from Mach number.

        Static pressure divided by stagnation pressure at the point with
        given Mach number.

        Arguments
        ---------
        M : array_like
            Mach number.

        Returns
        -------
        p_p0 : array_like
            Pressure ratio.

        """
        M_ = np.asanyarray(M)
        if np.any(M_ < 0.0):
            raise ValueError("Mach number must be positive.")
        p_p0 = (
            (1 + (self.gamma - 1) * M_ * M_ / 2) **
            (self.gamma / (1 - self.gamma))
        )
        return p_p0

    def A_Astar(self, M):
        """Area ratio from Mach number.

        Duct area divided by critical area given Mach number.

        Arguments
        ---------
        M : array_like
            Mach number.

        Returns
        -------
        A_Astar : array_like
            Area ratio.

        """
        M_ = np.asanyarray(M)
        if np.any(M_ < 0.0):
            raise ValueError("Mach number must be positive.")
        # If there is any zero entry, NumPy array division gives infinity,
        # which is correct.
        A_Astar = (
            (2 * (1 + (self.gamma - 1) * M_ * M_ / 2) / (self.gamma + 1)) **
            ((self.gamma + 1) / (2 * (self.gamma - 1))) / M_
        )
        return A_Astar
scikit-aero
/scikit-aero-v0.1.0.tar.gz/Pybonacci-scikit-aero-cc233f6/skaero/gasdynamics/isentropic.py
isentropic.py
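A short worked check of the isentropic relations above: at the sonic throat (M = 1) the area ratio A/A* equals 1, and for A/A* > 1 `mach_from_area_ratio` returns one subsonic and one supersonic root. The numerical values in the comments are approximate and assume gamma = 1.4.

# Worked example for the isentropic relations (gamma = 1.4).
from skaero.gasdynamics import isentropic

fl = isentropic.IsentropicFlow(gamma=1.4)
print(fl.A_Astar(1.0))   # 1.0 exactly at the sonic throat
print(fl.T_T0(2.0))      # ~0.5556 = 1 / (1 + 0.2 * 2**2)
print(fl.p_p0(2.0))      # ~0.1278 = 1.8**(-3.5)
M_sub, M_sup = isentropic.mach_from_area_ratio(fl, 2.0)
print(M_sub, M_sup)      # ~0.306 (subsonic) and ~2.197 (supersonic) for A/A* = 2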
0.956074
0.684658
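A short usage sketch for the isentropic-flow module above (hedged: it assumes the import path skaero.gasdynamics.isentropic given in this row; the class and function names are taken directly from the code shown).

import numpy as np
from skaero.gasdynamics import isentropic   # module path taken from the row above

fl = isentropic.IsentropicFlow(gamma=1.4)
print(fl.T_T0(2.0))                         # 1 / 1.8 ~= 0.556
print(fl.A_Astar(np.array([0.5, 2.0])))     # [~1.34, 1.6875]

M_sub, M_sup = isentropic.mach_from_area_ratio(fl, 1.6875)
print(M_sub, M_sup)                         # ~0.37 (subsonic) and ~2.0 (supersonic)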
from contextlib import contextmanager from functools import update_wrapper import atexit import os import warnings import numpy as np @contextmanager def ignore_invalid(): err = np.seterr(invalid='ignore') try: yield finally: np.seterr(**err) def check_array_like(a, *ndims, **kwargs): if not hasattr(a, 'ndim'): cls = kwargs.pop('default', np.asarray) a = cls(a, **kwargs) if a.ndim not in ndims: raise ValueError('invalid number of dimensions: %s' % a.ndim) def asarray_ndim(a, *ndims, **kwargs): """Ensure numpy array. Parameters ---------- a : array_like *ndims : int, optional Allowed values for number of dimensions. **kwargs Passed through to :func:`numpy.array`. Returns ------- a : numpy.ndarray """ allow_none = kwargs.pop('allow_none', False) kwargs.setdefault('copy', False) if a is None and allow_none: return None a = np.array(a, **kwargs) if a.ndim not in ndims: if len(ndims) > 1: expect_str = 'one of %s' % str(ndims) else: # noinspection PyUnresolvedReferences expect_str = '%s' % ndims[0] raise TypeError('bad number of dimensions: expected %s; found %s' % (expect_str, a.ndim)) return a def check_ndim(a, ndim): if a.ndim != ndim: raise TypeError('bad number of dimensions: expected %s; found %s' % (ndim, a.ndim)) def check_shape(a, shape): if a.shape != shape: raise TypeError('bad shape: expected %s; found %s' % (shape, a.shape)) def check_dtype(a, *dtypes): dtypes = [np.dtype(t) for t in dtypes] if a.dtype not in dtypes: raise TypeError('bad dtype: expected on of %s; found %s' % (dtypes, a.dtype)) def check_dtype_kind(a, *kinds): if a.dtype.kind not in kinds: raise TypeError('bad dtype kind: expected on of %s; found %s' % (kinds, a.dtype.kind)) def check_integer_dtype(a): check_dtype_kind(a, 'u', 'i') def check_dim0_aligned(*arrays): check_dim_aligned(0, *arrays) def check_dim1_aligned(*arrays): check_dim_aligned(1, *arrays) def check_dim_aligned(dim, *arrays): a = arrays[0] for b in arrays[1:]: if b.shape[dim] != a.shape[dim]: raise ValueError( 'arrays do not have matching length for dimension %s' % dim ) def check_same_ndim(*arrays): a = arrays[0] for b in arrays[1:]: if len(b.shape) != len(a.shape): raise ValueError( 'arrays do not have same number of dimensions' ) def check_equal_length(a, *others): expected_length = len(a) for b in others: if len(b) != expected_length: raise ValueError('sequences do not have matching length') def resize_dim1(a, s, fill=0): if a.shape[1] < s: newshape = a.shape[0], s b = np.zeros(newshape, dtype=a.dtype) if fill != 0: b.fill(fill) b[:, :a.shape[1]] = a return b else: return a def ensure_dim1_aligned(*arrays, **kwargs): fill = kwargs.get('fill', 0) dim1_length = max(a.shape[1] for a in arrays) arrays = [resize_dim1(a, dim1_length, fill=fill) for a in arrays] return arrays def ensure_square(dist): from scipy.spatial.distance import squareform dist = asarray_ndim(dist, 1, 2) if dist.ndim == 1: dist = squareform(dist) else: if dist.shape[0] != dist.shape[1]: raise ValueError('distance matrix is not square') return dist def mask_inaccessible(is_accessible, pos, *arrays): """ This function returns a tuple (positions, *arrays) in which positions that are not accessible are removed from the positions and the *arrays. Parameters ---------- is_accessible : array_like, bool, shape (len(contig),) Boolean array indicating accessibility status for all positions in the chromosome/contig. pos : array_like, int, shape (n_variants,) Variant positions, using 1-based coordinates, in ascending order. array1, array2, ... 
: array_like N-dimensional array objects with n_variants elements in the 1D. Returns ------- pos : array_like, int, shape (n_items,) Positions array consisting exclusively of accessible sites in the original positions array. array1, array2, ... : array_like N-dimensional array objects with n_variants elements in the 1D but now consisting exclusively of accessible sites in the original arrays. """ if is_accessible is not None: # sanity check if np.max(pos) > len(is_accessible): raise ValueError( 'Not all positions are covered by is_accessible.' ) # check array shapes check_dim0_aligned(pos, *arrays) loc_accessible = is_accessible[pos-1] if np.any(np.logical_not(loc_accessible)): warnings.warn("Some variants were inaccessible and hence masked.") arrays = tuple(a[loc_accessible] for a in arrays) pos = pos[loc_accessible] return (pos,) + arrays class _HashedSeq(list): __slots__ = 'hashvalue' # noinspection PyShadowingBuiltins,PyMissingConstructor def __init__(self, tup, hash=hash): self[:] = tup self.hashvalue = hash(tup) def __hash__(self): return self.hashvalue # noinspection PyShadowingBuiltins def _make_key(args, kwds, typed, kwd_mark=('__kwargs__',), fasttypes=(int, str, frozenset, type(None)), sorted=sorted, tuple=tuple, type=type, len=len): key = args kwd_items = sorted(kwds.items()) if kwds: key += kwd_mark for item in kwd_items: key += item if typed: key += tuple(type(v) for v in args) if kwds: key += tuple(type(v) for _, v in kwd_items) else: key = args if len(key) == 1 and type(key[0]) in fasttypes: return key[0] return _HashedSeq(key) def _hdf5_cache_act(filepath, parent, container, key, names, no_cache, user_function, args, kwargs, h5dcreate_kwargs): import h5py # open the HDF5 file with h5py.File(filepath, mode='a') as h5f: # find parent group if parent is None: # use root group h5g_parent = h5f else: h5g_parent = h5f.require_group(parent) # find cache container group h5g_container = h5g_parent.require_group(container) # find cache group h5g = h5g_container.require_group(key) # call user function and (re)build cache if no_cache or '__success__' not in h5g.attrs: # reset success mark if present if '__success__' in h5g.attrs: del h5g.attrs['__success__'] # compute result result = user_function(*args, **kwargs) # handle tuple of return values if isinstance(result, tuple): # determine dataset names if names is None: names = ['f%02d' % i for i in range(len(result))] elif len(names) < len(result): names = list(names) + ['f%02d' % i for i in range(len(names), len(result))] # save data for n, r in zip(names, result): if n in h5g: del h5g[n] if np.isscalar(r): h5g.create_dataset(n, data=r) else: h5g.create_dataset(n, data=r, **h5dcreate_kwargs) # handle single return value else: # determine dataset name if names is None: n = 'data' elif isinstance(names, str): n = names elif len(names) > 0: n = names[0] else: n = 'data' # save data if n in h5g: del h5g[n] if np.isscalar(result): h5g.create_dataset(n, data=result) else: h5g.create_dataset(n, data=result, **h5dcreate_kwargs) # mark success h5g.attrs['__success__'] = True # load from cache else: # determine dataset names if names is None: names = sorted(h5g.keys()) elif isinstance(names, str): names = (names,) # load result from cache if len(names) == 1: result = h5g[names[0]] result = result[:] if len(result.shape) > 0 else result[()] else: result = tuple(h5g[n] for n in names) result = tuple(r[:] if len(r.shape) > 0 else r[()] for r in result) return result def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False, 
hashed_key=False, **h5dcreate_kwargs): """HDF5 cache decorator. Parameters ---------- filepath : string, optional Path to HDF5 file. If None a temporary file name will be used. parent : string, optional Path to group within HDF5 file to use as parent. If None the root group will be used. group : string, optional Path to group within HDF5 file, relative to parent, to use as container for cached data. If None the name of the wrapped function will be used. names : sequence of strings, optional Name(s) of dataset(s). If None, default names will be 'f00', 'f01', etc. typed : bool, optional If True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. hashed_key : bool, optional If False (default) the key will not be hashed, which makes for readable cache group names. If True the key will be hashed, however note that on Python >= 3.3 the hash value will not be the same between sessions unless the environment variable PYTHONHASHSEED has been set to the same value. Returns ------- decorator : function Examples -------- Without any arguments, will cache using a temporary HDF5 file:: >>> import allel >>> @allel.util.hdf5_cache() ... def foo(n): ... print('executing foo') ... return np.arange(n) ... >>> foo(3) executing foo array([0, 1, 2]) >>> foo(3) array([0, 1, 2]) >>> foo.cache_filepath # doctest: +SKIP '/tmp/tmp_jwtwgjz' Supports multiple return values, including scalars, e.g.:: >>> @allel.util.hdf5_cache() ... def bar(n): ... print('executing bar') ... a = np.arange(n) ... return a, a**2, n**2 ... >>> bar(3) executing bar (array([0, 1, 2]), array([0, 1, 4]), 9) >>> bar(3) (array([0, 1, 2]), array([0, 1, 4]), 9) Names can also be specified for the datasets, e.g.:: >>> @allel.util.hdf5_cache(names=['z', 'x', 'y']) ... def baz(n): ... print('executing baz') ... a = np.arange(n) ... return a, a**2, n**2 ... >>> baz(3) executing baz (array([0, 1, 2]), array([0, 1, 4]), 9) >>> baz(3) (array([0, 1, 2]), array([0, 1, 4]), 9) """ # initialise HDF5 file path if filepath is None: import tempfile filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5') atexit.register(os.remove, filepath) # initialise defaults for dataset creation h5dcreate_kwargs.setdefault('chunks', True) def decorator(user_function): # setup the name for the cache container group if group is None: container = user_function.__name__ else: container = group def wrapper(*args, **kwargs): # load from cache or not no_cache = kwargs.pop('no_cache', False) # compute a key from the function arguments key = _make_key(args, kwargs, typed) if hashed_key: key = str(hash(key)) else: key = str(key).replace('/', '__slash__') return _hdf5_cache_act(filepath, parent, container, key, names, no_cache, user_function, args, kwargs, h5dcreate_kwargs) wrapper.cache_filepath = filepath return update_wrapper(wrapper, user_function) return decorator def contains_newaxis(item): if item is None: return True elif item is np.newaxis: return True elif isinstance(item, tuple): return any((i is None or i is np.newaxis) for i in item) return False def check_ploidy(actual, expect): if expect != actual: raise ValueError( 'expected ploidy %s, found %s' % (expect, actual) ) def check_min_samples(actual, expect): if actual < expect: raise ValueError( 'expected at least %s samples, found %s' % (expect, actual) ) def check_type(obj, expected): if not isinstance(obj, expected): raise TypeError('bad argument type, expected %s, found %s' % (expected, type(obj)))
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/util.py
util.py
0.760917
0.47859
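A minimal sketch of the validation helpers above (hedged: it assumes the module is importable as allel.util per the path in this row; behaviour follows the code shown, including the warning emitted by mask_inaccessible).

import numpy as np
from allel.util import asarray_ndim, check_dim0_aligned, mask_inaccessible

pos = asarray_ndim(np.array([2, 5, 9]), 1)   # passes: 1-D is an allowed ndim
gt = np.zeros((3, 4), dtype='i1')            # one row per variant
check_dim0_aligned(pos, gt)                  # silent when first dimensions match

is_accessible = np.ones(10, dtype=bool)
is_accessible[4] = False                     # position 5 (1-based) is inaccessible
pos2, gt2 = mask_inaccessible(is_accessible, pos, gt)   # warns and drops position 5
print(pos2)                                  # [2 9]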
import numpy as np class ArrayWrapper(object): """Abstract base class that delegates to a wrapped array-like object.""" def __init__(self, data): if isinstance(data, ArrayWrapper): # don't wrap a wrapper data = data.values if not hasattr(data, 'shape') or not hasattr(data, 'dtype'): raise TypeError('values must be array-like') self._values = data @property def values(self): """The underlying array of values. Returns ------- ndarray """ return self._values @property def caption(self): return '<%s shape=%s dtype=%s>' % (type(self).__name__, self.shape, self.dtype) def __repr__(self): return self.caption def __getattr__(self, item): if item in {'__array_struct__', '__array_interface__'}: # don't pass these through because we want to use __array__ to control numpy # behaviour raise AttributeError return getattr(self.values, item) def __getitem__(self, item): return self.values[item] def __setitem__(self, item, value): self.values[item] = value def __iter__(self): return iter(self.values) def __len__(self): return len(self.values) def __array__(self, *args): v = self.values[:] a = np.asanyarray(v) if args: a = a.astype(args[0]) return a def __eq__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values == other def __ne__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values != other def __lt__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values < other def __gt__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values > other def __le__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values <= other def __ge__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values >= other def __abs__(self): return abs(self.values) def __add__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values + other def __and__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values & other def __div__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values.__div__(other) def __floordiv__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values // other def __inv__(self): return ~self.values def __invert__(self): return ~self.values def __lshift__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values << other def __mod__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values % other def __mul__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values * other def __neg__(self): return -self.values def __or__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values | other def __pos__(self): return +self.values def __pow__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values ** other def __rshift__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values >> other def __sub__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values - other def __truediv__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values.__truediv__(other) def __xor__(self, other): if isinstance(other, ArrayWrapper): other = other.values return self.values ^ other ellipsis_str = '...' 
def arr1d_to_html(indices, items, caption): # N.B., table captions don't render in jupyter notebooks on GitHub, # so put caption outside table element html = '<div class="allel allel-DisplayAs1D">' # sanitize caption caption = caption.replace('<', '&lt;').replace('>', '&gt;') html += '<span>%s</span>' % caption # build table html += '<table>' html += '<thead>' html += '<tr>' for i in indices: html += '<th style="text-align: center">%s</th>' % i html += '</tr>' html += '</thead>' html += '<tbody>' html += '<tr>' for item in items: html += '<td style="text-align: center">%s</td>' % item html += '</tr>' html += '</tbody>' html += '</table>' html += '</div>' return html _row_index_style = ('text-align: center; ' 'background-color: white; ' 'border-right: 1px solid black; ') def arr2d_to_html(row_indices, col_indices, items, caption): # N.B., table captions don't render in jupyter notebooks on GitHub, # so put caption outside table element html = '<div class="allel allel-DisplayAs2D">' # sanitize caption caption = caption.replace('<', '&lt;').replace('>', '&gt;') html += '<span>%s</span>' % caption # build table html += '<table>' html += '<thead>' html += '<tr><th></th>' for i in col_indices: html += '<th style="text-align: center">%s</th>' % i html += '</tr>' html += '</thead>' html += '<tbody>' for row_index, row in zip(row_indices, items): if row_index == ellipsis_str: html += (('<tr><th style="%s">...</th>' % _row_index_style) + ('<td style="text-align: center" colspan="%s">...</td></tr>' % (len(col_indices) + 1))) else: html += '<tr><th style="%s">%s</th>' % (_row_index_style, row_index) for item in row: html += '<td style="text-align: center">%s</td>' % item html += '</tr>' html += '</tbody>' html += '</table>' html += '</div>' return html def recarr_to_html(names, indices, items, caption): # N.B., table captions don't render in jupyter notebooks on GitHub, # so put caption outside table element html = '<div class="allel allel-DisplayAsTable">' # sanitize caption caption = caption.replace('<', '&lt;').replace('>', '&gt;') html += '<span>%s</span>' % caption # build table html += '<table>' html += '<thead>' html += '<tr><th></th>' for n in names: html += '<th style="text-align: center">%s</th>' % n html += '</tr>' html += '</thead>' html += '<tbody>' for row_index, row in zip(indices, items): if row_index == ellipsis_str: html += (('<tr><th style="%s">...</th>' % _row_index_style) + ('<td style="text-align: center" colspan="%s">...</td></tr>' % (len(names) + 1))) else: html += '<tr><th style="%s">%s</th>' % (_row_index_style, row_index) for item in row: html += '<td style="text-align: center">%s</td>' % item html += '</tr>' html += '</tbody>' html += '</table>' html += '</div>' return html class DisplayableArray(ArrayWrapper): def __repr__(self): return self.caption + '\n' + str(self) def __str__(self): return self.to_str() def _repr_html_(self): return self.to_html() # noinspection PyAbstractClass class DisplayAs1D(DisplayableArray): def str_items(self): # can be overridden in sub-class to provide custom display behaviour return [repr(i) for i in self] def get_display_items(self, threshold=10, edgeitems=5): # ensure threshold if threshold is None: threshold = self.shape[0] # ensure sensible edgeitems edgeitems = min(edgeitems, threshold // 2) # determine indices of items to show if self.shape[0] > threshold: indices = ( list(range(edgeitems)) + [ellipsis_str] + list(range(self.shape[0] - edgeitems, self.shape[0], 1)) ) head = self[:edgeitems].str_items() tail = self[self.shape[0] - 
edgeitems:].str_items() items = head + [ellipsis_str] + tail else: indices = list(range(self.shape[0])) items = self[:].str_items() return indices, items def to_str(self, threshold=10, edgeitems=5): _, items = self.get_display_items(threshold, edgeitems) s = '[' + ', '.join(items) + ']' return s def to_html(self, threshold=10, edgeitems=5, caption=None): indices, items = self.get_display_items(threshold, edgeitems) if caption is None: caption = self.caption return arr1d_to_html(indices, items, caption) def display(self, threshold=10, edgeitems=5, caption=None): html = self.to_html(threshold, edgeitems, caption) from IPython.display import display_html display_html(html, raw=True) def displayall(self, caption=None): self.display(threshold=None, caption=caption) # noinspection PyAbstractClass class DisplayAs2D(DisplayableArray): def str_items(self): # can be overridden in sub-class to provide custom display behaviour return [[repr(i) for i in row] for row in self] def get_display_items(self, row_threshold, col_threshold, row_edgeitems, col_edgeitems): # ensure threshold if row_threshold is None: row_threshold = self.shape[0] if col_threshold is None: col_threshold = self.shape[1] # ensure sensible edgeitems row_edgeitems = min(row_edgeitems, row_threshold // 2) col_edgeitems = min(col_edgeitems, col_threshold // 2) # determine row indices of items to show if self.shape[0] > row_threshold: row_indices = ( list(range(row_edgeitems)) + [ellipsis_str] + list(range(self.shape[0] - row_edgeitems, self.shape[0], 1)) ) head = self[:row_edgeitems].str_items() tail = self[self.shape[0] - row_edgeitems:].str_items() items = head + [ellipsis_str] + tail else: row_indices = list(range(self.shape[0])) items = self[:].str_items() # determine col indices of items to show if self.shape[1] > col_threshold: col_indices = ( list(range(col_edgeitems)) + [ellipsis_str] + list(range(self.shape[1] - col_edgeitems, self.shape[1], 1)) ) items = [ row if row == ellipsis_str else (row[:col_edgeitems] + [ellipsis_str] + row[self.shape[1] - col_edgeitems:]) for row in items ] else: col_indices = list(range(self.shape[1])) # items unchanged return row_indices, col_indices, items def to_str(self, row_threshold=6, col_threshold=10, row_edgeitems=3, col_edgeitems=5): _, _, items = self.get_display_items(row_threshold, col_threshold, row_edgeitems, col_edgeitems) s = '' for row in items: if row == ellipsis_str: s += row + '\n' else: s += ' '.join(row) + '\n' return s def to_html(self, row_threshold=6, col_threshold=10, row_edgeitems=3, col_edgeitems=5, caption=None): row_indices, col_indices, items = self.get_display_items( row_threshold, col_threshold, row_edgeitems, col_edgeitems ) if caption is None: caption = self.caption return arr2d_to_html(row_indices, col_indices, items, caption) def display(self, row_threshold=6, col_threshold=10, row_edgeitems=3, col_edgeitems=5, caption=None): html = self.to_html(row_threshold, col_threshold, row_edgeitems, col_edgeitems, caption) from IPython.display import display_html display_html(html, raw=True) def displayall(self, caption=None): self.display(row_threshold=None, col_threshold=None, caption=caption) class DisplayAsTable(DisplayableArray): @property def names(self): """Column names.""" return self.dtype.names def str_items(self): tmp = self[:] items = [[str(x) for x in row] for row in tmp] return items def get_display_items(self, threshold=6, edgeitems=3): # ensure threshold if threshold is None: threshold = self.shape[0] # ensure sensible edgeitems edgeitems = min(edgeitems, 
threshold // 2) # determine indices of items to show if self.shape[0] > threshold: indices = ( list(range(edgeitems)) + [ellipsis_str] + list(range(self.shape[0] - edgeitems, self.shape[0], 1)) ) head = self[:edgeitems].str_items() tail = self[self.shape[0] - edgeitems:].str_items() items = head + [ellipsis_str] + tail else: indices = list(range(self.shape[0])) items = self[:].str_items() return indices, items def to_str(self, threshold=6, edgeitems=3): _, items = self.get_display_items(threshold, edgeitems) s = ' '.join(items) return s def to_html(self, threshold=6, edgeitems=3, caption=None): indices, items = self.get_display_items(threshold, edgeitems) if caption is None: caption = self.caption return recarr_to_html(self.names, indices, items, caption) def display(self, threshold=6, edgeitems=3, caption=None): html = self.to_html(threshold, edgeitems, caption) from IPython.display import display_html display_html(html, raw=True) def displayall(self, caption=None): self.display(threshold=None, caption=caption) def __str__(self): # stick with default string output of values return str(self.values)
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/abc.py
abc.py
0.798894
0.383815
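A brief sketch of the delegation pattern implemented by ArrayWrapper above (hedged: it assumes the module is importable as allel.abc per the path in this row).

import numpy as np
from allel.abc import ArrayWrapper   # module path taken from the row above

w = ArrayWrapper(np.arange(5))
print(w.caption)          # <ArrayWrapper shape=(5,) dtype=...>
print(w.shape, w.dtype)   # attribute access is delegated to the wrapped ndarray
print(w + 10)             # arithmetic delegates too: [10 11 12 13 14]
print(np.asarray(w))      # __array__ hands back a plain ndarray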
from .model.ndarray import * from .model.chunked import * from .model.util import * try: import dask except ImportError: pass else: from .model.dask import * from .stats.window import moving_statistic, windowed_count, \ windowed_statistic, per_base, equally_accessible_windows, moving_mean, \ moving_std, moving_midpoint, index_windows, position_windows, window_locations from .stats.diversity import mean_pairwise_difference, \ sequence_diversity, windowed_diversity, mean_pairwise_difference_between, \ sequence_divergence, windowed_divergence, windowed_df, watterson_theta, \ windowed_watterson_theta, tajima_d, windowed_tajima_d, moving_tajima_d from .stats.fst import weir_cockerham_fst, hudson_fst, \ windowed_weir_cockerham_fst, windowed_hudson_fst, patterson_fst, \ windowed_patterson_fst, blockwise_weir_cockerham_fst, \ blockwise_hudson_fst, blockwise_patterson_fst, average_hudson_fst, \ average_patterson_fst, average_weir_cockerham_fst, moving_hudson_fst, \ moving_patterson_fst, moving_weir_cockerham_fst from .stats.distance import pairwise_distance, pairwise_dxy, pcoa, \ plot_pairwise_distance, condensed_coords, condensed_coords_between, \ condensed_coords_within from .stats.hw import heterozygosity_observed, heterozygosity_expected, \ inbreeding_coefficient from .stats.ld import rogers_huff_r, rogers_huff_r_between, \ locate_unlinked, plot_pairwise_ld, windowed_r_squared from .stats.decomposition import pca, randomized_pca from .stats.preprocessing import StandardScaler, CenterScaler, PattersonScaler, get_scaler from .stats.admixture import patterson_f2, patterson_f3, patterson_d, \ blockwise_patterson_f3, blockwise_patterson_d, average_patterson_d, \ average_patterson_f3, moving_patterson_d, moving_patterson_f3 from .stats.selection import ehh_decay, voight_painting, xpehh, ihs, \ plot_voight_painting, fig_voight_painting, plot_haplotype_frequencies, \ plot_moving_haplotype_frequencies, haplotype_diversity, \ moving_haplotype_diversity, garud_h, moving_garud_h, nsl, xpnsl, \ standardize, standardize_by_allele_count, moving_delta_tajima_d, pbs from .stats.sf import sfs, sfs_folded, sfs_scaled, sfs_folded_scaled, \ joint_sfs, joint_sfs_folded, joint_sfs_scaled, joint_sfs_folded_scaled, \ fold_sfs, fold_joint_sfs, scale_sfs, scale_sfs_folded, scale_joint_sfs, \ scale_joint_sfs_folded, plot_sfs, plot_sfs_folded, plot_sfs_scaled, \ plot_sfs_folded_scaled, plot_joint_sfs, plot_joint_sfs_folded, \ plot_joint_sfs_scaled, plot_joint_sfs_folded_scaled from .stats.misc import plot_variant_locator, tabulate_state_transitions, \ tabulate_state_blocks from .stats.mendel import mendel_errors, paint_transmission, \ phase_progeny_by_transmission, phase_parents_by_transmission, \ phase_by_transmission, INHERIT_MISSING, INHERIT_NONPARENTAL, INHERIT_NONSEG_ALT, \ INHERIT_NONSEG_REF, INHERIT_PARENT1, INHERIT_PARENT2, INHERIT_PARENT_MISSING, \ INHERIT_UNDETERMINED from .stats.roh import roh_mhmm, roh_poissonhmm from .io.vcf_read import * from .io.vcf_write import * from .io.gff import * from .io.fasta import * from .io.util import * from .util import hdf5_cache from .version import version as __version__
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/__init__.py
__init__.py
0.521715
0.174868
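A hedged sketch of the top-level API assembled by the __init__ above; the inputs are hypothetical toy arrays and the call signatures follow the public scikit-allel documentation rather than anything shown in this row.

import numpy as np
import allel   # top-level package API assembled by the __init__ above

ac = np.array([[3, 1], [2, 2], [4, 0]])            # allele counts per variant
print(allel.mean_pairwise_difference(ac))          # per-variant nucleotide diversity

print(allel.moving_statistic(np.arange(10), statistic=np.mean, size=5))  # [2. 7.]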
import operator from functools import reduce import numpy as np storage_registry = dict() def get_storage(storage=None): if storage is None: try: return storage_registry['default'] except KeyError: raise RuntimeError('no default storage available; is either h5py ' 'or zarr installed?') elif isinstance(storage, str): # normalise storage name storage = str(storage).lower() try: return storage_registry[storage] except KeyError: raise RuntimeError('storage not recognised: %r' % storage) else: # assume custom instance return storage def check_equal_length(*sequences): s = sequences[0] for t in sequences[1:]: if len(t) != len(s): raise ValueError('lengths do not match') def is_array_like(a): return hasattr(a, 'shape') and hasattr(a, 'dtype') def ensure_array_like(a, **kwargs): ndim = kwargs.get('ndim', None) if not is_array_like(a): a = np.asarray(a) if ndim is not None and len(a.shape) != ndim: raise ValueError( 'expected array-like with %s dimensions, found %s' % (ndim, len(a.shape)) ) return a def check_table_like(data, names=None): if isinstance(data, (list, tuple)): # sequence of columns if names is None: names = ['f%d' % i for i in range(len(data))] else: if len(names) != len(data): raise ValueError('bad number of column names') columns = list(data) elif hasattr(data, 'names'): # bcolz ctable or similar if names is None: names = list(data.names) columns = [data[n] for n in names] elif hasattr(data, 'keys') and callable(data.keys): # dict, h5py Group or similar if names is None: names = sorted(data.keys()) columns = [data[n] for n in names] elif hasattr(data, 'dtype') and hasattr(data.dtype, 'names'): # numpy recarray or similar if names is None: names = list(data.dtype.names) columns = [data[n] for n in names] else: raise ValueError('invalid data: %r' % data) columns = [ensure_array_like(c) for c in columns] check_equal_length(*columns) return names, columns def get_blen_array(data, blen=None): """Try to guess a reasonable block length to use for block-wise iteration over `data`.""" if blen is None: if hasattr(data, 'chunks') and \ hasattr(data, 'shape') and \ hasattr(data.chunks, '__len__') and \ hasattr(data.shape, '__len__') and \ len(data.chunks) == len(data.shape): # something like h5py dataset or zarr array return data.chunks[0] else: # fall back to something simple, ~1Mb chunks row = np.asarray(data[0]) return max(1, (2**20) // row.nbytes) else: return blen def get_blen_table(data, blen=None): if blen is None: _, columns = check_table_like(data) return max(get_blen_array(c) for c in columns) else: return blen def human_readable_size(size): if size < 2**10: return "%s" % size elif size < 2**20: return "%.1fK" % (size / float(2**10)) elif size < 2**30: return "%.1fM" % (size / float(2**20)) elif size < 2**40: return "%.1fG" % (size / float(2**30)) else: return "%.1fT" % (size / float(2**40)) def get_nbytes(data): if hasattr(data, 'nbytes'): return data.nbytes elif is_array_like(data): return reduce(operator.mul, data.shape) * data.dtype.itemsize else: return None # noinspection PyProtectedMember def get_cbytes(data): if hasattr(data, 'cbytes'): return data.cbytes elif hasattr(data, 'nbytes_stored'): return data.nbytes_stored elif hasattr(data, '_id') and hasattr(data._id, 'get_storage_size'): return data._id.get_storage_size() else: return None def get_compression(data): if hasattr(data, 'cparams'): return 'blosc' elif hasattr(data, 'compression'): return data.compression elif hasattr(data, 'compressor'): # zarr 2 return data.compressor.codec_id else: return None def 
get_compression_opts(data): if hasattr(data, 'cparams'): return data.cparams elif hasattr(data, 'compression_opts'): return data.compression_opts elif hasattr(data, 'compressor'): # zarr 2 config = data.compressor.get_config() del config['id'] return config else: return None def get_shuffle(data): if hasattr(data, 'cparams'): return data.cparams.shuffle elif hasattr(data, 'shuffle'): return data.shuffle else: return None def get_chunks(data): if hasattr(data, 'chunks') and \ hasattr(data, 'shape') and \ hasattr(data.chunks, '__len__') and \ hasattr(data.shape, '__len__') and \ len(data.chunks) == len(data.shape): # something like h5py dataset or zarr array return data.chunks else: return None
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/chunked/util.py
util.py
0.523177
0.407157
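A minimal usage sketch for the chunked utility helpers above (get_blen_array and human_readable_size), assuming scikit-allel is installed so that allel.chunked.util is importable; the array shape is illustrative only.

import numpy as np
from allel.chunked import util

# a plain in-memory array has no .chunks attribute, so get_blen_array falls
# back to a block length covering roughly 1 Mb worth of rows
data = np.zeros((100_000, 10), dtype='i4')
blen = util.get_blen_array(data)
print(blen, util.human_readable_size(data.nbytes))   # 26214 3.8M

# block-wise iteration, the access pattern the chunked routines use internally
for start in range(0, data.shape[0], blen):
    block = data[start:start + blen]
    # ... operate on one block at a time ...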
import operator from functools import reduce import zarr import zarr.util import numcodecs from allel.chunked import util as _util def default_chunks(data, expectedlen): # here we will only ever chunk first dimension rowsize = data.dtype.itemsize if data.ndim > 1: # pretend array is 1D rowsize *= reduce(operator.mul, data.shape[1:]) if expectedlen is None: # default to 4M chunks of first dimension chunklen = 2**22 // rowsize else: # use zarr heuristics chunklen, = zarr.util.guess_chunks((expectedlen,), rowsize) if data.ndim > 1: chunks = (chunklen,) + data.shape[1:] else: chunks = chunklen, return chunks class ZarrStorage(object): """Storage layer using Zarr.""" def __init__(self, **kwargs): self.defaults = kwargs def _set_defaults(self, kwargs): # copy in master defaults for k, v in self.defaults.items(): kwargs.setdefault(k, v) return kwargs # noinspection PyUnusedLocal def array(self, data, expectedlen=None, **kwargs): # setup data = _util.ensure_array_like(data) kwargs = self._set_defaults(kwargs) # determine chunks kwargs.setdefault('chunks', default_chunks(data, expectedlen)) # determine object codec if data.dtype == object: # peek at first value peek = data[0] if isinstance(peek, bytes): object_codec = numcodecs.VLenBytes() elif isinstance(peek, str): object_codec = numcodecs.VLenUTF8() else: object_codec = numcodecs.MsgPack() kwargs.setdefault('object_codec', object_codec) # create z = zarr.array(data, **kwargs) return z def table(self, data, names=None, expectedlen=None, **kwargs): # setup names, columns = _util.check_table_like(data, names=names) kwargs = self._set_defaults(kwargs) chunks = kwargs.pop('chunks', None) g = zarr.group(**kwargs) # create columns for n, c in zip(names, columns): if chunks is None: chunks = default_chunks(c, expectedlen) if c.dtype == object: # peek at first value peek = c[0] if isinstance(peek, bytes): object_codec = numcodecs.VLenBytes() elif isinstance(peek, str): object_codec = numcodecs.VLenUTF8() else: object_codec = numcodecs.MsgPack() else: object_codec = None g.array(name=n, data=c, chunks=chunks, object_codec=object_codec) # create table ztbl = ZarrTable(g, names=names) return ztbl class ZarrTable(object): def __init__(self, grp, names=None): self.grp = grp available_names = sorted(grp.array_keys()) if names is None: names = available_names else: for n in names: if n not in available_names: raise ValueError('name not available: %s' % n) self.names = names def __getitem__(self, item): return self.grp[item] def append(self, data): names, columns = _util.check_table_like(data, names=self.names) for n, c in zip(names, columns): self.grp[n].append(c) class ZarrMemStorage(ZarrStorage): # noinspection PyShadowingBuiltins def _set_defaults(self, kwargs): kwargs = super(ZarrMemStorage, self)._set_defaults(kwargs) kwargs.setdefault('store', zarr.DictStore()) return kwargs class ZarrTmpStorage(ZarrStorage): def _set_defaults(self, kwargs): kwargs = super(ZarrTmpStorage, self)._set_defaults(kwargs) suffix = kwargs.pop('suffix', '.zarr') prefix = kwargs.pop('prefix', 'scikit_allel_') # noinspection PyShadowingBuiltins dir = kwargs.pop('dir', None) kwargs.setdefault('store', zarr.TempStore(suffix=suffix, prefix=prefix, dir=dir)) return kwargs zarr_storage = ZarrStorage() """zarr storage with default parameters""" zarrmem_storage = ZarrMemStorage() """zarr in-memory storage with default compression""" zarrtmp_storage = ZarrTmpStorage() """zarr temporary file storage with default compression""" _util.storage_registry['zarr'] = zarr_storage 
_util.storage_registry['zarrmem'] = zarrmem_storage _util.storage_registry['zarrtmp'] = zarrtmp_storage
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/chunked/storage_zarr.py
storage_zarr.py
0.553505
0.500183
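A short sketch of the Zarr storage layer above, creating a compressed in-memory array and column table through zarrmem_storage. It assumes zarr 2.x and numcodecs are installed (the DictStore/TempStore stores used by this module predate zarr 3); the shapes, column names, and values are illustrative.

import numpy as np
from allel.chunked.storage_zarr import zarrmem_storage

# toy genotype-like data, chunked along the first dimension only
data = np.random.randint(0, 4, size=(10_000, 3)).astype('i1')
z = zarrmem_storage.array(data, expectedlen=1_000_000)
print(z.shape, z.chunks, z.compressor)

# ZarrTable groups several columns in one zarr group and supports append()
tbl = zarrmem_storage.table({'POS': np.arange(10_000),
                             'DP': np.random.poisson(30, 10_000)})
tbl.append({'POS': np.arange(10_000, 10_010),
            'DP': np.full(10, 30)})
print(tbl['POS'].shape)   # (10010,)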
import tempfile import atexit import operator import os from types import MethodType from functools import reduce import h5py from allel.chunked import util as _util def h5fmem(**kwargs): """Create an in-memory HDF5 file.""" # need a file name even tho nothing is ever written fn = tempfile.mktemp() # file creation args kwargs['mode'] = 'w' kwargs['driver'] = 'core' kwargs['backing_store'] = False # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f def h5ftmp(**kwargs): """Create an HDF5 file backed by a temporary file.""" # create temporary file name suffix = kwargs.pop('suffix', '.h5') prefix = kwargs.pop('prefix', 'scikit_allel_') tempdir = kwargs.pop('dir', None) fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir) atexit.register(os.remove, fn) # file creation args kwargs['mode'] = 'w' # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f def _dataset_append(h5d, data): hl = len(h5d) dl = len(data) hln = hl + dl h5d.resize(hln, axis=0) h5d[hl:hln] = data def _table_append(h5g, data): names, columns = _util.check_table_like(data, names=h5g.names) for n, c in zip(names, columns): h5d = h5g[n] _dataset_append(h5d, c) class HDF5Storage(object): """Storage layer using HDF5 dataset and group.""" def __init__(self, **kwargs): self.defaults = kwargs def open_file(self, **kwargs): # override in sub-classes raise NotImplementedError('group must be provided') def create_dataset(self, h5g, data=None, expectedlen=None, **kwargs): # set defaults kwargs.setdefault('name', 'data') for k, v in self.defaults.items(): kwargs.setdefault(k, v) # handle data if data is not None: data = _util.ensure_array_like(data) # by default, simple chunking across rows rowsize = data.dtype.itemsize * reduce(operator.mul, data.shape[1:], 1) # 1Mb chunks chunklen = max(1, (2**20) // rowsize) if expectedlen is not None: # ensure chunks not bigger than expected length chunklen = min(chunklen, expectedlen) chunks = (chunklen,) + data.shape[1:] kwargs.setdefault('chunks', chunks) # by default, can resize dim 0 maxshape = (None,) + data.shape[1:] kwargs.setdefault('maxshape', maxshape) # set data kwargs['data'] = data # create dataset h5d = h5g.create_dataset(**kwargs) return h5d # noinspection PyUnusedLocal def array(self, data, expectedlen=None, **kwargs): # setup data = _util.ensure_array_like(data) # obtain group h5g = kwargs.pop('group', None) if h5g is None: # open file, use root group h5g, kwargs = self.open_file(**kwargs) # create dataset h5d = self.create_dataset(h5g, data=data, expectedlen=expectedlen, **kwargs) # patch in append method h5d.append = MethodType(_dataset_append, h5d) return h5d # noinspection PyUnusedLocal def table(self, data, names=None, expectedlen=None, **kwargs): # setup names, columns = _util.check_table_like(data, names=names) # obtain group h5g = kwargs.pop('group', None) if h5g is None: # open file, use root group h5g, kwargs = self.open_file(**kwargs) # create columns for n, c in zip(names, columns): self.create_dataset(h5g, data=c, name=n, expectedlen=expectedlen, **kwargs) # patch in append method h5g.append = MethodType(_table_append, h5g) # patch in names attribute h5g.names = names return h5g class HDF5MemStorage(HDF5Storage): def open_file(self, **kwargs): return h5fmem(), kwargs class HDF5TmpStorage(HDF5Storage): def open_file(self, **kwargs): suffix = kwargs.pop('suffix', '.h5') prefix = kwargs.pop('prefix', 'scikit_allel_') tempdir = kwargs.pop('dir', None) return h5ftmp(dir=tempdir, suffix=suffix, prefix=prefix), kwargs hdf5_storage = HDF5Storage() """HDF5 
storage with default parameters""" hdf5mem_storage = HDF5MemStorage() """HDF5 in-memory storage with default compression""" hdf5tmp_storage = HDF5TmpStorage() """HDF5 temporary file storage with default compression""" hdf5_zlib1_storage = HDF5Storage(compression='gzip', compression_opts=1) """HDF5 storage with zlib level 1 compression""" hdf5mem_zlib1_storage = HDF5MemStorage(compression='gzip', compression_opts=1) """HDF5 in-memory storage with zlib level 1 compression""" hdf5tmp_zlib1_storage = HDF5TmpStorage(compression='gzip', compression_opts=1) """HDF5 temporary file storage with zlib level 1 compression""" hdf5_lzf_storage = HDF5Storage(compression='lzf') """HDF5 storage with LZF compression""" hdf5mem_lzf_storage = HDF5MemStorage(compression='lzf') """HDF5 in-memory storage with LZF compression""" hdf5tmp_lzf_storage = HDF5TmpStorage(compression='lzf') """HDF5 temporary file storage with LZF compression""" _util.storage_registry['hdf5'] = hdf5_storage _util.storage_registry['hdf5mem'] = hdf5mem_storage _util.storage_registry['hdf5tmp'] = hdf5tmp_storage _util.storage_registry['hdf5_zlib1'] = hdf5_zlib1_storage _util.storage_registry['hdf5mem_zlib1'] = hdf5mem_zlib1_storage _util.storage_registry['hdf5tmp_zlib1'] = hdf5tmp_zlib1_storage _util.storage_registry['hdf5_lzf'] = hdf5_lzf_storage _util.storage_registry['hdf5mem_lzf'] = hdf5mem_lzf_storage _util.storage_registry['hdf5tmp_lzf'] = hdf5tmp_lzf_storage
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/chunked/storage_hdf5.py
storage_hdf5.py
0.545286
0.289709
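A sketch of the HDF5 storage layer above, using the in-memory flavour so nothing is written to disk. It assumes h5py is installed; the shapes are illustrative. Note that append() is patched onto the returned dataset and resizes it along the first axis.

import numpy as np
from allel.chunked.storage_hdf5 import hdf5mem_storage, hdf5mem_zlib1_storage

data = np.arange(1000, dtype='i4').reshape(500, 2)

# plain in-memory dataset (driver='core', no backing store)
h5d = hdf5mem_storage.array(data, expectedlen=10_000)
print(h5d.shape, h5d.chunks)            # (500, 2), chunked along axis 0

# the patched-in append() resizes axis 0 and copies the new rows in
h5d.append(np.zeros((10, 2), dtype='i4'))
print(h5d.shape)                        # (510, 2)

# same API with zlib level-1 compression defaults
h5z = hdf5mem_zlib1_storage.array(data)
print(h5z.compression, h5z.compression_opts)   # gzip 1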
import numpy as np from allel.util import asarray_ndim def array_to_hdf5(a, parent, name, **kwargs): """Write a Numpy array to an HDF5 dataset. Parameters ---------- a : ndarray Data to write. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of dataset to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5d : h5py dataset """ import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: kwargs.setdefault('chunks', True) # auto-chunking kwargs.setdefault('dtype', a.dtype) kwargs.setdefault('compression', 'gzip') h5d = parent.require_dataset(name, shape=a.shape, **kwargs) h5d[...] = a return h5d finally: if h5f is not None: h5f.close() # noinspection PyIncorrectDocstring def recarray_from_hdf5_group(*args, **kwargs): """Load a recarray from columns stored as separate datasets with an HDF5 group. Either provide an h5py group as a single positional argument, or provide two positional arguments giving the HDF5 file path and the group node path within the file. The following optional parameters may be given. Parameters ---------- start : int, optional Index to start loading from. stop : int, optional Index to finish loading at. condition : array_like, bool, optional A 1-dimensional boolean array of the same length as the columns of the table to load, indicating a selection of rows to load. """ import h5py h5f = None if len(args) == 1: group = args[0] elif len(args) == 2: file_path, node_path = args h5f = h5py.File(file_path, mode='r') try: group = h5f[node_path] except Exception as e: h5f.close() raise e else: raise ValueError('bad arguments; expected group or (file_path, ' 'node_path), found %s' % repr(args)) try: if not isinstance(group, h5py.Group): raise ValueError('expected group, found %r' % group) # determine dataset names to load available_dataset_names = [n for n in group.keys() if isinstance(group[n], h5py.Dataset)] names = kwargs.pop('names', available_dataset_names) names = [str(n) for n in names] # needed for PY2 for n in names: if n not in set(group.keys()): raise ValueError('name not found: %s' % n) if not isinstance(group[n], h5py.Dataset): raise ValueError('name does not refer to a dataset: %s, %r' % (n, group[n])) # check datasets are aligned datasets = [group[n] for n in names] length = datasets[0].shape[0] for d in datasets[1:]: if d.shape[0] != length: raise ValueError('datasets must be of equal length') # determine start and stop parameters for load start = kwargs.pop('start', 0) stop = kwargs.pop('stop', length) # check condition condition = kwargs.pop('condition', None) # type: np.ndarray condition = asarray_ndim(condition, 1, allow_none=True) if condition is not None and condition.size != length: raise ValueError('length of condition does not match length ' 'of datasets') # setup output data dtype = [(n, d.dtype, d.shape[1:]) for n, d in zip(names, datasets)] ra = np.empty(length, dtype=dtype) for n, d in zip(names, datasets): a = d[start:stop] if condition is not None: a = np.compress(condition[start:stop], a, axis=0) ra[n] = a return ra finally: if h5f is not None: h5f.close() def recarray_to_hdf5_group(ra, parent, name, **kwargs): """Write each column in a recarray to a dataset in an HDF5 group. Parameters ---------- ra : recarray Numpy recarray to store. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. 
name : string Name or path of group to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5g : h5py group """ import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: h5g = parent.require_group(name) for n in ra.dtype.names: array_to_hdf5(ra[n], h5g, n, **kwargs) return h5g finally: if h5f is not None: h5f.close()
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/io/util.py
util.py
0.85741
0.709824
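A small round-trip sketch for the HDF5 helpers above, writing a recarray column-per-dataset into a group and reading it back. It assumes h5py is available; the file path, group name, and field names (POS, QUAL) are made up for illustration.

import os
import tempfile
import numpy as np
from allel.io.util import recarray_to_hdf5_group, recarray_from_hdf5_group

# hypothetical output location and toy recarray
path = os.path.join(tempfile.mkdtemp(), 'variants.h5')
ra = np.rec.fromarrays(
    [np.array([101, 250, 300]), np.array([9.1, 57.0, 12.5])],
    names=['POS', 'QUAL'])

# each field becomes a gzip-compressed dataset inside the group 'variants'
recarray_to_hdf5_group(ra, path, 'variants')

# read the columns back into a recarray (start/stop/condition also supported)
back = recarray_from_hdf5_group(path, 'variants')
print(back['POS'], back['QUAL'])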
import csv from datetime import date import itertools from operator import itemgetter import logging import numpy as np import allel logger = logging.getLogger(__name__) debug = logger.debug VCF_FIXED_FIELDS = 'CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO' def normalize_callset(callset): if hasattr(callset, 'keys'): names = list() new_callset = dict() for k in list(callset.keys()): a = callset[k] if k.startswith('calldata/'): continue if k == 'samples': continue if k.startswith('variants/'): k = k[9:] names.append(k) new_callset[k] = a callset = new_callset elif hasattr(callset, 'dtype') and callset.dtype.names: names = list(callset.dtype.names) else: raise ValueError('callset should be dict or recarray, found %r' % callset) return names, callset def write_vcf(path, callset, rename=None, number=None, description=None, fill=None, write_header=True): """Preliminary support for writing a VCF file. Currently does not support sample data. Needs further work.""" names, callset = normalize_callset(callset) with open(path, 'w') as vcf_file: if write_header: write_vcf_header(vcf_file, names, callset=callset, rename=rename, number=number, description=description) write_vcf_data(vcf_file, names, callset=callset, rename=rename, fill=fill) def write_vcf_header(vcf_file, names, callset, rename, number, description): if rename is None: rename = dict() if number is None: number = dict() if description is None: description = dict() # write file format version print('##fileformat=VCFv4.1', file=vcf_file) # write today's date today = date.today().strftime('%Y%m%d') print('##fileDate=%s' % today, file=vcf_file) # write source print('##source=scikit-allel-%s' % allel.__version__, file=vcf_file) info_names = [n for n in names if not n.upper().startswith('FILTER_') and not n.upper() in VCF_FIXED_FIELDS] info_ids = [rename[n] if n in rename else n for n in info_names] # write INFO headers, sorted by ID for name, vcf_id in sorted(zip(info_names, info_ids), key=itemgetter(1)): col = callset[name] # determine VCF Number if name in number: vcf_number = number[name] else: if col.ndim == 1 and col.dtype.kind == 'b': # Flag vcf_number = 0 elif col.ndim == 1: vcf_number = 1 elif col.ndim == 2: vcf_number = col.shape[1] else: raise NotImplementedError('only columns with 1 or two ' 'dimensions are supported') # determine VCF Type kind = col.dtype.kind if kind == 'b': vcf_type = 'Flag' elif kind in 'ui': vcf_type = 'Integer' elif kind == 'f': vcf_type = 'Float' else: vcf_type = 'String' # determine VCF Description if name in description: vcf_description = description[name] else: vcf_description = '' # construct INFO header line header_line = '##INFO=<ID=%s,Number=%s,Type=%s,Description="%s">'\ % (vcf_id, vcf_number, vcf_type, vcf_description) print(header_line, file=vcf_file) filter_names = [n for n in names if n.upper().startswith('FILTER_')] filter_ids = [rename[n] if n in rename else n[7:] for n in filter_names] # write FILTER headers, sorted by ID for name, vcf_id in sorted(zip(filter_names, filter_ids), key=itemgetter(1)): # determine VCF Description if name in description: vcf_description = description[name] else: vcf_description = '' # construct FILTER header line header_line = '##FILTER=<ID=%s,Description="%s">'\ % (vcf_id, vcf_description) print(header_line, file=vcf_file) # write column names line = '#' + '\t'.join(VCF_FIXED_FIELDS) print(line, file=vcf_file) # noinspection PyShadowingBuiltins def write_vcf_data(vcf_file, names, callset, rename, fill): if rename is None: rename = dict() if fill is 
None: fill = dict() # find the fixed columns, allowing for case insensitive naming in the # input array col_chrom = None col_pos = None col_id = None col_ref = None col_alt = None col_qual = None for n in names: if n.upper() == 'CHROM': col_chrom = callset[n] elif n.upper() == 'POS': col_pos = callset[n] elif n.upper() == 'ID': col_id = callset[n] elif n.upper() == 'REF': col_ref = callset[n] elif n.upper() == 'ALT': col_alt = callset[n] elif n.upper() == 'QUAL': col_qual = callset[n] # check for required columns if col_chrom is None: raise ValueError('CHROM column not found') if col_pos is None: raise ValueError('POS column not found') # pad optional columns dot = itertools.repeat('.') if col_id is None: col_id = dot if col_ref is None: col_ref = dot if col_alt is None: col_alt = dot if col_qual is None: col_qual = dot # find FILTER columns filter_names = [n for n in names if n.upper().startswith('FILTER_')] filter_ids = [rename[n] if n in rename else n[7:] for n in filter_names] filter_cols = [callset[n] for n in filter_names] # sort by ID if filter_names: filters = sorted(zip(filter_names, filter_ids, filter_cols), key=itemgetter(1)) filter_names, filter_ids, filter_cols = zip(*filters) # find INFO columns info_names = [n for n in names if not n.upper().startswith('FILTER_') and not n.upper() in VCF_FIXED_FIELDS] info_ids = [rename[n] if n in rename else n for n in info_names] info_cols = [callset[n] for n in info_names] # sort by ID if info_names: infos = sorted(zip(info_names, info_ids, info_cols), key=itemgetter(1)) info_names, info_ids, info_cols = zip(*infos) # setup writer writer = csv.writer(vcf_file, delimiter='\t', lineterminator='\n') # zip up data as rows rows = zip(col_chrom, col_pos, col_id, col_ref, col_alt, col_qual) filter_rows = zip(*filter_cols) info_rows = zip(*info_cols) for row, filter_row, info_row in itertools.zip_longest(rows, filter_rows, info_rows): # unpack main row chrom, pos, id, ref, alt, qual = row chrom = _vcf_value_str(chrom) pos = _vcf_value_str(pos) id = _vcf_value_str(id) ref = _vcf_value_str(ref) alt = _vcf_value_str(alt, fill=fill.get('ALT', None)) qual = _vcf_value_str(qual) # construct FILTER value if filter_row is not None: flt = [i for i, v in zip(filter_ids, filter_row) if v] if flt: flt = ';'.join(flt) else: flt = 'PASS' else: flt = '.' # construct INFO value if info_row is not None: info_vals = [_vcf_info_str(n, i, v, fill) for n, i, v in zip(info_names, info_ids, info_row)] info_vals = [x for x in info_vals if x is not None] info = ';'.join(info_vals) else: info = '.' # repack row = chrom, pos, id, ref, alt, qual, flt, info writer.writerow(row) def _vcf_value_str(o, fill=None): if isinstance(o, bytes): return str(o, encoding='ascii') elif isinstance(o, (tuple, list, np.ndarray)): if fill is None: t = [_vcf_value_str(x) for x in o] else: t = [_vcf_value_str(x) for x in o if x != fill] return ','.join(t) else: return str(o) # noinspection PyShadowingBuiltins def _vcf_info_str(name, id, value, fill): if isinstance(value, (bool, np.bool_)): if bool(value): return id else: return None else: return '%s=%s' % (id, _vcf_value_str(value, fill=fill.get(name, None)))
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/io/vcf_write.py
vcf_write.py
0.380759
0.113826
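A minimal sketch of the preliminary VCF writer above, driven by a dict callset. The chromosome, position, and depth values, the output file name, and the FILTER_LowQual column name are made up for illustration; the 'variants/' prefix stripping, the FILTER_* convention, and the INFO handling follow the code shown.

import numpy as np
from allel.io.vcf_write import write_vcf

# 'variants/' prefixes are stripped, FILTER_* columns become FILTER header
# entries, and any other non-fixed column (here DP) is written as INFO
callset = {
    'variants/CHROM': np.array(['chr1', 'chr1']),
    'variants/POS': np.array([100, 200]),
    'variants/REF': np.array(['A', 'C']),
    'variants/ALT': np.array(['T', 'G']),
    'variants/DP': np.array([10, 20]),
    'variants/FILTER_LowQual': np.array([False, True]),
}
write_vcf('example.vcf', callset, description={'DP': 'Raw read depth'})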
import numpy as np from allel.model.ndarray import GenotypeArray from allel.util import ignore_invalid, asarray_ndim def heterozygosity_observed(g, fill=np.nan): """Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, float, shape (n_variants,) Observed heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.heterozygosity_observed(g) array([0. , 0.33333333, 0. , 0.5 ]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # count hets n_het = np.asarray(g.count_het(axis=1)) n_called = np.asarray(g.count_called(axis=1)) # calculate rate of observed heterozygosity, accounting for variants # where all calls are missing with ignore_invalid(): ho = np.where(n_called > 0, n_het / n_called, fill) return ho def heterozygosity_expected(af, ploidy, fill=np.nan): """Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for variants where allele frequencies do not sum to 1. Returns ------- he : ndarray, float, shape (n_variants,) Expected heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> af = g.count_alleles().to_frequencies() >>> allel.heterozygosity_expected(af, ploidy=2) array([0. , 0.5 , 0.66666667, 0.375 ]) """ # check inputs af = asarray_ndim(af, 2) # calculate expected heterozygosity out = 1 - np.sum(np.power(af, ploidy), axis=1) # fill values where allele frequencies could not be calculated af_sum = np.sum(af, axis=1) with ignore_invalid(): out[(af_sum < 1) | np.isnan(af_sum)] = fill return out def inbreeding_coefficient(g, fill=np.nan): """Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # calculate observed and expected heterozygosity ho = heterozygosity_observed(g) af = g.count_alleles().to_frequencies() he = heterozygosity_expected(af, ploidy=g.shape[-1], fill=0) # calculate inbreeding coefficient, accounting for variants with no # expected heterozygosity with ignore_invalid(): f = np.where(he > 0, 1 - (ho / he), fill) return f
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/hw.py
hw.py
0.921473
0.818338
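A pure-numpy cross-check of the formulas implemented above, using the docstrings' own example genotypes; no allel import is needed, it only restates what the functions compute: Ho = n_het / n_called, He = 1 - sum_i p_i**ploidy, and F = 1 - Ho/He.

import numpy as np

# allele frequencies for the docstrings' second variant: genotypes
# [[0, 0], [0, 1], [1, 1]] give p = (0.5, 0.5) and one het out of three calls
af = np.array([0.5, 0.5])
ho = 1 / 3                        # observed heterozygosity, n_het / n_called
he = 1 - np.sum(af ** 2)          # expected heterozygosity under HWE, ploidy=2
f = 1 - ho / he                   # inbreeding coefficient
print(he, f)                      # 0.5 and 0.333..., matching the docstrings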
import numpy as np from allel.util import asarray_ndim def get_scaler(scaler, copy, ploidy): # normalise strings to lower case if isinstance(scaler, str): scaler = scaler.lower() if scaler == 'patterson': return PattersonScaler(copy=copy, ploidy=ploidy) elif scaler == 'standard': return StandardScaler(copy=copy) elif hasattr(scaler, 'fit'): return scaler elif scaler in ['center', 'centre'] or scaler is None: return CenterScaler(copy=copy) else: raise ValueError('unrecognised scaler: %s' % scaler) class StandardScaler(object): def __init__(self, copy=True): self.copy = copy self.mean_ = None self.std_ = None def fit(self, gn): # check input gn = asarray_ndim(gn, 2) # find mean self.mean_ = np.mean(gn, axis=1, keepdims=True) # find scaling factor self.std_ = np.std(gn, axis=1, keepdims=True) return self def transform(self, gn, copy=None): # check inputs copy = copy if copy is not None else self.copy gn = asarray_ndim(gn, 2, copy=copy) if not gn.dtype.kind == 'f': gn = gn.astype('f2') # center gn -= self.mean_ # scale gn /= self.std_ return gn def fit_transform(self, gn, copy=None): self.fit(gn) return self.transform(gn, copy=copy) class CenterScaler(object): def __init__(self, copy=True): self.copy = copy self.mean_ = None self.std_ = None def fit(self, gn): # check input gn = asarray_ndim(gn, 2) # find mean self.mean_ = np.mean(gn, axis=1, keepdims=True) return self def transform(self, gn, copy=None): # check inputs copy = copy if copy is not None else self.copy gn = asarray_ndim(gn, 2, copy=copy) if not gn.dtype.kind == 'f': gn = gn.astype('f2') # center gn -= self.mean_ return gn def fit_transform(self, gn, copy=None): self.fit(gn) return self.transform(gn, copy=copy) class PattersonScaler(object): def __init__(self, copy=True, ploidy=2): self.copy = copy self.ploidy = ploidy self.mean_ = None self.std_ = None def fit(self, gn): # check input gn = asarray_ndim(gn, 2) # find mean self.mean_ = np.mean(gn, axis=1, keepdims=True) # find scaling factor p = self.mean_ / self.ploidy self.std_ = np.sqrt(p * (1 - p)) return self def transform(self, gn, copy=None): # check inputs copy = copy if copy is not None else self.copy gn = asarray_ndim(gn, 2, copy=copy) if not gn.dtype.kind == 'f': gn = gn.astype('f2') # center gn -= self.mean_ # scale gn /= self.std_ return gn def fit_transform(self, gn, copy=None): self.fit(gn) return self.transform(gn, copy=copy)
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/preprocessing.py
preprocessing.py
0.672224
0.335378
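A short sketch of the scalers above on a toy alt-allele dosage matrix (rows are variants, columns are samples); the values are illustrative only. get_scaler('patterson', True, 2) would return the same PattersonScaler instance used by the PCA code.

import numpy as np
from allel.stats.preprocessing import PattersonScaler

# toy dosage matrix: 0 = hom ref, 1 = het, 2 = hom alt
gn = np.array([[0, 1, 2, 1],
               [0, 0, 1, 0]])

scaler = PattersonScaler(ploidy=2)
x = scaler.fit_transform(gn)
print(scaler.mean_.ravel())          # per-variant mean dosage
print(scaler.std_.ravel())           # sqrt(p * (1 - p)) with p = mean / ploidy
print(x.dtype)                       # float16: non-float input is cast to 'f2'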
import numpy as np from allel.stats.preprocessing import get_scaler def pca(gn, n_components=10, copy=True, scaler='patterson', ploidy=2): """Perform principal components analysis of genotype data, via singular value decomposition. Parameters ---------- gn : array_like, float, shape (n_variants, n_samples) Genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). n_components : int, optional Number of components to keep. copy : bool, optional If False, data passed to fit are overwritten. scaler : {'patterson', 'standard', None} Scaling method; 'patterson' applies the method of Patterson et al 2006; 'standard' scales to unit variance; None centers the data only. ploidy : int, optional Sample ploidy, only relevant if 'patterson' scaler is used. Returns ------- coords : ndarray, float, shape (n_samples, n_components) Transformed coordinates for the samples. model : GenotypePCA Model instance containing the variance ratio explained and the stored components (a.k.a., loadings). Can be used to project further data into the same principal components space via the transform() method. Notes ----- Genotype data should be filtered prior to using this function to remove variants in linkage disequilibrium. See Also -------- randomized_pca, allel.stats.ld.locate_unlinked """ # set up the model model = GenotypePCA(n_components, copy=copy, scaler=scaler, ploidy=ploidy) # fit the model and project the input data onto the new dimensions coords = model.fit_transform(gn) return coords, model class GenotypePCA(object): def __init__(self, n_components=10, copy=True, scaler='patterson', ploidy=2): self.n_components = n_components self.copy = copy self.scaler = scaler self.scaler_ = get_scaler(scaler, copy, ploidy) def fit(self, gn): self._fit(gn) return self def fit_transform(self, gn): u, s, v = self._fit(gn) u = u[:, :self.n_components] u *= s[:self.n_components] return u def _fit(self, gn): import scipy.linalg # apply scaling gn = self.scaler_.fit(gn).transform(gn) # transpose for svd # TODO eliminate need for transposition x = gn.T n_samples, n_features = x.shape # singular value decomposition u, s, v = scipy.linalg.svd(x, full_matrices=False) # calculate explained variance explained_variance_ = (s ** 2) / n_samples explained_variance_ratio_ = (explained_variance_ / np.sum(explained_variance_)) # store variables n_components = self.n_components self.components_ = v[:n_components] self.explained_variance_ = explained_variance_[:n_components] self.explained_variance_ratio_ = \ explained_variance_ratio_[:n_components] return u, s, v def transform(self, gn, copy=None): if not hasattr(self, 'components_'): raise ValueError('model has not been not fitted') # scaling gn = self.scaler_.transform(gn, copy=copy) # transpose for transformation # TODO eliminate need for transposition x = gn.T # apply transformation x_transformed = np.dot(x, self.components_.T) return x_transformed def randomized_pca(gn, n_components=10, copy=True, iterated_power=3, random_state=None, scaler='patterson', ploidy=2): """Perform principal components analysis of genotype data, via an approximate truncated singular value decomposition using randomization to speed up the computation. Parameters ---------- gn : array_like, float, shape (n_variants, n_samples) Genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). n_components : int, optional Number of components to keep. 
copy : bool, optional If False, data passed to fit are overwritten. iterated_power : int, optional Number of iterations for the power method. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. scaler : {'patterson', 'standard', None} Scaling method; 'patterson' applies the method of Patterson et al 2006; 'standard' scales to unit variance; None centers the data only. ploidy : int, optional Sample ploidy, only relevant if 'patterson' scaler is used. Returns ------- coords : ndarray, float, shape (n_samples, n_components) Transformed coordinates for the samples. model : GenotypeRandomizedPCA Model instance containing the variance ratio explained and the stored components (a.k.a., loadings). Can be used to project further data into the same principal components space via the transform() method. Notes ----- Genotype data should be filtered prior to using this function to remove variants in linkage disequilibrium. Based on the :class:`sklearn.decomposition.RandomizedPCA` implementation. See Also -------- pca, allel.stats.ld.locate_unlinked """ # set up the model model = GenotypeRandomizedPCA(n_components, copy=copy, iterated_power=iterated_power, random_state=random_state, scaler=scaler, ploidy=ploidy) # fit the model and project the input data onto the new dimensions coords = model.fit_transform(gn) return coords, model class GenotypeRandomizedPCA(object): def __init__(self, n_components=10, copy=True, iterated_power=3, random_state=None, scaler='patterson', ploidy=2): self.n_components = n_components self.copy = copy self.iterated_power = iterated_power self.random_state = random_state self.scaler = scaler self.scaler_ = get_scaler(scaler, copy, ploidy) def fit(self, gn): self._fit(gn) return self def fit_transform(self, gn): u, s, v = self._fit(gn) u *= s return u def _fit(self, gn): from sklearn.utils.validation import check_random_state from sklearn.utils.extmath import randomized_svd # apply scaling gn = self.scaler_.fit(gn).transform(gn) # transpose for svd # TODO eliminate need for transposition x = gn.T # intermediates random_state = check_random_state(self.random_state) n_components = self.n_components n_samples, n_features = x.shape # singular value decomposition u, s, v = randomized_svd(x, n_components, n_iter=self.iterated_power, random_state=random_state) # calculate explained variance self.explained_variance_ = exp_var = (s ** 2) / n_samples full_var = np.var(x, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var # store components self.components_ = v return u, s, v def transform(self, gn, copy=None): if not hasattr(self, 'components_'): raise ValueError('model has not been not fitted') # scaling gn = self.scaler_.transform(gn, copy=copy) # transpose for transformation # TODO eliminate need for transposition x = gn.T # apply transformation x_transformed = np.dot(x, self.components_.T) return x_transformed
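A minimal end-to-end sketch of the PCA helpers defined in the decomposition module above, run on random dosage data; the shapes and values are illustrative only, real data should first be filtered for linkage disequilibrium as the docstrings note, and the randomized variant additionally requires scikit-learn.

import numpy as np
from allel.stats.decomposition import pca, randomized_pca

# toy dosage matrix, shape (n_variants, n_samples)
rng = np.random.default_rng(42)
gn = rng.integers(0, 3, size=(1000, 20)).astype('f4')

coords, model = pca(gn, n_components=4, scaler='patterson')
print(coords.shape)                        # (20, 4): one row per sample
print(model.explained_variance_ratio_)

# the fitted model can project further data into the same component space
coords2 = model.transform(gn)

# approximate variant based on sklearn's randomized_svd
rcoords, rmodel = randomized_pca(gn, n_components=4, random_state=0)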
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/decomposition.py
decomposition.py
0.8758
0.654136
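Both pca() and randomized_pca() above return a (coords, model) pair: coords holds the per-sample coordinates, while the fitted model keeps components_ and explained_variance_ratio_ and can project further data into the same space via transform(). The following is a minimal usage sketch, importing directly from the module shown above; the random genotype matrix, seed, component count and print statements are illustrative assumptions, not part of the original file.

import numpy as np

from allel.stats.decomposition import pca, randomized_pca

# Illustrative input: 100 biallelic variants x 10 samples, coded as the number
# of alternate alleles per call (0 = hom ref, 1 = het, 2 = hom alt).
rng = np.random.default_rng(0)
gn = rng.integers(0, 3, size=(100, 10)).astype('f4')
gn[:, 0] = 1  # keep every variant segregating so the Patterson scale factor is nonzero

# Exact SVD-based PCA with the default Patterson scaling.
coords, model = pca(gn, n_components=4, scaler='patterson')
print(coords.shape)                     # (10, 4): one row per sample
print(model.explained_variance_ratio_)  # variance fraction per retained component

# Approximate, randomized-SVD variant intended for larger matrices.
coords_r, model_r = randomized_pca(gn, n_components=4, iterated_power=3,
                                   random_state=42)

# Project data into the already-fitted component space; a copy is passed because
# the stored scaler is applied before the dot product with components_.
projected = model.transform(gn.copy())

As the docstrings note, variants in strong linkage disequilibrium should be thinned first (see allel.stats.ld.locate_unlinked) so that a handful of correlated loci do not dominate the leading components.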
import logging import itertools import numpy as np from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned from allel.model.ndarray import GenotypeArray from allel.stats.window import windowed_statistic, moving_statistic from allel.stats.diversity import mean_pairwise_difference, \ mean_pairwise_difference_between from allel.stats.misc import jackknife from allel.chunked import get_blen_array logger = logging.getLogger(__name__) debug = logger.debug def weir_cockerham_fst(g, subpops, max_allele=None, blen=None): """Compute the variance components from the analyses of variance of allele frequencies according to Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. max_allele : int, optional The highest allele index to consider. blen : int, optional Block length to use for chunked computation. Returns ------- a : ndarray, float, shape (n_variants, n_alleles) Component of variance between populations. b : ndarray, float, shape (n_variants, n_alleles) Component of variance between individuals within populations. c : ndarray, float, shape (n_variants, n_alleles) Component of variance between gametes within individuals. Examples -------- Calculate variance components from some genotype data:: >>> import allel >>> g = [[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 0], [0, 0], [0, 0], [0, 0]], ... [[0, 1], [1, 2], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [0, 1], [-1, -1]]] >>> subpops = [[0, 1], [2, 3]] >>> a, b, c = allel.weir_cockerham_fst(g, subpops) >>> a array([[ 0.5 , 0.5 , 0. ], [ 0. , 0. , 0. ], [ 0. , 0. , 0. ], [ 0. , -0.125, -0.125], [-0.375, -0.375, 0. ]]) >>> b array([[ 0. , 0. , 0. ], [-0.25 , -0.25 , 0. ], [ 0. , 0. , 0. ], [ 0. , 0.125 , 0.25 ], [ 0.41666667, 0.41666667, 0. ]]) >>> c array([[0. , 0. , 0. ], [0.5 , 0.5 , 0. ], [0. , 0. , 0. ], [0.125 , 0.25 , 0.125 ], [0.16666667, 0.16666667, 0. ]]) Estimate the parameter theta (a.k.a., Fst) for each variant and each allele individually:: >>> fst = a / (a + b + c) >>> fst array([[ 1. , 1. , nan], [ 0. , 0. , nan], [ nan, nan, nan], [ 0. , -0.5, -0.5], [-1.8, -1.8, nan]]) Estimate Fst for each variant individually (averaging over alleles):: >>> fst = (np.sum(a, axis=1) / ... (np.sum(a, axis=1) + np.sum(b, axis=1) + np.sum(c, axis=1))) >>> fst array([ 1. , 0. , nan, -0.4, -1.8]) Estimate Fst averaging over all variants and alleles:: >>> fst = np.sum(a) / (np.sum(a) + np.sum(b) + np.sum(c)) >>> fst -4.36809058868914e-17 Note that estimated Fst values may be negative. 
""" # check inputs if not hasattr(g, 'shape') or not hasattr(g, 'ndim'): g = GenotypeArray(g, copy=False) if g.ndim != 3: raise ValueError('g must have three dimensions') if g.shape[2] != 2: raise NotImplementedError('only diploid genotypes are supported') # determine highest allele index if max_allele is None: max_allele = g.max() # compute in chunks to avoid loading big arrays into memory blen = get_blen_array(g, blen) n_variants = g.shape[0] shape = (n_variants, max_allele + 1) a = np.zeros(shape, dtype='f8') b = np.zeros(shape, dtype='f8') c = np.zeros(shape, dtype='f8') for i in range(0, n_variants, blen): j = min(n_variants, i+blen) gb = g[i:j] ab, bb, cb = _weir_cockerham_fst(gb, subpops, max_allele) a[i:j] = ab b[i:j] = bb c[i:j] = cb return a, b, c # noinspection PyPep8Naming def _weir_cockerham_fst(g, subpops, max_allele): # check inputs g = GenotypeArray(g, copy=False) n_variants, n_samples, ploidy = g.shape n_alleles = max_allele + 1 # number of populations sampled r = len(subpops) n_populations = r debug('r: %r', r) # count alleles within each subpopulation ac = [g.count_alleles(subpop=s, max_allele=max_allele) for s in subpops] # stack allele counts from each sub-population into a single array ac = np.dstack(ac) assert ac.shape == (n_variants, n_alleles, n_populations) debug('ac: %s, %r', ac.shape, ac) # count number of alleles called within each population by summing # allele counts along the alleles dimension an = np.sum(ac, axis=1) assert an.shape == (n_variants, n_populations) debug('an: %s, %r', an.shape, an) # compute number of individuals sampled from each population n = an // 2 assert n.shape == (n_variants, n_populations) debug('n: %s, %r', n.shape, n) # compute the total number of individuals sampled across all populations n_total = np.sum(n, axis=1) assert n_total.shape == (n_variants,) debug('n_total: %s, %r', n_total.shape, n_total) # compute the average sample size across populations n_bar = np.mean(n, axis=1) assert n_bar.shape == (n_variants,) debug('n_bar: %s, %r', n_bar.shape, n_bar) # compute the term n sub C incorporating the coefficient of variation in # sample sizes n_C = (n_total - (np.sum(n**2, axis=1) / n_total)) / (r - 1) assert n_C.shape == (n_variants,) debug('n_C: %s, %r', n_C.shape, n_C) # compute allele frequencies within each population p = ac / an[:, np.newaxis, :] assert p.shape == (n_variants, n_alleles, n_populations) debug('p: %s, %r', p.shape, p) # compute the average sample frequency of each allele ac_total = np.sum(ac, axis=2) an_total = np.sum(an, axis=1) p_bar = ac_total / an_total[:, np.newaxis] assert p_bar.shape == (n_variants, n_alleles) debug('p_bar: %s, %r', p_bar.shape, p_bar) # add in some extra dimensions to enable broadcasting n_bar = n_bar[:, np.newaxis] n_C = n_C[:, np.newaxis] n = n[:, np.newaxis, :] p_bar = p_bar[:, :, np.newaxis] # compute the sample variance of allele frequencies over populations s_squared = ( np.sum(n * ((p - p_bar) ** 2), axis=2) / (n_bar * (r - 1)) ) assert s_squared.shape == (n_variants, n_alleles) debug('s_squared: %s, %r', s_squared.shape, s_squared) # remove extra dimensions for correct broadcasting p_bar = p_bar[:, :, 0] # compute the average heterozygosity over all populations # N.B., take only samples in subpops of interest gs = g.take(list(itertools.chain(*subpops)), axis=1) h_bar = [gs.count_het(allele=allele, axis=1) / n_total for allele in range(n_alleles)] h_bar = np.column_stack(h_bar) assert h_bar.shape == (n_variants, n_alleles) debug('h_bar: %s, %r', h_bar.shape, h_bar) # now comes 
the tricky bit... # component of variance between populations a = ((n_bar / n_C) * (s_squared - ((1 / (n_bar - 1)) * ((p_bar * (1 - p_bar)) - ((r - 1) * s_squared / r) - (h_bar / 4))))) assert a.shape == (n_variants, n_alleles) # component of variance between individuals within populations b = ((n_bar / (n_bar - 1)) * ((p_bar * (1 - p_bar)) - ((r - 1) * s_squared / r) - (((2 * n_bar) - 1) * h_bar / (4 * n_bar)))) assert b.shape == (n_variants, n_alleles) # component of variance between gametes within individuals c = h_bar / 2 assert c.shape == (n_variants, n_alleles) return a, b, c def hudson_fst(ac1, ac2, fill=np.nan): """Calculate the numerator and denominator for Fst estimation using the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- num : ndarray, float, shape (n_variants,) Divergence between the two populations minus average of diversity within each population. den : ndarray, float, shape (n_variants,) Divergence between the two populations. Examples -------- Calculate numerator and denominator for Fst estimation:: >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 0], [0, 0], [0, 0], [0, 0]], ... [[0, 1], [1, 2], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [0, 1], [-1, -1]]]) >>> subpops = [[0, 1], [2, 3]] >>> ac1 = g.count_alleles(subpop=subpops[0]) >>> ac2 = g.count_alleles(subpop=subpops[1]) >>> num, den = allel.hudson_fst(ac1, ac2) >>> num array([ 1. , -0.16666667, 0. , -0.125 , -0.33333333]) >>> den array([1. , 0.5 , 0. , 0.625, 0.5 ]) Estimate Fst for each variant individually:: >>> fst = num / den >>> fst array([ 1. , -0.33333333, nan, -0.2 , -0.66666667]) Estimate Fst averaging over variants:: >>> fst = np.sum(num) / np.sum(den) >>> fst 0.1428571428571429 """ # flake8: noqa # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # calculate these once only an1 = np.sum(ac1, axis=1) an2 = np.sum(ac2, axis=1) # calculate average diversity (a.k.a. heterozygosity) within each # population within = (mean_pairwise_difference(ac1, an1, fill=fill) + mean_pairwise_difference(ac2, an2, fill=fill)) / 2 # calculate divergence (a.k.a. heterozygosity) between each population between = mean_pairwise_difference_between(ac1, ac2, an1, an2, fill=fill) # define numerator and denominator for Fst calculations num = between - within den = between return num, den def patterson_fst(aca, acb): """Estimator of differentiation between populations A and B based on the F2 parameter. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------- num : ndarray, shape (n_variants,), float Numerator. den : ndarray, shape (n_variants,), float Denominator. Notes ----- See Patterson (2012), Appendix A. TODO check if this is numerically equivalent to Hudson's estimator. 
""" from allel.stats.admixture import patterson_f2, h_hat num = patterson_f2(aca, acb) den = num + h_hat(aca) + h_hat(acb) return num, den def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, max_allele=None): """Estimate average Fst in windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. """ # compute values per-variant a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # define the statistic to compute within each window def average_fst(wa, wb, wc): return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc)) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(a, b, c), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan): """Estimate average Fst in windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. 
windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. """ # compute values per-variants num, den = hudson_fst(ac1, ac2) # define the statistic to compute within each window def average_fst(wn, wd): return np.nansum(wn) / np.nansum(wd) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(num, den), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts def windowed_patterson_fst(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan): """Estimate average Fst in windows over a single chromosome/contig, following the method of Patterson (2012). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. """ # compute values per-variants num, den = patterson_fst(ac1, ac2) # define the statistic to compute within each window def average_fst(wn, wd): return np.nansum(wn) / np.nansum(wd) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(num, den), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts def moving_weir_cockerham_fst(g, subpops, size, start=0, stop=None, step=None, max_allele=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. 
""" # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # compute the numerator and denominator in moving windows num = moving_statistic(a, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den = moving_statistic(a + b + c, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num / den return fst def moving_hudson_fst(ac1, ac2, size, start=0, stop=None, step=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst def moving_patterson_fst(ac1, ac2, size, start=0, stop=None, step=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Patterson (2012). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values num, den = patterson_fst(ac1, ac2) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst def average_weir_cockerham_fst(g, subpops, blen, max_allele=None): """Estimate average Fst and standard error using the block-jackknife. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. blen : int Block size (number of variants). max_allele : int, optional The highest allele index to consider. Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. 
""" # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # calculate overall estimate a_sum = np.nansum(a) b_sum = np.nansum(b) c_sum = np.nansum(c) fst = a_sum / (a_sum + b_sum + c_sum) # compute the numerator and denominator within each block num_bsum = moving_statistic(a, statistic=np.nansum, size=blen) den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj def average_hudson_fst(ac1, ac2, blen): """Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj def average_patterson_fst(ac1, ac2, blen): """Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values num, den = patterson_fst(ac1, ac2) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj # bacwkards compatibility blockwise_weir_cockerham_fst = average_weir_cockerham_fst blockwise_hudson_fst = average_hudson_fst blockwise_patterson_fst = average_patterson_fst
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/fst.py
fst.py
0.795301
0.455138
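The estimators above share a common pattern: per-variant building blocks (the variance components a, b and c for Weir & Cockerham; a numerator and denominator for Hudson and Patterson) plus windowed_*, moving_* and average_* wrappers that aggregate them over base-pair windows, variant-count windows, or block-jackknife blocks. Below is a minimal sketch of the Hudson route, importing from the module shown above; the toy genotype matrix, window size and block length are illustrative assumptions only.

import numpy as np

import allel
from allel.stats.fst import hudson_fst, moving_hudson_fst, average_hudson_fst

# Illustrative diploid genotypes: 6 variants x 4 samples.
g = allel.GenotypeArray([
    [[0, 0], [0, 0], [1, 1], [1, 1]],
    [[0, 1], [0, 1], [0, 1], [0, 1]],
    [[0, 0], [0, 1], [1, 1], [1, 1]],
    [[0, 1], [1, 1], [0, 0], [0, 1]],
    [[0, 0], [0, 0], [0, 1], [1, 1]],
    [[1, 1], [0, 1], [0, 0], [0, 0]],
])
subpops = [[0, 1], [2, 3]]  # two populations, two samples each

# Allele counts per population are the inputs to the Hudson estimator.
ac1 = g.count_alleles(subpop=subpops[0])
ac2 = g.count_alleles(subpop=subpops[1])

num, den = hudson_fst(ac1, ac2)
fst_per_variant = num / den                    # may contain nan where den == 0
fst_overall = np.nansum(num) / np.nansum(den)  # ratio of sums, not mean of ratios

# Non-overlapping windows of 3 variants along the variant index.
fst_windows = moving_hudson_fst(ac1, ac2, size=3)

# Overall estimate with a block-jackknife standard error (blocks of 2 variants).
fst, se, vb, vj = average_hudson_fst(ac1, ac2, blen=2)

Summing numerators and denominators before dividing (a ratio of sums rather than a mean of per-variant ratios) is the aggregation used by all of the windowed, moving and average wrappers in this module.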
import numpy as np from allel.compat import memoryview_safe from allel.model.ndarray import GenotypeArray, HaplotypeArray from allel.util import check_ploidy, check_min_samples, check_type, check_dtype from allel.opt.stats import phase_progeny_by_transmission as _opt_phase_progeny_by_transmission, \ phase_parents_by_transmission as _opt_phase_parents_by_transmission def mendel_errors(parent_genotypes, progeny_genotypes): """Locate genotype calls not consistent with Mendelian transmission of alleles. Parameters ---------- parent_genotypes : array_like, int, shape (n_variants, 2, 2) Genotype calls for the two parents. progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2) Genotype calls for the progeny. Returns ------- me : ndarray, int, shape (n_variants, n_progeny) Count of Mendel errors for each progeny genotype call. Examples -------- The following are all consistent with Mendelian transmission. Note that a value of 0 is returned for missing calls:: >>> import allel >>> import numpy as np >>> genotypes = np.array([ ... # aa x aa -> aa ... [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x ab -> aa or ab ... [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]], ... [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]], ... # aa x bb -> ab ... [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x bc -> ab or ac ... [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]], ... # ab x ab -> aa or ab or bb ... [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]], ... [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]], ... [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]], ... # ab x bc -> ab or ac or bb or bc ... [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]], ... [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]], ... # ab x cd -> ac or ad or bc or bd ... [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) The following are cases of 'non-parental' inheritance where one or two alleles are found in the progeny that are not present in either parent. Note that the number of errors may be 1 or 2 depending on the number of non-parental alleles:: >>> genotypes = np.array([ ... # aa x aa -> ab or ac or bb or cc ... [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]], ... [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]], ... [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]], ... # aa x ab -> ac or bc or cc ... [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [0, 1], [1, 2], [0, 2], [2, 2], [2, 2]], ... # aa x bb -> ac or bc or cc ... [[0, 0], [1, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [2, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [2, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x ab -> ac or bc or cc ... [[0, 1], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 2], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... 
[[1, 2], [1, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x bc -> ad or bd or cd or dd ... [[0, 1], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 2], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... # ab x cd -> ae or be or ce or de ... [[0, 1], [2, 3], [0, 4], [1, 4], [2, 4], [3, 4]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 1]]) The following are cases of 'hemi-parental' inheritance, where progeny appear to have inherited two copies of an allele found only once in one of the parents:: >>> genotypes = np.array([ ... # aa x ab -> bb ... [[0, 0], [0, 1], [1, 1], [-1, -1]], ... [[0, 0], [0, 2], [2, 2], [-1, -1]], ... [[1, 1], [0, 1], [0, 0], [-1, -1]], ... # ab x bc -> aa or cc ... [[0, 1], [1, 2], [0, 0], [2, 2]], ... [[0, 1], [0, 2], [1, 1], [2, 2]], ... [[0, 2], [1, 2], [0, 0], [1, 1]], ... # ab x cd -> aa or bb or cc or dd ... [[0, 1], [2, 3], [0, 0], [1, 1]], ... [[0, 1], [2, 3], [2, 2], [3, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) The following are cases of 'uni-parental' inheritance, where progeny appear to have inherited both alleles from a single parent:: >>> genotypes = np.array([ ... # aa x bb -> aa or bb ... [[0, 0], [1, 1], [0, 0], [1, 1]], ... [[0, 0], [2, 2], [0, 0], [2, 2]], ... [[1, 1], [2, 2], [1, 1], [2, 2]], ... # aa x bc -> aa or bc ... [[0, 0], [1, 2], [0, 0], [1, 2]], ... [[1, 1], [0, 2], [1, 1], [0, 2]], ... # ab x cd -> ab or cd ... [[0, 1], [2, 3], [0, 1], [2, 3]], ... 
]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) """ # setup parent_genotypes = GenotypeArray(parent_genotypes) progeny_genotypes = GenotypeArray(progeny_genotypes) check_ploidy(parent_genotypes.ploidy, 2) check_ploidy(progeny_genotypes.ploidy, 2) # transform into per-call allele counts max_allele = max(parent_genotypes.max(), progeny_genotypes.max()) parent_gc = parent_genotypes.to_allele_counts(max_allele=max_allele).astype('i1') progeny_gc = progeny_genotypes.to_allele_counts(max_allele=max_allele).astype('i1') # detect nonparental and hemiparental inheritance by comparing allele # counts between parents and progeny max_progeny_gc = parent_gc.clip(max=1).sum(axis=1) max_progeny_gc = max_progeny_gc[:, np.newaxis, :] me = (progeny_gc - max_progeny_gc).clip(min=0).sum(axis=2) # detect uniparental inheritance by finding cases where no alleles are # shared between parents, then comparing progeny allele counts to each # parent p1_gc = parent_gc[:, 0, np.newaxis, :] p2_gc = parent_gc[:, 1, np.newaxis, :] # find variants where parents don't share any alleles is_shared_allele = (p1_gc > 0) & (p2_gc > 0) no_shared_alleles = ~np.any(is_shared_allele, axis=2) # find calls where progeny genotype is identical to one or the other parent me[no_shared_alleles & (np.all(progeny_gc == p1_gc, axis=2) | np.all(progeny_gc == p2_gc, axis=2))] = 1 # retrofit where either or both parent has a missing call me[np.any(parent_genotypes.is_missing(), axis=1)] = 0 return me # constants to represent inheritance states INHERIT_UNDETERMINED = 0 INHERIT_PARENT1 = 1 INHERIT_PARENT2 = 2 INHERIT_NONSEG_REF = 3 INHERIT_NONSEG_ALT = 4 INHERIT_NONPARENTAL = 5 INHERIT_PARENT_MISSING = 6 INHERIT_MISSING = 7 def paint_transmission(parent_haplotypes, progeny_haplotypes): """Paint haplotypes inherited from a single diploid parent according to their allelic inheritance. Parameters ---------- parent_haplotypes : array_like, int, shape (n_variants, 2) Both haplotypes from a single diploid parent. progeny_haplotypes : array_like, int, shape (n_variants, n_progeny) Haplotypes found in progeny of the given parent, inherited from the given parent. I.e., haplotypes from gametes of the given parent. Returns ------- painting : ndarray, uint8, shape (n_variants, n_progeny) An array of integers coded as follows: 1 = allele inherited from first parental haplotype; 2 = allele inherited from second parental haplotype; 3 = reference allele, also carried by both parental haplotypes; 4 = non-reference allele, also carried by both parental haplotypes; 5 = non-parental allele; 6 = either or both parental alleles missing; 7 = missing allele; 0 = undetermined. Examples -------- >>> import allel >>> haplotypes = allel.HaplotypeArray([ ... [0, 0, 0, 1, 2, -1], ... [0, 1, 0, 1, 2, -1], ... [1, 0, 0, 1, 2, -1], ... [1, 1, 0, 1, 2, -1], ... [0, 2, 0, 1, 2, -1], ... [0, -1, 0, 1, 2, -1], ... [-1, 1, 0, 1, 2, -1], ... [-1, -1, 0, 1, 2, -1], ... ], dtype='i1') >>> painting = allel.paint_transmission(haplotypes[:, :2], ... 
haplotypes[:, 2:]) >>> painting array([[3, 5, 5, 7], [1, 2, 5, 7], [2, 1, 5, 7], [5, 4, 5, 7], [1, 5, 2, 7], [6, 6, 6, 7], [6, 6, 6, 7], [6, 6, 6, 7]], dtype=uint8) """ # check inputs parent_haplotypes = HaplotypeArray(parent_haplotypes) progeny_haplotypes = HaplotypeArray(progeny_haplotypes) if parent_haplotypes.n_haplotypes != 2: raise ValueError('exactly two parental haplotypes should be provided') # convenience variables parent1 = parent_haplotypes[:, 0, np.newaxis] parent2 = parent_haplotypes[:, 1, np.newaxis] progeny_is_missing = progeny_haplotypes < 0 parent_is_missing = np.any(parent_haplotypes < 0, axis=1) # need this for broadcasting, but also need to retain original for later parent_is_missing_bc = parent_is_missing[:, np.newaxis] parent_diplotype = GenotypeArray(parent_haplotypes[:, np.newaxis, :]) parent_is_hom_ref = parent_diplotype.is_hom_ref() parent_is_het = parent_diplotype.is_het() parent_is_hom_alt = parent_diplotype.is_hom_alt() # identify allele calls where inheritance can be determined is_callable = ~progeny_is_missing & ~parent_is_missing_bc is_callable_seg = is_callable & parent_is_het # main inheritance states inherit_parent1 = is_callable_seg & (progeny_haplotypes == parent1) inherit_parent2 = is_callable_seg & (progeny_haplotypes == parent2) nonseg_ref = (is_callable & parent_is_hom_ref & (progeny_haplotypes == parent1)) nonseg_alt = (is_callable & parent_is_hom_alt & (progeny_haplotypes == parent1)) nonparental = ( is_callable & (progeny_haplotypes != parent1) & (progeny_haplotypes != parent2) ) # record inheritance states # N.B., order in which these are set matters painting = np.zeros(progeny_haplotypes.shape, dtype='u1') painting[inherit_parent1] = INHERIT_PARENT1 painting[inherit_parent2] = INHERIT_PARENT2 painting[nonseg_ref] = INHERIT_NONSEG_REF painting[nonseg_alt] = INHERIT_NONSEG_ALT painting[nonparental] = INHERIT_NONPARENTAL painting[parent_is_missing] = INHERIT_PARENT_MISSING painting[progeny_is_missing] = INHERIT_MISSING return painting def phase_progeny_by_transmission(g): """Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 
0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]]) """ # setup g = GenotypeArray(g, dtype='i1', copy=True) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # run the phasing # N.B., a copy has already been made, so no need to make memoryview safe is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # outputs return g def phase_parents_by_transmission(g, window_size): """Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. Returns ------- g : GenotypeArray Genotype array with parents phased where possible. """ # setup check_type(g, GenotypeArray) check_dtype(g.values, 'i1') check_ploidy(g.ploidy, 2) if g.is_phased is None: raise ValueError('genotype array must first have progeny phased by transmission') check_min_samples(g.n_samples, 3) # run the phasing g._values = memoryview_safe(g.values) g._is_phased = memoryview_safe(g.is_phased) _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size) # outputs return g def phase_by_transmission(g, window_size, copy=True): """Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. copy : bool, optional If False, attempt to phase genotypes in-place. Note that this is only possible if the input array has int8 dtype, otherwise a copy is always made regardless of this parameter. Returns ------- g : GenotypeArray Genotype array with progeny phased where possible. """ # setup g = np.asarray(g, dtype='i1') g = GenotypeArray(g, copy=copy) g._values = memoryview_safe(g.values) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # phase the progeny is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # phase the parents _opt_phase_parents_by_transmission(g.values, is_phased, window_size) return g
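Neither phase_parents_by_transmission nor phase_by_transmission above carries a doctest, so a minimal usage sketch follows. It assumes the same trio layout as the other examples (parents in the first two columns, progeny in the remaining columns); the genotype values and the window_size of 100 are illustrative choices only, and the import mirrors the module path shown just below (allel/stats/mendel.py).

# Hedged usage sketch for phase_by_transmission (illustrative only).
import numpy as np
from allel.stats.mendel import phase_by_transmission

# toy trio: parents in columns 0-1, a single progeny in column 2
g = np.array([
    [[0, 0], [0, 0], [0, 0]],
    [[0, 0], [1, 1], [0, 1]],
    [[0, 0], [0, 1], [0, 1]],
    [[0, 1], [0, 1], [0, 1]],
], dtype='i1')

# phase the progeny by transmission, then attempt to phase the parents using
# a window of previously phased heterozygous sites
g_phased = phase_by_transmission(g, window_size=100)
print(g_phased.is_phased)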
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/mendel.py
mendel.py
0.684053
0.681097
from collections import OrderedDict import numpy as np from allel.compat import memoryview_safe from allel.model.ndarray import SortedIndex from allel.util import asarray_ndim, check_dim0_aligned, check_integer_dtype from allel.opt.stats import state_transitions def jackknife(values, statistic): """Estimate standard error for `statistic` computed over `values` using the jackknife. Parameters ---------- values : array_like or tuple of array_like Input array, or tuple of input arrays. statistic : function The statistic to compute. Returns ------- m : float Mean of jackknife values. se : float Estimate of standard error. vj : ndarray Statistic values computed for each jackknife iteration. """ if isinstance(values, tuple): # multiple input arrays n = len(values[0]) masked_values = [np.ma.asarray(v) for v in values] for m in masked_values: assert m.ndim == 1, 'only 1D arrays supported' assert m.shape[0] == n, 'input arrays not of equal length' m.mask = np.zeros(m.shape, dtype=bool) else: n = len(values) masked_values = np.ma.asarray(values) assert masked_values.ndim == 1, 'only 1D arrays supported' masked_values.mask = np.zeros(masked_values.shape, dtype=bool) # values of the statistic calculated in each jackknife iteration vj = list() for i in range(n): if isinstance(values, tuple): # multiple input arrays for m in masked_values: m.mask[i] = True x = statistic(*masked_values) for m in masked_values: m.mask[i] = False else: masked_values.mask[i] = True x = statistic(masked_values) masked_values.mask[i] = False vj.append(x) # convert to array for convenience vj = np.array(vj) # compute mean of jackknife values m = vj.mean() # compute standard error sv = ((n - 1) / n) * np.sum((vj - m) ** 2) se = np.sqrt(sv) return m, se, vj def plot_variant_locator(pos, step=None, ax=None, start=None, stop=None, flip=False, line_kwargs=None): """ Plot lines indicating the physical genome location of variants from a single chromosome/contig. By default the top x axis is in variant index space, and the bottom x axis is in genome position space. Parameters ---------- pos : array_like A sorted 1-dimensional array of genomic positions from a single chromosome/contig. step : int, optional Plot a line for every `step` variants. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. start : int, optional The start position for the region to draw. stop : int, optional The stop position for the region to draw. flip : bool, optional Flip the plot upside down. line_kwargs : dict-like Additional keyword arguments passed through to `plt.Line2D`. Returns ------- ax : axes The axes on which the plot was drawn """ import matplotlib.pyplot as plt # check inputs pos = SortedIndex(pos, copy=False) # set up axes if ax is None: x = plt.rcParams['figure.figsize'][0] y = x / 7 fig, ax = plt.subplots(figsize=(x, y)) fig.tight_layout() # determine x axis limits if start is None: start = np.min(pos) if stop is None: stop = np.max(pos) loc = pos.locate_range(start, stop) pos = pos[loc] if step is None: step = len(pos) // 100 ax.set_xlim(start, stop) # plot the lines if line_kwargs is None: line_kwargs = dict() # line_kwargs.setdefault('linewidth', .5) n_variants = len(pos) for i, p in enumerate(pos[::step]): xfrom = p xto = ( start + ((i * step / n_variants) * (stop-start)) ) line = plt.Line2D([xfrom, xto], [0, 1], **line_kwargs) ax.add_line(line) # invert? 
if flip: ax.invert_yaxis() ax.xaxis.tick_top() else: ax.xaxis.tick_bottom() # tidy up ax.set_yticks([]) ax.xaxis.set_tick_params(direction='out') for spine in 'left', 'right': ax.spines[spine].set_visible(False) return ax def tabulate_state_transitions(x, states, pos=None): """Construct a dataframe where each row provides information about a state transition. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Notes ----- The resulting dataframe includes one row at the start representing the first state observation and one row at the end representing the last state observation. Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_transitions(x, states={1, 2}) >>> df lstate rstate lidx ridx 0 -1 1 -1 0 1 1 2 4 5 2 2 1 8 9 3 1 -1 10 -1 >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos) >>> df lstate rstate lidx ridx lpos rpos 0 -1 1 -1 0 -1 2 1 1 2 4 5 10 14 2 2 1 8 9 28 30 3 1 -1 10 -1 31 -1 """ # check inputs x = asarray_ndim(x, 1) check_integer_dtype(x) x = memoryview_safe(x) # find state transitions switch_points, transitions, _ = state_transitions(x, states) # start to build a dataframe items = [('lstate', transitions[:, 0]), ('rstate', transitions[:, 1]), ('lidx', switch_points[:, 0]), ('ridx', switch_points[:, 1])] # deal with optional positions if pos is not None: pos = asarray_ndim(pos, 1) check_dim0_aligned(x, pos) check_integer_dtype(pos) # find switch positions switch_positions = np.take(pos, switch_points) # deal with boundary transitions switch_positions[0, 0] = -1 switch_positions[-1, 1] = -1 # add columns into dataframe items += [('lpos', switch_positions[:, 0]), ('rpos', switch_positions[:, 1])] import pandas return pandas.DataFrame.from_dict(OrderedDict(items)) def tabulate_state_blocks(x, states, pos=None): """Construct a dataframe where each row provides information about continuous state blocks. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_blocks(x, states={1, 2}) >>> df state support start_lidx ... size_min size_max is_marginal 0 1 4 -1 ... 5 -1 True 1 2 3 4 ... 4 4 False 2 1 2 8 ... 2 -1 True [3 rows x 9 columns] >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos) >>> df state support start_lidx ... stop_rpos length_min length_max 0 1 4 -1 ... 14 9 -1 1 2 3 4 ... 30 15 19 2 1 2 8 ... 
-1 2 -1 [3 rows x 15 columns] """ # check inputs x = asarray_ndim(x, 1) check_integer_dtype(x) x = memoryview_safe(x) # find state transitions switch_points, transitions, observations = state_transitions(x, states) # setup some helpers t = transitions[1:, 0] o = observations[1:] s1 = switch_points[:-1] s2 = switch_points[1:] is_marginal = (s1[:, 0] < 0) | (s2[:, 1] < 0) size_min = s2[:, 0] - s1[:, 1] + 1 size_max = s2[:, 1] - s1[:, 0] - 1 size_max[is_marginal] = -1 # start to build a dataframe items = [ ('state', t), ('support', o), ('start_lidx', s1[:, 0]), ('start_ridx', s1[:, 1]), ('stop_lidx', s2[:, 0]), ('stop_ridx', s2[:, 1]), ('size_min', size_min), ('size_max', size_max), ('is_marginal', is_marginal) ] # deal with optional positions if pos is not None: pos = asarray_ndim(pos, 1) check_dim0_aligned(x, pos) check_integer_dtype(pos) # obtain switch positions switch_positions = np.take(pos, switch_points) # deal with boundary transitions switch_positions[0, 0] = -1 switch_positions[-1, 1] = -1 # setup helpers p1 = switch_positions[:-1] p2 = switch_positions[1:] length_min = p2[:, 0] - p1[:, 1] + 1 length_max = p2[:, 1] - p1[:, 0] - 1 length_max[is_marginal] = -1 items += [ ('start_lpos', p1[:, 0]), ('start_rpos', p1[:, 1]), ('stop_lpos', p2[:, 0]), ('stop_rpos', p2[:, 1]), ('length_min', length_min), ('length_max', length_max), ] import pandas return pandas.DataFrame.from_dict(OrderedDict(items))
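jackknife() above has no usage example, so a small sketch of its calling convention follows; the input values are arbitrary toy data, and np.ma.mean is just one statistic that accepts the masked array the function passes in on each iteration.

# Hedged sketch of the jackknife calling convention (toy data only).
import numpy as np
from allel.stats.misc import jackknife

values = np.array([0.2, 0.5, 0.1, 0.4, 0.3])

# the statistic receives a masked array with one observation masked per
# iteration; np.ma.mean ignores the masked element
m, se, vj = jackknife(values, statistic=np.ma.mean)
print(m, se, vj)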
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/misc.py
misc.py
0.919665
0.77343
import itertools import numpy as np from allel.model.ndarray import SortedIndex from allel.util import asarray_ndim, ensure_square from allel.stats.diversity import sequence_divergence from allel.chunked import get_blen_array def pairwise_distance(x, metric, chunked=False, blen=None): """Compute pairwise distance between individuals (e.g., samples or haplotypes). Parameters ---------- x : array_like, shape (n, m, ...) Array of m observations (e.g., samples or haplotypes) in a space with n dimensions (e.g., variants). Note that the order of the first two dimensions is **swapped** compared to what is expected by scipy.spatial.distance.pdist. metric : string or function Distance metric. See documentation for the function :func:`scipy.spatial.distance.pdist` for a list of built-in distance metrics. chunked : bool, optional If True, use a block-wise implementation to avoid loading the entire input array into memory. This means that a distance matrix will be calculated for each block of the input array, and the results will be summed to produce the final output. For some distance metrics this will return a different result from the standard implementation. blen : int, optional Block length to use for chunked implementation. Returns ------- dist : ndarray, shape (m * (m - 1) / 2,) Distance matrix in condensed form. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]], ... [[0, 1], [1, 1], [1, 2]], ... [[0, 2], [2, 2], [-1, -1]]]) >>> d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock') >>> d array([3., 4., 3.]) >>> import scipy.spatial >>> scipy.spatial.distance.squareform(d) array([[0., 3., 4.], [3., 0., 3.], [4., 3., 0.]]) """ import scipy.spatial # check inputs if not hasattr(x, 'ndim'): x = np.asarray(x) if x.ndim < 2: raise ValueError('array with at least 2 dimensions expected') if x.ndim == 2: # use scipy to calculate distance, it's most efficient def f(b): # transpose as pdist expects (m, n) for m observations in an # n-dimensional space t = b.T # compute the distance matrix return scipy.spatial.distance.pdist(t, metric=metric) else: # use our own implementation, it handles multidimensional observations def f(b): return pdist(b, metric=metric) if chunked: # use block-wise implementation blen = get_blen_array(x, blen) dist = None for i in range(0, x.shape[0], blen): j = min(x.shape[0], i+blen) block = x[i:j] if dist is None: dist = f(block) else: dist += f(block) else: # standard implementation dist = f(x) return dist def pdist(x, metric): """Alternative implementation of :func:`scipy.spatial.distance.pdist` which is slower but more flexible in that arrays with >2 dimensions can be passed, allowing for multidimensional observations, e.g., diploid genotype calls or allele counts. Parameters ---------- x : array_like, shape (n, m, ...) Array of m observations (e.g., samples or haplotypes) in a space with n dimensions (e.g., variants). Note that the order of the first two dimensions is **swapped** compared to what is expected by scipy.spatial.distance.pdist. metric : string or function Distance metric. See documentation for the function :func:`scipy.spatial.distance.pdist` for a list of built-in distance metrics. Returns ------- dist : ndarray Distance matrix in condensed form. 
""" if isinstance(metric, str): import scipy.spatial if hasattr(scipy.spatial.distance, metric): metric = getattr(scipy.spatial.distance, metric) else: raise ValueError('metric name not found') m = x.shape[1] dist = list() for i, j in itertools.combinations(range(m), 2): a = x[:, i, ...] b = x[:, j, ...] d = metric(a, b) dist.append(d) return np.array(dist) def pairwise_dxy(pos, gac, start=None, stop=None, is_accessible=None): """Convenience function to calculate a pairwise distance matrix using nucleotide divergence (a.k.a. Dxy) as the distance metric. Parameters ---------- pos : array_like, int, shape (n_variants,) Variant positions. gac : array_like, int, shape (n_variants, n_samples, n_alleles) Per-genotype allele counts. start : int, optional Start position of region to use. stop : int, optional Stop position of region to use. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. Returns ------- dist : ndarray Distance matrix in condensed form. See Also -------- allel.model.ndarray.GenotypeArray.to_allele_counts """ if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) gac = asarray_ndim(gac, 3) # compute this once here, to avoid repeated evaluation within the loop gan = np.sum(gac, axis=2) m = gac.shape[1] dist = list() for i, j in itertools.combinations(range(m), 2): ac1 = gac[:, i, ...] an1 = gan[:, i] ac2 = gac[:, j, ...] an2 = gan[:, j] d = sequence_divergence(pos, ac1, ac2, an1=an1, an2=an2, start=start, stop=stop, is_accessible=is_accessible) dist.append(d) return np.array(dist) def pcoa(dist): """Perform principal coordinate analysis of a distance matrix, a.k.a. classical multi-dimensional scaling. Parameters ---------- dist : array_like Distance matrix in condensed form. Returns ------- coords : ndarray, shape (n_samples, n_dimensions) Transformed coordinates for the samples. explained_ratio : ndarray, shape (n_dimensions) Variance explained by each dimension. """ import scipy.linalg # This implementation is based on the skbio.math.stats.ordination.PCoA # implementation, with some minor adjustments. # check inputs dist = ensure_square(dist) # perform scaling e_matrix = (dist ** 2) / -2 row_means = np.mean(e_matrix, axis=1, keepdims=True) col_means = np.mean(e_matrix, axis=0, keepdims=True) matrix_mean = np.mean(e_matrix) f_matrix = e_matrix - row_means - col_means + matrix_mean eigvals, eigvecs = scipy.linalg.eigh(f_matrix) # deal with eigvals close to zero close_to_zero = np.isclose(eigvals, 0) eigvals[close_to_zero] = 0 # sort descending idxs = eigvals.argsort()[::-1] eigvals = eigvals[idxs] eigvecs = eigvecs[:, idxs] # keep only positive eigenvalues keep = eigvals >= 0 eigvecs = eigvecs[:, keep] eigvals = eigvals[keep] # compute coordinates coords = eigvecs * np.sqrt(eigvals) # compute ratio explained explained_ratio = eigvals / eigvals.sum() return coords, explained_ratio def condensed_coords(i, j, n): """Transform square distance matrix coordinates to the corresponding index into a condensed, 1D form of the matrix. Parameters ---------- i : int Row index. j : int Column index. n : int Size of the square matrix (length of first or second dimension). 
Returns ------- ix : int """ # guard conditions if i == j or i >= n or j >= n or i < 0 or j < 0: raise ValueError('invalid coordinates: %s, %s' % (i, j)) # normalise order i, j = sorted([i, j]) # calculate number of items in rows before this one (sum of arithmetic # progression) x = i * ((2 * n) - i - 1) / 2 # add on previous items in current row ix = x + j - i - 1 return int(ix) def condensed_coords_within(pop, n): """Return indices into a condensed distance matrix for all pairwise comparisons within the given population. Parameters ---------- pop : array_like, int Indices of samples or haplotypes within the population. n : int Size of the square matrix (length of first or second dimension). Returns ------- indices : ndarray, int """ return [condensed_coords(i, j, n) for i, j in itertools.combinations(sorted(pop), 2)] def condensed_coords_between(pop1, pop2, n): """Return indices into a condensed distance matrix for all pairwise comparisons between two populations. Parameters ---------- pop1 : array_like, int Indices of samples or haplotypes within the first population. pop2 : array_like, int Indices of samples or haplotypes within the second population. n : int Size of the square matrix (length of first or second dimension). Returns ------- indices : ndarray, int """ return [condensed_coords(i, j, n) for i, j in itertools.product(sorted(pop1), sorted(pop2))] def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None, imshow_kwargs=None): """Plot a pairwise distance matrix. Parameters ---------- dist : array_like The distance matrix in condensed form. labels : sequence of strings, optional Sample labels for the axes. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn """ import matplotlib.pyplot as plt # check inputs dist_square = ensure_square(dist) # set up axes if ax is None: # make a square figure x = plt.rcParams['figure.figsize'][0] fig, ax = plt.subplots(figsize=(x, x)) fig.tight_layout() # setup imshow arguments if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('cmap', 'jet') imshow_kwargs.setdefault('vmin', np.min(dist)) imshow_kwargs.setdefault('vmax', np.max(dist)) # plot as image im = ax.imshow(dist_square, **imshow_kwargs) # tidy up if labels: ax.set_xticks(range(len(labels))) ax.set_yticks(range(len(labels))) ax.set_xticklabels(labels, rotation=90) ax.set_yticklabels(labels, rotation=0) else: ax.set_xticks([]) ax.set_yticks([]) if colorbar: plt.gcf().colorbar(im, shrink=.5) return ax
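Neither condensed_coords nor pcoa above has a doctest, so a brief sketch follows showing how an index into a condensed distance matrix is obtained and how pcoa consumes such a matrix; the distance values are arbitrary toy data, not a worked result.

# Hedged sketch for condensed_coords and pcoa (toy data only).
import numpy as np
from allel.stats.distance import condensed_coords, pcoa

n = 4  # size of the square distance matrix
dist = np.array([1., 2., 3., 4., 5., 6.])  # condensed form, length n*(n-1)/2

# index of the (1, 3) entry of the square matrix within the condensed array
ix = condensed_coords(1, 3, n)
print(ix, dist[ix])

# classical multi-dimensional scaling of the same condensed matrix
coords, explained = pcoa(dist)
print(coords.shape, explained)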
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/distance.py
distance.py
0.915543
0.780328
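A minimal sketch of the condensed-index arithmetic and PCoA described in the docstrings above, assuming scikit-allel is installed and that condensed_coords and pcoa are importable from the top-level allel namespace; the distance values are invented toy data.

import numpy as np
import allel

# For a 4 x 4 square matrix the condensed form has 4 * 3 // 2 = 6 entries.
# Rows before row i contribute i * (2*n - i - 1) // 2 entries, so (1, 3) maps to index 4.
ix = allel.condensed_coords(1, 3, 4)
print(ix)  # 4

# PCoA on a toy condensed distance matrix (6 pairwise distances).
dist = np.array([0.2, 0.4, 0.5, 0.3, 0.6, 0.1])
coords, explained = allel.pcoa(dist)
print(coords.shape)     # (4, n_retained_dimensions)
print(explained.sum())  # explained ratios sum to 1.0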
from allel.model.ndarray import AlleleCountsArray from allel.util import asarray_ndim, check_dim0_aligned from allel.stats.window import moving_statistic from allel.stats.misc import jackknife import numpy as np def h_hat(ac): """Unbiased estimator for h, where 2*h is the heterozygosity of the population. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array for a single population. Returns ------- h_hat : ndarray, float, shape (n_variants,) Notes ----- Used in Patterson (2012) for calculation of various statistics. """ # check inputs ac = asarray_ndim(ac, 2) assert ac.shape[1] == 2, 'only biallelic variants supported' # compute allele number an = ac.sum(axis=1) # compute estimator x = (ac[:, 0] * ac[:, 1]) / (an * (an - 1)) return x def patterson_f2(aca, acb): """Unbiased estimator for F2(A, B), the branch length between populations A and B. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------- f2 : ndarray, float, shape (n_variants,) Notes ----- See Patterson (2012), Appendix A. """ # check inputs aca = AlleleCountsArray(aca, copy=False) assert aca.shape[1] == 2, 'only biallelic variants supported' acb = AlleleCountsArray(acb, copy=False) assert acb.shape[1] == 2, 'only biallelic variants supported' check_dim0_aligned(aca, acb) # compute allele numbers sa = aca.sum(axis=1) sb = acb.sum(axis=1) # compute heterozygosities ha = h_hat(aca) hb = h_hat(acb) # compute sample frequencies for the alternate allele a = aca.to_frequencies()[:, 1] b = acb.to_frequencies()[:, 1] # compute estimator x = ((a - b) ** 2) - (ha / sa) - (hb / sb) return x # noinspection PyPep8Naming def patterson_f3(acc, aca, acb): """Unbiased estimator for F3(C; A, B), the three-population test for admixture in population C. Parameters ---------- acc : array_like, int, shape (n_variants, 2) Allele counts for the test population (C). aca : array_like, int, shape (n_variants, 2) Allele counts for the first source population (A). acb : array_like, int, shape (n_variants, 2) Allele counts for the second source population (B). Returns ------- T : ndarray, float, shape (n_variants,) Un-normalized f3 estimates per variant. B : ndarray, float, shape (n_variants,) Estimates for heterozygosity in population C. Notes ----- See Patterson (2012), main text and Appendix A. For un-normalized f3 statistics, ignore the `B` return value. To compute the f3* statistic, which is normalized by heterozygosity in population C to remove numerical dependence on the allele frequency spectrum, compute ``np.sum(T) / np.sum(B)``. 
""" # check inputs aca = AlleleCountsArray(aca, copy=False) assert aca.shape[1] == 2, 'only biallelic variants supported' acb = AlleleCountsArray(acb, copy=False) assert acb.shape[1] == 2, 'only biallelic variants supported' acc = AlleleCountsArray(acc, copy=False) assert acc.shape[1] == 2, 'only biallelic variants supported' check_dim0_aligned(aca, acb, acc) # compute allele number and heterozygosity in test population sc = acc.sum(axis=1) hc = h_hat(acc) # compute sample frequencies for the alternate allele a = aca.to_frequencies()[:, 1] b = acb.to_frequencies()[:, 1] c = acc.to_frequencies()[:, 1] # compute estimator T = ((c - a) * (c - b)) - (hc / sc) B = 2 * hc return T, B def patterson_d(aca, acb, acc, acd): """Unbiased estimator for D(A, B; C, D), the normalised four-population test for admixture between (A or B) and (C or D), also known as the "ABBA BABA" test. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. Returns ------- num : ndarray, float, shape (n_variants,) Numerator (un-normalised f4 estimates). den : ndarray, float, shape (n_variants,) Denominator. Notes ----- See Patterson (2012), main text and Appendix A. For un-normalized f4 statistics, ignore the `den` return value. """ # check inputs aca = AlleleCountsArray(aca, copy=False) assert aca.shape[1] == 2, 'only biallelic variants supported' acb = AlleleCountsArray(acb, copy=False) assert acb.shape[1] == 2, 'only biallelic variants supported' acc = AlleleCountsArray(acc, copy=False) assert acc.shape[1] == 2, 'only biallelic variants supported' acd = AlleleCountsArray(acd, copy=False) assert acd.shape[1] == 2, 'only biallelic variants supported' check_dim0_aligned(aca, acb, acc, acd) # compute sample frequencies for the alternate allele a = aca.to_frequencies()[:, 1] b = acb.to_frequencies()[:, 1] c = acc.to_frequencies()[:, 1] d = acd.to_frequencies()[:, 1] # compute estimator num = (a - b) * (c - d) den = (a + b - (2 * a * b)) * (c + d - (2 * c * d)) return num, den # noinspection PyPep8Naming def moving_patterson_f3(acc, aca, acb, size, start=0, stop=None, step=None, normed=True): """Estimate F3(C; A, B) in moving windows. Parameters ---------- acc : array_like, int, shape (n_variants, 2) Allele counts for the test population (C). aca : array_like, int, shape (n_variants, 2) Allele counts for the first source population (A). acb : array_like, int, shape (n_variants, 2) Allele counts for the second source population (B). size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. normed : bool, optional If False, use un-normalised f3 values. Returns ------- f3 : ndarray, float, shape (n_windows,) Estimated value of the statistic in each window. 
""" # calculate per-variant values T, B = patterson_f3(acc, aca, acb) # calculate value of statistic within each block if normed: T_bsum = moving_statistic(T, statistic=np.nansum, size=size, start=start, stop=stop, step=step) B_bsum = moving_statistic(B, statistic=np.nansum, size=size, start=start, stop=stop, step=step) f3 = T_bsum / B_bsum else: f3 = moving_statistic(T, statistic=np.nanmean, size=size, start=start, stop=stop, step=step) return f3 def moving_patterson_d(aca, acb, acc, acd, size, start=0, stop=None, step=None): """Estimate D(A, B; C, D) in moving windows. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- d : ndarray, float, shape (n_windows,) Estimated value of the statistic in each window. """ # calculate per-variant values num, den = patterson_d(aca, acb, acc, acd) # N.B., nans can occur if any of the populations have completely missing # genotype calls at a variant (i.e., allele number is zero). Here we # assume that is rare enough to be negligible. # compute the numerator and denominator within each window num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate the statistic values in each block d = num_sum / den_sum return d # noinspection PyPep8Naming def average_patterson_f3(acc, aca, acb, blen, normed=True): """Estimate F3(C; A, B) and standard error using the block-jackknife. Parameters ---------- acc : array_like, int, shape (n_variants, 2) Allele counts for the test population (C). aca : array_like, int, shape (n_variants, 2) Allele counts for the first source population (A). acb : array_like, int, shape (n_variants, 2) Allele counts for the second source population (B). blen : int Block size (number of variants). normed : bool, optional If False, use un-normalised f3 values. Returns ------- f3 : float Estimated value of the statistic using all data. se : float Estimated standard error. z : float Z-score (number of standard errors from zero). vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. Notes ----- See Patterson (2012), main text and Appendix A. See Also -------- allel.stats.admixture.patterson_f3 """ # calculate per-variant values T, B = patterson_f3(acc, aca, acb) # N.B., nans can occur if any of the populations have completely missing # genotype calls at a variant (i.e., allele number is zero). Here we # assume that is rare enough to be negligible. 
# calculate overall value of statistic if normed: f3 = np.nansum(T) / np.nansum(B) else: f3 = np.nanmean(T) # calculate value of statistic within each block if normed: T_bsum = moving_statistic(T, statistic=np.nansum, size=blen) B_bsum = moving_statistic(B, statistic=np.nansum, size=blen) vb = T_bsum / B_bsum _, se, vj = jackknife((T_bsum, B_bsum), statistic=lambda t, b: np.sum(t) / np.sum(b)) else: vb = moving_statistic(T, statistic=np.nanmean, size=blen) _, se, vj = jackknife(vb, statistic=np.mean) # compute Z score z = f3 / se return f3, se, z, vb, vj def average_patterson_d(aca, acb, acc, acd, blen): """Estimate D(A, B; C, D) and standard error using the block-jackknife. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. blen : int Block size (number of variants). Returns ------- d : float Estimated value of the statistic using all data. se : float Estimated standard error. z : float Z-score (number of standard errors from zero). vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. Notes ----- See Patterson (2012), main text and Appendix A. See Also -------- allel.stats.admixture.patterson_d """ # calculate per-variant values num, den = patterson_d(aca, acb, acc, acd) # N.B., nans can occur if any of the populations have completely missing # genotype calls at a variant (i.e., allele number is zero). Here we # assume that is rare enough to be negligible. # calculate overall estimate d_avg = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) # compute Z score z = d_avg / se return d_avg, se, z, vb, vj # backwards compatibility blockwise_patterson_f3 = average_patterson_f3 blockwise_patterson_d = average_patterson_d
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/admixture.py
admixture.py
0.91802
0.800107
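A short sketch of turning the per-variant estimators above into the normalized f3* statistic, following the np.sum(T) / np.sum(B) recipe given in the patterson_f3 docstring; the allele-count arrays are invented toy data, and allel.patterson_f3 is assumed to be the public entry point for the function shown above.

import numpy as np
import allel

# Invented biallelic allele counts, shape (n_variants=4, 2), for source populations A, B
# and test population C.
aca = np.array([[10, 0], [8, 2], [6, 4], [5, 5]])
acb = np.array([[9, 1], [7, 3], [5, 5], [2, 8]])
acc = np.array([[10, 0], [9, 1], [6, 4], [4, 6]])

# Per-variant un-normalized f3 estimates (T) and heterozygosity in C (B).
T, B = allel.patterson_f3(acc, aca, acb)

# Normalized f3*, as described in the docstring.
f3_star = np.nansum(T) / np.nansum(B)
print(f3_star)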
import numpy as np from allel.model.ndarray import GenotypeVector from allel.util import asarray_ndim, check_dim0_aligned from allel.stats.misc import tabulate_state_blocks from allel.stats.window import equally_accessible_windows, windowed_statistic, position_windows def roh_mhmm(gv, pos, phet_roh=0.001, phet_nonroh=(0.0025, 0.01), transition=1e-6, min_roh=0, is_accessible=None, contig_size=None): """Call ROH (runs of homozygosity) in a single individual given a genotype vector. This function computes the likely ROH using a Multinomial HMM model. There are 3 observable states at each position in a chromosome/contig: 0 = Hom, 1 = Het, 2 = inaccessible (i.e., unobserved). The model is provided with a probability of observing a het in a ROH (`phet_roh`) and one or more probabilities of observing a het in a non-ROH, as this probability may not be constant across the genome (`phet_nonroh`). Parameters ---------- gv : array_like, int, shape (n_variants, ploidy) Genotype vector. pos: array_like, int, shape (n_variants,) Positions of variants, same 0th dimension as `gv`. phet_roh: float, optional Probability of observing a heterozygote in a ROH. Appropriate values will depend on de novo mutation rate and genotype error rate. phet_nonroh: tuple of floats, optional One or more probabilites of observing a heterozygote outside of ROH. Appropriate values will depend primarily on nucleotide diversity within the population, but also on mutation rate and genotype error rate. transition: float, optional Probability of moving between states. min_roh: integer, optional Minimum size (bp) to condsider as a ROH. Will depend on contig size and recombination rate. is_accessible: array_like, bool, shape (`contig_size`,), optional Boolean array for each position in contig describing whether accessible or not. contig_size: int, optional If is_accessible not known/not provided, allows specification of total length of contig. Returns ------- df_roh: DataFrame Data frame where each row describes a run of homozygosity. Columns are 'start', 'stop', 'length' and 'is_marginal'. Start and stop are 1-based, stop-inclusive. froh: float Proportion of genome in a ROH. Notes ----- This function requires `hmmlearn <http://hmmlearn.readthedocs.io/en/latest/>`_ to be installed. This function currently requires around 4GB memory for a contig size of ~50Mbp. 
""" from hmmlearn import hmm # setup inputs if isinstance(phet_nonroh, float): phet_nonroh = phet_nonroh, gv = GenotypeVector(gv) pos = asarray_ndim(pos, 1) check_dim0_aligned(gv, pos) is_accessible = asarray_ndim(is_accessible, 1, dtype=bool, allow_none=True) # heterozygote probabilities het_px = np.concatenate([(phet_roh,), phet_nonroh]) # start probabilities (all equal) start_prob = np.repeat(1/het_px.size, het_px.size) # transition between underlying states transition_mx = _hmm_derive_transition_matrix(transition, het_px.size) # probability of inaccessible if is_accessible is None: if contig_size is None: raise ValueError( "If is_accessible argument is not provided, you must provide `contig_size`") p_accessible = 1.0 else: p_accessible = is_accessible.mean() contig_size = is_accessible.size emission_mx = _mhmm_derive_emission_matrix(het_px, p_accessible) # initialize HMM # N.B., https://github.com/hmmlearn/hmmlearn/pull/429 roh_hmm = hmm.CategoricalHMM(n_components=het_px.size, n_features=3) roh_hmm.startprob_ = start_prob roh_hmm.transmat_ = transition_mx roh_hmm.emissionprob_ = emission_mx # locate heterozygous calls is_het = gv.is_het() # predict ROH state pred, obs = _mhmm_predict_roh_state(roh_hmm, is_het, pos, is_accessible, contig_size) # find ROH windows df_blocks = tabulate_state_blocks(pred, states=list(range(len(het_px)))) df_roh = df_blocks[(df_blocks.state == 0)].reset_index(drop=True) # adapt the dataframe for ROH for col in 'state', 'support', 'start_lidx', 'stop_ridx', 'size_max': del df_roh[col] df_roh.rename(columns={'start_ridx': 'start', 'stop_lidx': 'stop', 'size_min': 'length'}, inplace=True) # make coordinates 1-based df_roh['start'] = df_roh['start'] + 1 df_roh['stop'] = df_roh['stop'] + 1 # filter by ROH size if min_roh > 0: df_roh = df_roh[df_roh.length >= min_roh] # compute FROH froh = df_roh.length.sum() / contig_size return df_roh, froh def _mhmm_predict_roh_state(model, is_het, pos, is_accessible, contig_size): # construct observations, one per position in contig observations = np.zeros((contig_size, 1), dtype='i1') # these are hets observations[np.compress(is_het, pos) - 1] = 1 # these are unobserved if is_accessible is not None: observations[~is_accessible] = 2 predictions = model.predict(X=observations) return predictions, observations def roh_poissonhmm(gv, pos, phet_roh=0.001, phet_nonroh=(0.0025, 0.01), transition=1e-3, window_size=1000, min_roh=0, is_accessible=None, contig_size=None): """Call ROH (runs of homozygosity) in a single individual given a genotype vector. This function computes the likely ROH using a Poisson HMM model. The chromosome is divided into equally accessible windows of specified size, then the number of hets observed in each is used to fit a Poisson HMM. Note this is much faster than `roh_mhmm`, but at the cost of some resolution. The model is provided with a probability of observing a het in a ROH (`phet_roh`) and one or more probabilities of observing a het in a non-ROH, as this probability may not be constant across the genome (`phet_nonroh`). Parameters ---------- gv : array_like, int, shape (n_variants, ploidy) Genotype vector. pos: array_like, int, shape (n_variants,) Positions of variants, same 0th dimension as `gv`. phet_roh: float, optional Probability of observing a heterozygote in a ROH. Appropriate values will depend on de novo mutation rate and genotype error rate. phet_nonroh: tuple of floats, optional One or more probabilites of observing a heterozygote outside of ROH. 
Appropriate values will depend primarily on nucleotide diversity within the population, but also on mutation rate and genotype error rate. transition: float, optional Probability of moving between states. This is based on windows, so a larger window size may call for a larger transitional probability window_size: integer, optional Window size (equally accessible bases) to consider as a potential ROH. Setting this window too small may result in spurious ROH calls, while too large will result in a lack of resolution. min_roh: integer, optional Minimum size (bp) to condsider as a ROH. Will depend on contig size and recombination rate. is_accessible: array_like, bool, shape (`contig_size`,), optional Boolean array for each position in contig describing whether accessible or not. Although optional, highly recommended so invariant sites are distinguishable from sites where variation is inaccessible contig_size: integer, optional If is_accessible is not available, use this to specify the size of the contig, and assume all sites are accessible. Returns ------- df_roh: DataFrame Data frame where each row describes a run of homozygosity. Columns are 'start', 'stop', 'length' and 'is_marginal'. Start and stop are 1-based, stop-inclusive. froh: float Proportion of genome in a ROH. Notes ----- This function requires `pomegranate` (>= 0.9.0) to be installed. """ from pomegranate import HiddenMarkovModel, PoissonDistribution is_accessible = asarray_ndim(is_accessible, 1, dtype=bool, allow_none=True) # equally accessbile windows if is_accessible is None: if contig_size is None: raise ValueError( "If is_accessible argument is not provided, you must provide `contig_size`") # given no accessibility provided use the standard window calculation roh_windows = position_windows( pos=pos, size=window_size, step=window_size, start=1, stop=contig_size) else: contig_size = is_accessible.size roh_windows = equally_accessible_windows(is_accessible, window_size) ishet = GenotypeVector(gv).is_het() counts, wins, records = windowed_statistic(pos, ishet, np.sum, windows=roh_windows) # heterozygote probabilities het_px = np.concatenate([(phet_roh,), phet_nonroh]) # start probabilities (all equal) start_prob = np.repeat(1/het_px.size, het_px.size) # transition between underlying states transition_mx = _hmm_derive_transition_matrix(transition, het_px.size) dists = [PoissonDistribution(x * window_size) for x in het_px] model = HiddenMarkovModel.from_matrix(transition_probabilities=transition_mx, distributions=dists, starts=start_prob) prediction = np.array(model.predict(counts[:, None])) df_blocks = tabulate_state_blocks(prediction, states=list(range(len(het_px)))) df_roh = df_blocks[(df_blocks.state == 0)].reset_index(drop=True) # adapt the dataframe for ROH df_roh["start"] = df_roh.start_ridx.apply(lambda y: roh_windows[y, 0]) df_roh["stop"] = df_roh.stop_lidx.apply(lambda y: roh_windows[y, 1]) df_roh["length"] = df_roh.stop - df_roh.start # filter by ROH size if min_roh > 0: df_roh = df_roh[df_roh.length >= min_roh] # compute FROH froh = df_roh.length.sum() / contig_size return df_roh[["start", "stop", "length", "is_marginal"]], froh def _mhmm_derive_emission_matrix(het_px, p_accessible): # one row per p in prob # hom, het, unobserved mx = [[(1 - p) * p_accessible, p * p_accessible, 1 - p_accessible] for p in het_px] mx = np.array(mx) assert mx.shape == (het_px.size, 3) return mx def _hmm_derive_transition_matrix(transition, nstates): # this is a symmetric matrix mx = np.zeros((nstates, nstates)) effective_tp = transition / 
(nstates - 1) for i in range(nstates): for j in range(nstates): if i == j: mx[i, j] = 1 - transition else: mx[i, j] = effective_tp return mx
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/roh.py
roh.py
0.890651
0.761627
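A standalone sketch of the transition-matrix construction used by the ROH helpers above, re-implemented with plain NumPy so it runs without hmmlearn or pomegranate; the state probabilities are example values only.

import numpy as np

def derive_transition_matrix(transition, nstates):
    # Mirror of the module's helper: stay in the current state with
    # probability 1 - transition, and spread `transition` evenly over
    # the remaining states.
    mx = np.full((nstates, nstates), transition / (nstates - 1))
    np.fill_diagonal(mx, 1 - transition)
    return mx

# Three hidden states: one ROH state plus two non-ROH heterozygosity levels.
het_px = np.array([0.001, 0.0025, 0.01])
transition_mx = derive_transition_matrix(1e-6, het_px.size)
print(transition_mx)
print(transition_mx.sum(axis=1))  # each row sums to 1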
import numpy as np from allel.stats.window import windowed_statistic from allel.util import asarray_ndim, ensure_square from allel.chunked import get_blen_array from allel.compat import memoryview_safe from allel.opt.stats import gn_pairwise_corrcoef_int8, gn_pairwise2_corrcoef_int8, \ gn_locate_unlinked_int8 def rogers_huff_r(gn): """Estimate the linkage disequilibrium parameter *r* for each pair of variants using the method of Rogers and Huff (2008). Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (n_variants * (n_variants - 1) // 2,) Matrix in condensed form. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]], ... [[0, 0], [1, 1], [0, 0]], ... [[1, 1], [0, 0], [1, 1]], ... [[0, 0], [0, 1], [-1, -1]]], dtype='i1') >>> gn = g.to_n_alt(fill=-1) >>> gn array([[ 0, 2, 0], [ 0, 2, 0], [ 2, 0, 2], [ 0, 1, -1]], dtype=int8) >>> r = allel.rogers_huff_r(gn) >>> r # doctest: +ELLIPSIS array([ 1. , -1.0000001, 1. , -1.0000001, 1. , -1. ], dtype=float32) >>> r ** 2 # doctest: +ELLIPSIS array([1. , 1.0000002, 1. , 1.0000002, 1. , 1. ], dtype=float32) >>> from scipy.spatial.distance import squareform >>> squareform(r ** 2) array([[0. , 1. , 1.0000002, 1. ], [1. , 0. , 1.0000002, 1. ], [1.0000002, 1.0000002, 0. , 1. ], [1. , 1. , 1. , 0. ]], dtype=float32) """ # check inputs gn = asarray_ndim(gn, 2, dtype='i1') gn = memoryview_safe(gn) # compute correlation coefficients r = gn_pairwise_corrcoef_int8(gn) # convenience for singletons if r.size == 1: r = r[0] return r def rogers_huff_r_between(gna, gnb): """Estimate the linkage disequilibrium parameter *r* for each pair of variants between the two input arrays, using the method of Rogers and Huff (2008). Parameters ---------- gna, gnb : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (m_variants, n_variants ) Matrix in rectangular form. """ # check inputs gna = asarray_ndim(gna, 2, dtype='i1') gnb = asarray_ndim(gnb, 2, dtype='i1') gna = memoryview_safe(gna) gnb = memoryview_safe(gnb) # compute correlation coefficients r = gn_pairwise2_corrcoef_int8(gna, gnb) # convenience for singletons if r.size == 1: r = r[0, 0] return r def locate_unlinked(gn, size=100, step=20, threshold=.1, blen=None): """Locate variants in approximate linkage equilibrium, where r**2 is below the given `threshold`. Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int Window size (number of variants). step : int Number of variants to advance to the next window. threshold : float Maximum value of r**2 to include variants. blen : int, optional Block length to use for chunked computation. Returns ------- loc : ndarray, bool, shape (n_variants) Boolean array where True items locate variants in approximate linkage equilibrium. Notes ----- The value of r**2 between each pair of variants is calculated using the method of Rogers and Huff (2008). 
""" # check inputs if not hasattr(gn, 'shape') or not hasattr(gn, 'dtype'): gn = np.asarray(gn, dtype='i1') if gn.ndim != 2: raise ValueError('gn must have two dimensions') # setup output loc = np.ones(gn.shape[0], dtype='u1') # compute in chunks to avoid loading big arrays into memory blen = get_blen_array(gn, blen) blen = max(blen, 10*size) # avoid too small chunks n_variants = gn.shape[0] for i in range(0, n_variants, blen): # N.B., ensure overlap with next window j = min(n_variants, i+blen+size) gnb = np.asarray(gn[i:j], dtype='i1') gnb = memoryview_safe(gnb) locb = loc[i:j] gn_locate_unlinked_int8(gnb, locb, size, step, threshold) return loc.astype('b1') def windowed_r_squared(pos, gn, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, percentile=50): """Summarise linkage disequilibrium in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. percentile : int or sequence of ints, optional The percentile or percentiles to calculate within each window. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- Linkage disequilibrium (r**2) is calculated using the method of Rogers and Huff (2008). See Also -------- allel.stats.window.windowed_statistic """ # define the statistic function if isinstance(percentile, (list, tuple)): fill = [fill for _ in percentile] def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return [np.percentile(r_squared, p) for p in percentile] else: def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return np.percentile(r_squared, percentile) return windowed_statistic(pos, gn, statistic, size, start=start, stop=stop, step=step, windows=windows, fill=fill) def plot_pairwise_ld(m, colorbar=True, ax=None, imshow_kwargs=None): """Plot a matrix of genotype linkage disequilibrium values between all pairs of variants. Parameters ---------- m : array_like Array of linkage disequilibrium values in condensed form. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn. 
""" import matplotlib.pyplot as plt # check inputs m_square = ensure_square(m) # blank out lower triangle and flip up/down m_square = np.tril(m_square)[::-1, :] # set up axes if ax is None: # make a square figure with enough pixels to represent each variant x = m_square.shape[0] / plt.rcParams['figure.dpi'] x = max(x, plt.rcParams['figure.figsize'][0]) fig, ax = plt.subplots(figsize=(x, x)) fig.tight_layout(pad=0) # setup imshow arguments if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('cmap', 'Greys') imshow_kwargs.setdefault('vmin', 0) imshow_kwargs.setdefault('vmax', 1) # plot as image im = ax.imshow(m_square, **imshow_kwargs) # tidy up ax.set_xticks([]) ax.set_yticks([]) for s in 'bottom', 'right': ax.spines[s].set_visible(False) if colorbar: plt.gcf().colorbar(im, shrink=.5, pad=0) return ax
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/ld.py
ld.py
0.894242
0.676697
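A brief usage sketch tying together the LD helpers above (rogers_huff_r and locate_unlinked), assuming both are re-exported at the top-level allel namespace as the rogers_huff_r doctest suggests. The ld_prune helper, its parameter values and the toy genotypes are illustrative only, not part of the library API.

import numpy as np
import allel


def ld_prune(gn, size=100, step=20, threshold=.1, n_iter=1):
    # hypothetical helper: repeatedly drop variants whose r**2 with another
    # variant in the window exceeds the threshold, stopping early once a
    # pass removes nothing or after n_iter passes
    for _ in range(n_iter):
        loc_unlinked = allel.locate_unlinked(gn, size=size, step=step,
                                             threshold=threshold)
        n_removed = np.count_nonzero(~loc_unlinked)
        gn = gn[loc_unlinked]
        if n_removed == 0:
            break
    return gn


g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]],
                         [[0, 0], [1, 1], [0, 0]],
                         [[1, 1], [0, 0], [1, 1]],
                         [[0, 0], [0, 1], [-1, -1]]], dtype='i1')
gn = g.to_n_alt(fill=-1)

# pairwise r**2 in condensed form, as in the doctest above
r_squared = allel.rogers_huff_r(gn) ** 2

# approximate LD pruning; tiny window parameters because this toy data set
# has only four variants
gn_pruned = ld_prune(gn, size=2, step=1, threshold=.5)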
import numpy as np from allel.util import asarray_ndim, check_integer_dtype def _check_dac_n(dac, n): dac = asarray_ndim(dac, 1) check_integer_dtype(dac) mx = np.max(dac) if n is None: n = mx elif n < mx: raise ValueError('number of chromosomes too small; expected {}, found {}' .format(n, mx)) return dac, int(n) def _check_ac_n(ac, n): ac = asarray_ndim(ac, 2) if ac.shape[1] != 2: raise ValueError('only biallelic variants are supported') check_integer_dtype(ac) mx = np.max(np.sum(ac, axis=1)) if n is None: n = mx elif n < mx: raise ValueError('number of chromosomes too small; expected {}, found {}' .format(n, mx)) return ac, int(n) def sfs(dac, n=None): """Compute the site frequency spectrum given derived allele counts at a set of biallelic variants. Parameters ---------- dac : array_like, int, shape (n_variants,) Array of derived allele counts. n : int, optional The total number of chromosomes called. Returns ------- sfs : ndarray, int, shape (n_chromosomes,) Array where the kth element is the number of variant sites with k derived alleles. """ # check input dac, n = _check_dac_n(dac, n) # need platform integer for bincount dac = dac.astype(int, copy=False) # compute site frequency spectrum x = n + 1 s = np.bincount(dac, minlength=x) return s def sfs_folded(ac, n=None): """Compute the folded site frequency spectrum given reference and alternate allele counts at a set of biallelic variants. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array. n : int, optional The total number of chromosomes called. Returns ------- sfs_folded : ndarray, int, shape (n_chromosomes//2,) Array where the kth element is the number of variant sites with a minor allele count of k. """ # check input ac, n = _check_ac_n(ac, n) # compute minor allele counts mac = np.amin(ac, axis=1) # need platform integer for bincount mac = mac.astype(int, copy=False) # compute folded site frequency spectrum x = n//2 + 1 s = np.bincount(mac, minlength=x) return s def sfs_scaled(dac, n=None): """Compute the site frequency spectrum scaled such that a constant value is expected across the spectrum for neutral variation and constant population size. Parameters ---------- dac : array_like, int, shape (n_variants,) Array of derived allele counts. n : int, optional The total number of chromosomes called. Returns ------- sfs_scaled : ndarray, int, shape (n_chromosomes,) An array where the value of the kth element is the number of variants with k derived alleles, multiplied by k. """ # compute site frequency spectrum s = sfs(dac, n=n) # apply scaling s = scale_sfs(s) return s def scale_sfs(s): """Scale a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. Returns ------- sfs_scaled : ndarray, int, shape (n_chromosomes,) Scaled site frequency spectrum. """ k = np.arange(s.size) out = s * k return out def sfs_folded_scaled(ac, n=None): """Compute the folded site frequency spectrum scaled such that a constant value is expected across the spectrum for neutral variation and constant population size. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array. n : int, optional The total number of chromosomes called. Returns ------- sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,) An array where the value of the kth element is the number of variants with minor allele count k, multiplied by the scaling factor (k * (n - k) / n). 
""" # check input ac, n = _check_ac_n(ac, n) # compute the site frequency spectrum s = sfs_folded(ac, n=n) # apply scaling s = scale_sfs_folded(s, n) return s def scale_sfs_folded(s, n): """Scale a folded site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes//2,) Folded site frequency spectrum. n : int Number of chromosomes called. Returns ------- sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,) Scaled folded site frequency spectrum. """ k = np.arange(s.shape[0]) out = s * k * (n - k) / n return out def joint_sfs(dac1, dac2, n1=None, n2=None): """Compute the joint site frequency spectrum between two populations. Parameters ---------- dac1 : array_like, int, shape (n_variants,) Derived allele counts for the first population. dac2 : array_like, int, shape (n_variants,) Derived allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes) Array where the (i, j)th element is the number of variant sites with i derived alleles in the first population and j derived alleles in the second population. """ # check inputs dac1, n1 = _check_dac_n(dac1, n1) dac2, n2 = _check_dac_n(dac2, n2) # compute site frequency spectrum x = n1 + 1 y = n2 + 1 # need platform integer for bincount tmp = (dac1 * y + dac2).astype(int, copy=False) s = np.bincount(tmp) s.resize(x, y) return s def joint_sfs_folded(ac1, ac2, n1=None, n2=None): """Compute the joint folded site frequency spectrum between two populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, 2) Allele counts for the first population. ac2 : array_like, int, shape (n_variants, 2) Allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1) Array where the (i, j)th element is the number of variant sites with a minor allele count of i in the first population and j in the second population. """ # check inputs ac1, n1 = _check_ac_n(ac1, n1) ac2, n2 = _check_ac_n(ac2, n2) # compute minor allele counts mac1 = np.amin(ac1, axis=1) mac2 = np.amin(ac2, axis=1) # compute site frequency spectrum x = n1//2 + 1 y = n2//2 + 1 tmp = (mac1 * y + mac2).astype(int, copy=False) s = np.bincount(tmp) s.resize(x, y) return s def joint_sfs_scaled(dac1, dac2, n1=None, n2=None): """Compute the joint site frequency spectrum between two populations, scaled such that a constant value is expected across the spectrum for neutral variation, constant population size and unrelated populations. Parameters ---------- dac1 : array_like, int, shape (n_variants,) Derived allele counts for the first population. dac2 : array_like, int, shape (n_variants,) Derived allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1) Array where the (i, j)th element is the scaled frequency of variant sites with i derived alleles in the first population and j derived alleles in the second population. """ # compute site frequency spectrum s = joint_sfs(dac1, dac2, n1=n1, n2=n2) # apply scaling s = scale_joint_sfs(s) return s def scale_joint_sfs(s): """Scale a joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n1, n2) Joint site frequency spectrum. 
Returns ------- joint_sfs_scaled : ndarray, int, shape (n1, n2) Scaled joint site frequency spectrum. """ i = np.arange(s.shape[0])[:, None] j = np.arange(s.shape[1])[None, :] out = (s * i) * j return out def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None): """Compute the joint folded site frequency spectrum between two populations, scaled such that a constant value is expected across the spectrum for neutral variation, constant population size and unrelated populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, 2) Allele counts for the first population. ac2 : array_like, int, shape (n_variants, 2) Allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1) Array where the (i, j)th element is the scaled frequency of variant sites with a minor allele count of i in the first population and j in the second population. """ # noqa # check inputs ac1, n1 = _check_ac_n(ac1, n1) ac2, n2 = _check_ac_n(ac2, n2) # compute site frequency spectrum s = joint_sfs_folded(ac1, ac2, n1=n1, n2=n2) # apply scaling s = scale_joint_sfs_folded(s, n1, n2) return s def scale_joint_sfs_folded(s, n1, n2): """Scale a folded joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2) Folded joint site frequency spectrum. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2) Scaled folded joint site frequency spectrum. """ # noqa out = np.empty_like(s) for i in range(s.shape[0]): for j in range(s.shape[1]): out[i, j] = s[i, j] * i * j * (n1 - i) * (n2 - j) return out def fold_sfs(s, n): """Fold a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum n : int Total number of chromosomes called. Returns ------- sfs_folded : ndarray, int Folded site frequency spectrum """ # check inputs s = asarray_ndim(s, 1) assert s.shape[0] <= n + 1, 'invalid number of chromosomes' # need to check s has all entries up to n if s.shape[0] < n + 1: sn = np.zeros(n + 1, dtype=s.dtype) sn[:s.shape[0]] = s s = sn # fold nf = (n + 1) // 2 n = nf * 2 o = s[:nf] + s[nf:n][::-1] return o def fold_joint_sfs(s, n1, n2): """Fold a joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (m_chromosomes, n_chromosomes) Joint site frequency spectrum. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded : ndarray, int Folded joint site frequency spectrum. 
""" # check inputs s = asarray_ndim(s, 2) assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes' assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes' # need to check s has all entries up to m if s.shape[0] < n1 + 1: sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype) sm[:s.shape[0]] = s s = sm # need to check s has all entries up to n if s.shape[1] < n2 + 1: sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype) sn[:, :s.shape[1]] = s s = sn # fold mf = (n1 + 1) // 2 nf = (n2 + 1) // 2 n1 = mf * 2 n2 = nf * 2 o = (s[:mf, :nf] + # top left s[mf:n1, :nf][::-1] + # top right s[:mf, nf:n2][:, ::-1] + # bottom left s[mf:n1, nf:n2][::-1, ::-1]) # bottom right return o def plot_sfs(s, yscale='log', bins=None, n=None, clip_endpoints=True, label=None, plot_kwargs=None, ax=None): """Plot a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. """ import matplotlib.pyplot as plt import scipy # check inputs s = asarray_ndim(s, 1) # setup axes if ax is None: fig, ax = plt.subplots() # setup data if bins is None: if clip_endpoints: x = np.arange(1, s.shape[0]-1) y = s[1:-1] else: x = np.arange(s.shape[0]) y = s else: if clip_endpoints: y, b, _ = scipy.stats.binned_statistic( np.arange(1, s.shape[0]-1), values=s[1:-1], bins=bins, statistic='sum') else: y, b, _ = scipy.stats.binned_statistic( np.arange(s.shape[0]), values=s, bins=bins, statistic='sum') # use bin midpoints for plotting x = (b[:-1] + b[1:]) / 2 if n: # convert allele counts to allele frequencies x = x / n ax.set_xlabel('derived allele frequency') else: ax.set_xlabel('derived allele count') # do plotting if plot_kwargs is None: plot_kwargs = dict() ax.plot(x, y, label=label, **plot_kwargs) # tidy ax.set_yscale(yscale) ax.set_ylabel('site frequency') ax.autoscale(axis='x', tight=True) return ax # noinspection PyIncorrectDocstring def plot_sfs_folded(*args, **kwargs): """Plot a folded site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes/2,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. 
""" ax = plot_sfs(*args, **kwargs) n = kwargs.get('n', None) if n: ax.set_xlabel('minor allele frequency') else: ax.set_xlabel('minor allele count') return ax # noinspection PyIncorrectDocstring def plot_sfs_scaled(*args, **kwargs): """Plot a scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. """ kwargs.setdefault('yscale', 'linear') ax = plot_sfs(*args, **kwargs) ax.set_ylabel('scaled site frequency') return ax # noinspection PyIncorrectDocstring def plot_sfs_folded_scaled(*args, **kwargs): """Plot a folded scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes/2,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. """ kwargs.setdefault('yscale', 'linear') ax = plot_sfs_folded(*args, **kwargs) ax.set_ylabel('scaled site frequency') n = kwargs.get('n', None) if n: ax.set_xlabel('minor allele frequency') else: ax.set_xlabel('minor allele count') return ax def plot_joint_sfs(s, ax=None, imshow_kwargs=None): """Plot a joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn. """ import matplotlib.pyplot as plt from matplotlib.colors import LogNorm # check inputs s = asarray_ndim(s, 2) # setup axes if ax is None: w = plt.rcParams['figure.figsize'][0] fig, ax = plt.subplots(figsize=(w, w)) # set plotting defaults if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('cmap', 'jet') imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('aspect', 'auto') imshow_kwargs.setdefault('norm', LogNorm()) # plot data ax.imshow(s.T, **imshow_kwargs) # tidy ax.invert_yaxis() ax.set_xlabel('derived allele count (population 1)') ax.set_ylabel('derived allele count (population 2)') return ax # noinspection PyIncorrectDocstring def plot_joint_sfs_folded(*args, **kwargs): """Plot a joint site frequency spectrum. 
Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn. """ ax = plot_joint_sfs(*args, **kwargs) ax.set_xlabel('minor allele count (population 1)') ax.set_ylabel('minor allele count (population 2)') return ax # noinspection PyIncorrectDocstring def plot_joint_sfs_scaled(*args, **kwargs): """Plot a scaled joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn. """ imshow_kwargs = kwargs.get('imshow_kwargs', dict()) imshow_kwargs.setdefault('norm', None) kwargs['imshow_kwargs'] = imshow_kwargs ax = plot_joint_sfs(*args, **kwargs) return ax # noinspection PyIncorrectDocstring def plot_joint_sfs_folded_scaled(*args, **kwargs): """Plot a scaled folded joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn. """ imshow_kwargs = kwargs.get('imshow_kwargs', dict()) imshow_kwargs.setdefault('norm', None) kwargs['imshow_kwargs'] = imshow_kwargs ax = plot_joint_sfs_folded(*args, **kwargs) ax.set_xlabel('minor allele count (population 1)') ax.set_ylabel('minor allele count (population 2)') return ax
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/sf.py
sf.py
0.873714
0.639342
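A small, self-contained sketch of the site frequency spectrum helpers defined in allel/stats/sf.py above. The allele counts are invented for illustration, and the functions are imported from the module path listed in this record; in released versions they are typically also available from the top-level allel namespace.

import numpy as np
from allel.stats.sf import sfs, sfs_folded, sfs_scaled, fold_sfs, joint_sfs

# derived allele counts at six biallelic variants, with n = 8 chromosomes
# called (for example, four diploid samples); the values are made up
dac = np.array([1, 3, 5, 1, 7, 2])
s = sfs(dac, n=8)                # unfolded spectrum, length n + 1
s_scaled = sfs_scaled(dac, n=8)  # kth entry multiplied by k

# reference/alternate allele counts for the folded spectrum
ac = np.array([[7, 1], [5, 3], [3, 5], [7, 1], [1, 7], [6, 2]])
sf_fold = sfs_folded(ac, n=8)    # folded spectrum, length n//2 + 1

# an existing unfolded spectrum can also be folded after the fact
sf_fold2 = fold_sfs(s, n=8)

# joint spectrum between two populations, here with the same sample size
dac2 = np.array([0, 2, 6, 3, 8, 1])
js = joint_sfs(dac, dac2, n1=8, n2=8)  # shape (n1 + 1, n2 + 1) = (9, 9)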
import numpy as np from allel.model.ndarray import SortedIndex from allel.util import asarray_ndim, ignore_invalid, check_equal_length def moving_statistic(values, statistic, size, start=0, stop=None, step=None, **kwargs): """Calculate a statistic in a moving window over `values`. Parameters ---------- values : array_like The data to summarise. statistic : function The statistic to compute within each window. size : int The window size (number of values). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. kwargs Additional keyword arguments are passed through to the `statistic` function. Returns ------- out : ndarray, shape (n_windows,) Examples -------- >>> import allel >>> values = [2, 5, 8, 16] >>> allel.moving_statistic(values, np.sum, size=2) array([ 7, 24]) >>> allel.moving_statistic(values, np.sum, size=2, step=1) array([ 7, 13, 24]) """ windows = index_windows(values, size, start, stop, step) # setup output out = np.array([statistic(values[i:j], **kwargs) for i, j in windows]) return out def moving_mean(values, size, start=0, stop=None, step=None): return moving_statistic(values, statistic=np.mean, size=size, start=start, stop=stop, step=step) def moving_std(values, size, start=0, stop=None, step=None): return moving_statistic(values, statistic=np.std, size=size, start=start, stop=stop, step=step) def moving_midpoint(values, size, start=0, stop=None, step=None): return moving_statistic(values, statistic=lambda v: (v[0] + v[-1])/2, size=size, start=start, stop=stop, step=step) def index_windows(values, size, start, stop, step): """Convenience function to construct windows for the :func:`moving_statistic` function. """ # determine step if stop is None: stop = len(values) if step is None: # non-overlapping step = size # iterate over windows for window_start in range(start, stop, step): window_stop = window_start + size if window_stop > stop: # ensure all windows are equal sized return yield (window_start, window_stop) def position_windows(pos, size, start, stop, step): """Convenience function to construct windows for the :func:`windowed_statistic` and :func:`windowed_count` functions. """ last = False # determine start and stop positions if start is None: start = pos[0] if stop is None: stop = pos[-1] if step is None: # non-overlapping step = size windows = [] for window_start in range(start, stop, step): # determine window stop window_stop = window_start + size if window_stop >= stop: # last window window_stop = stop last = True else: window_stop -= 1 windows.append([window_start, window_stop]) if last: break return np.asarray(windows) def window_locations(pos, windows): """Locate indices in `pos` corresponding to the start and stop positions of `windows`. """ start_locs = np.searchsorted(pos, windows[:, 0]) stop_locs = np.searchsorted(pos, windows[:, 1], side='right') locs = np.column_stack((start_locs, stop_locs)) return locs def windowed_count(pos, size=None, start=None, stop=None, step=None, windows=None): """Count the number of items in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). 
step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. Returns ------- counts : ndarray, int, shape (n_windows,) The number of items in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. Notes ----- The window stop positions are included within a window. The final window will be truncated to the specified stop position, and so may be smaller than the other windows. Examples -------- Non-overlapping windows:: >>> import allel >>> pos = [1, 7, 12, 15, 28] >>> counts, windows = allel.windowed_count(pos, size=10) >>> counts array([2, 2, 1]) >>> windows array([[ 1, 10], [11, 20], [21, 28]]) Half-overlapping windows:: >>> counts, windows = allel.windowed_count(pos, size=10, step=5) >>> counts array([2, 3, 2, 0, 1]) >>> windows array([[ 1, 10], [ 6, 15], [11, 20], [16, 25], [21, 28]]) """ # assume sorted positions if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) # setup windows if windows is None: windows = position_windows(pos, size, start, stop, step) else: windows = asarray_ndim(windows, 2) # find window locations locs = window_locations(pos, windows) # count number of items in each window counts = np.diff(locs, axis=1).reshape(-1) return counts, windows def windowed_statistic(pos, values, statistic, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan): """Calculate a statistic from items in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. values : array_like, int, shape (n_items,) The values to summarise. May also be a tuple of values arrays, in which case each array will be sliced and passed through to the statistic function as separate arguments. statistic : function The statistic to compute. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- The window stop positions are included within a window. The final window will be truncated to the specified stop position, and so may be smaller than the other windows. 
Examples -------- Count non-zero (i.e., True) items in non-overlapping windows:: >>> import allel >>> pos = [1, 7, 12, 15, 28] >>> values = [True, False, True, False, False] >>> nnz, windows, counts = allel.windowed_statistic( ... pos, values, statistic=np.count_nonzero, size=10 ... ) >>> nnz array([1, 1, 0]) >>> windows array([[ 1, 10], [11, 20], [21, 28]]) >>> counts array([2, 2, 1]) Compute a sum over items in half-overlapping windows:: >>> values = [3, 4, 2, 6, 9] >>> x, windows, counts = allel.windowed_statistic( ... pos, values, statistic=np.sum, size=10, step=5, fill=0 ... ) >>> x array([ 7, 12, 8, 0, 9]) >>> windows array([[ 1, 10], [ 6, 15], [11, 20], [16, 25], [21, 28]]) >>> counts array([2, 3, 2, 0, 1]) """ # assume sorted positions if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) # check lengths are equal if isinstance(values, tuple): # assume multiple values arrays check_equal_length(pos, *values) else: # assume a single values array check_equal_length(pos, values) # setup windows if windows is None: windows = position_windows(pos, size, start, stop, step) else: windows = asarray_ndim(windows, 2) # find window locations locs = window_locations(pos, windows) # setup outputs out = [] counts = [] # iterate over windows for start_idx, stop_idx in locs: # calculate number of values in window n = stop_idx - start_idx if n == 0: # window is empty s = fill else: if isinstance(values, tuple): # assume multiple values arrays wv = [v[start_idx:stop_idx] for v in values] s = statistic(*wv) else: # assume a single values array wv = values[start_idx:stop_idx] s = statistic(wv) # store outputs out.append(s) counts.append(n) # convert to arrays for output return np.asarray(out), windows, np.asarray(counts) def per_base(x, windows, is_accessible=None, fill=np.nan): """Calculate the per-base value of a windowed statistic. Parameters ---------- x : array_like, shape (n_windows,) The statistic to average per-base. windows : array_like, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions using 1-based coordinates. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. fill : object, optional Use this value where there are no accessible bases in a window. Returns ------- y : ndarray, float, shape (n_windows,) The input array divided by the number of (accessible) bases in each window. n_bases : ndarray, int, shape (n_windows,) The number of (accessible) bases in each window """ # calculate window sizes if is_accessible is None: # N.B., window stops are included n_bases = np.diff(windows, axis=1).reshape(-1) + 1 else: n_bases = np.array([np.count_nonzero(is_accessible[i-1:j]) for i, j in windows]) # deal with multidimensional x if x.ndim == 1: pass elif x.ndim == 2: n_bases = n_bases[:, None] else: raise NotImplementedError('only arrays of 1 or 2 dimensions supported') # calculate density per-base with ignore_invalid(): y = np.where(n_bases > 0, x / n_bases, fill) # restore to 1-dimensional if n_bases.ndim > 1: n_bases = n_bases.reshape(-1) return y, n_bases def equally_accessible_windows(is_accessible, size, start=0, stop=None, step=None): """Create windows each containing the same number of accessible bases. Parameters ---------- is_accessible : array_like, bool, shape (n_bases,) Array defining accessible status of all bases on a contig/chromosome. size : int Window size (number of accessible bases). 
start : int, optional The genome position at which to start. stop : int, optional The genome position at which to stop. step : int, optional The number of accessible sites between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Use half the window size to get half-overlapping windows. Returns ------- windows : ndarray, int, shape (n_windows, 2) Window start/stop positions (1-based). """ pos_accessible, = np.nonzero(is_accessible) pos_accessible += 1 # convert to 1-based coordinates # N.B., need some care in handling start and stop positions, these are # genomic positions at which to start and stop the windows if start: pos_accessible = pos_accessible[pos_accessible >= start] if stop: pos_accessible = pos_accessible[pos_accessible <= stop] # now construct moving windows windows = moving_statistic(pos_accessible, lambda v: [v[0], v[-1]], size=size, step=step) return windows
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/stats/window.py
window.py
import numpy as np from allel.model.ndarray import SortedIndex from allel.util import asarray_ndim, ignore_invalid, check_equal_length def moving_statistic(values, statistic, size, start=0, stop=None, step=None, **kwargs): """Calculate a statistic in a moving window over `values`. Parameters ---------- values : array_like The data to summarise. statistic : function The statistic to compute within each window. size : int The window size (number of values). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. kwargs Additional keyword arguments are passed through to the `statistic` function. Returns ------- out : ndarray, shape (n_windows,) Examples -------- >>> import allel >>> values = [2, 5, 8, 16] >>> allel.moving_statistic(values, np.sum, size=2) array([ 7, 24]) >>> allel.moving_statistic(values, np.sum, size=2, step=1) array([ 7, 13, 24]) """ windows = index_windows(values, size, start, stop, step) # setup output out = np.array([statistic(values[i:j], **kwargs) for i, j in windows]) return out def moving_mean(values, size, start=0, stop=None, step=None): return moving_statistic(values, statistic=np.mean, size=size, start=start, stop=stop, step=step) def moving_std(values, size, start=0, stop=None, step=None): return moving_statistic(values, statistic=np.std, size=size, start=start, stop=stop, step=step) def moving_midpoint(values, size, start=0, stop=None, step=None): return moving_statistic(values, statistic=lambda v: (v[0] + v[-1])/2, size=size, start=start, stop=stop, step=step) def index_windows(values, size, start, stop, step): """Convenience function to construct windows for the :func:`moving_statistic` function. """ # determine step if stop is None: stop = len(values) if step is None: # non-overlapping step = size # iterate over windows for window_start in range(start, stop, step): window_stop = window_start + size if window_stop > stop: # ensure all windows are equal sized return yield (window_start, window_stop) def position_windows(pos, size, start, stop, step): """Convenience function to construct windows for the :func:`windowed_statistic` and :func:`windowed_count` functions. """ last = False # determine start and stop positions if start is None: start = pos[0] if stop is None: stop = pos[-1] if step is None: # non-overlapping step = size windows = [] for window_start in range(start, stop, step): # determine window stop window_stop = window_start + size if window_stop >= stop: # last window window_stop = stop last = True else: window_stop -= 1 windows.append([window_start, window_stop]) if last: break return np.asarray(windows) def window_locations(pos, windows): """Locate indices in `pos` corresponding to the start and stop positions of `windows`. """ start_locs = np.searchsorted(pos, windows[:, 0]) stop_locs = np.searchsorted(pos, windows[:, 1], side='right') locs = np.column_stack((start_locs, stop_locs)) return locs def windowed_count(pos, size=None, start=None, stop=None, step=None, windows=None): """Count the number of items in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). 
step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. Returns ------- counts : ndarray, int, shape (n_windows,) The number of items in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. Notes ----- The window stop positions are included within a window. The final window will be truncated to the specified stop position, and so may be smaller than the other windows. Examples -------- Non-overlapping windows:: >>> import allel >>> pos = [1, 7, 12, 15, 28] >>> counts, windows = allel.windowed_count(pos, size=10) >>> counts array([2, 2, 1]) >>> windows array([[ 1, 10], [11, 20], [21, 28]]) Half-overlapping windows:: >>> counts, windows = allel.windowed_count(pos, size=10, step=5) >>> counts array([2, 3, 2, 0, 1]) >>> windows array([[ 1, 10], [ 6, 15], [11, 20], [16, 25], [21, 28]]) """ # assume sorted positions if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) # setup windows if windows is None: windows = position_windows(pos, size, start, stop, step) else: windows = asarray_ndim(windows, 2) # find window locations locs = window_locations(pos, windows) # count number of items in each window counts = np.diff(locs, axis=1).reshape(-1) return counts, windows def windowed_statistic(pos, values, statistic, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan): """Calculate a statistic from items in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. values : array_like, int, shape (n_items,) The values to summarise. May also be a tuple of values arrays, in which case each array will be sliced and passed through to the statistic function as separate arguments. statistic : function The statistic to compute. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- The window stop positions are included within a window. The final window will be truncated to the specified stop position, and so may be smaller than the other windows. 
Examples -------- Count non-zero (i.e., True) items in non-overlapping windows:: >>> import allel >>> pos = [1, 7, 12, 15, 28] >>> values = [True, False, True, False, False] >>> nnz, windows, counts = allel.windowed_statistic( ... pos, values, statistic=np.count_nonzero, size=10 ... ) >>> nnz array([1, 1, 0]) >>> windows array([[ 1, 10], [11, 20], [21, 28]]) >>> counts array([2, 2, 1]) Compute a sum over items in half-overlapping windows:: >>> values = [3, 4, 2, 6, 9] >>> x, windows, counts = allel.windowed_statistic( ... pos, values, statistic=np.sum, size=10, step=5, fill=0 ... ) >>> x array([ 7, 12, 8, 0, 9]) >>> windows array([[ 1, 10], [ 6, 15], [11, 20], [16, 25], [21, 28]]) >>> counts array([2, 3, 2, 0, 1]) """ # assume sorted positions if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) # check lengths are equal if isinstance(values, tuple): # assume multiple values arrays check_equal_length(pos, *values) else: # assume a single values array check_equal_length(pos, values) # setup windows if windows is None: windows = position_windows(pos, size, start, stop, step) else: windows = asarray_ndim(windows, 2) # find window locations locs = window_locations(pos, windows) # setup outputs out = [] counts = [] # iterate over windows for start_idx, stop_idx in locs: # calculate number of values in window n = stop_idx - start_idx if n == 0: # window is empty s = fill else: if isinstance(values, tuple): # assume multiple values arrays wv = [v[start_idx:stop_idx] for v in values] s = statistic(*wv) else: # assume a single values array wv = values[start_idx:stop_idx] s = statistic(wv) # store outputs out.append(s) counts.append(n) # convert to arrays for output return np.asarray(out), windows, np.asarray(counts) def per_base(x, windows, is_accessible=None, fill=np.nan): """Calculate the per-base value of a windowed statistic. Parameters ---------- x : array_like, shape (n_windows,) The statistic to average per-base. windows : array_like, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions using 1-based coordinates. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. fill : object, optional Use this value where there are no accessible bases in a window. Returns ------- y : ndarray, float, shape (n_windows,) The input array divided by the number of (accessible) bases in each window. n_bases : ndarray, int, shape (n_windows,) The number of (accessible) bases in each window """ # calculate window sizes if is_accessible is None: # N.B., window stops are included n_bases = np.diff(windows, axis=1).reshape(-1) + 1 else: n_bases = np.array([np.count_nonzero(is_accessible[i-1:j]) for i, j in windows]) # deal with multidimensional x if x.ndim == 1: pass elif x.ndim == 2: n_bases = n_bases[:, None] else: raise NotImplementedError('only arrays of 1 or 2 dimensions supported') # calculate density per-base with ignore_invalid(): y = np.where(n_bases > 0, x / n_bases, fill) # restore to 1-dimensional if n_bases.ndim > 1: n_bases = n_bases.reshape(-1) return y, n_bases def equally_accessible_windows(is_accessible, size, start=0, stop=None, step=None): """Create windows each containing the same number of accessible bases. Parameters ---------- is_accessible : array_like, bool, shape (n_bases,) Array defining accessible status of all bases on a contig/chromosome. size : int Window size (number of accessible bases). 
start : int, optional The genome position at which to start. stop : int, optional The genome position at which to stop. step : int, optional The number of accessible sites between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Use half the window size to get half-overlapping windows. Returns ------- windows : ndarray, int, shape (n_windows, 2) Window start/stop positions (1-based). """ pos_accessible, = np.nonzero(is_accessible) pos_accessible += 1 # convert to 1-based coordinates # N.B., need some care in handling start and stop positions, these are # genomic positions at which to start and stop the windows if start: pos_accessible = pos_accessible[pos_accessible >= start] if stop: pos_accessible = pos_accessible[pos_accessible <= stop] # now construct moving windows windows = moving_statistic(pos_accessible, lambda v: [v[0], v[-1]], size=size, step=step) return windows
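A hedged usage sketch for equally_accessible_windows, which unlike the other window utilities above carries no example in its docstring. The accessibility mask below is fabricated for illustration, and the call assumes the function is exported at the top level of allel like the windowed functions shown earlier.

# Illustrative only: windows containing equal numbers of accessible bases.
import numpy as np
import allel

# Fabricated accessibility mask: 120 bases, every third base accessible.
is_accessible = np.zeros(120, dtype=bool)
is_accessible[::3] = True

# Each window spans 10 accessible bases; step=5 gives half-overlapping windows.
windows = allel.equally_accessible_windows(is_accessible, size=10, step=5)
print(windows)  # ndarray of (window_start, window_stop) positions, 1-based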
0.916893
0.738245
import numpy as np # internal imports from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned __all__ = ['create_allele_mapping', 'locate_private_alleles', 'locate_fixed_differences', 'sample_to_haplotype_selection'] def create_allele_mapping(ref, alt, alleles, dtype='i1'): """Create an array mapping variant alleles into a different allele index system. Parameters ---------- ref : array_like, S1, shape (n_variants,) Reference alleles. alt : array_like, S1, shape (n_variants, n_alt_alleles) Alternate alleles. alleles : array_like, S1, shape (n_variants, n_alleles) Alleles defining the new allele indexing. dtype : dtype, optional Output dtype. Returns ------- mapping : ndarray, int8, shape (n_variants, n_alt_alleles + 1) Examples -------- Example with biallelic variants:: >>> import allel >>> ref = [b'A', b'C', b'T', b'G'] >>> alt = [b'T', b'G', b'C', b'A'] >>> alleles = [[b'A', b'T'], # no transformation ... [b'G', b'C'], # swap ... [b'T', b'A'], # 1 missing ... [b'A', b'C']] # 1 missing >>> mapping = allel.create_allele_mapping(ref, alt, alleles) >>> mapping array([[ 0, 1], [ 1, 0], [ 0, -1], [-1, 0]], dtype=int8) Example with multiallelic variants:: >>> ref = [b'A', b'C', b'T'] >>> alt = [[b'T', b'G'], ... [b'A', b'T'], ... [b'G', b'.']] >>> alleles = [[b'A', b'T'], ... [b'C', b'T'], ... [b'G', b'A']] >>> mapping = create_allele_mapping(ref, alt, alleles) >>> mapping array([[ 0, 1, -1], [ 0, -1, 1], [-1, 0, -1]], dtype=int8) See Also -------- GenotypeArray.map_alleles, HaplotypeArray.map_alleles, AlleleCountsArray.map_alleles """ ref = asarray_ndim(ref, 1) alt = asarray_ndim(alt, 1, 2) alleles = asarray_ndim(alleles, 1, 2) check_dim0_aligned(ref, alt, alleles) # reshape for convenience ref = ref[:, None] if alt.ndim == 1: alt = alt[:, None] if alleles.ndim == 1: alleles = alleles[:, None] source_alleles = np.append(ref, alt, axis=1) # setup output array out = np.empty(source_alleles.shape, dtype=dtype) out.fill(-1) # find matches for ai in range(source_alleles.shape[1]): match = source_alleles[:, ai, None] == alleles match_i, match_j = match.nonzero() out[match_i, ai] = match_j return out def locate_fixed_differences(ac1, ac2): """Locate variants with no shared alleles between two populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. Returns ------- loc : ndarray, bool, shape (n_variants,) See Also -------- allel.stats.diversity.windowed_df Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 1], [0, 1], [1, 1], [1, 1]], ... [[0, 0], [0, 0], [1, 1], [2, 2]], ... 
[[0, 0], [-1, -1], [1, 1], [-1, -1]]]) >>> ac1 = g.count_alleles(subpop=[0, 1]) >>> ac2 = g.count_alleles(subpop=[2, 3]) >>> loc_df = allel.locate_fixed_differences(ac1, ac2) >>> loc_df array([ True, False, False, True, True]) """ # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # stack allele counts for convenience pac = np.dstack([ac1, ac2]) # count numbers of alleles called in each population pan = np.sum(pac, axis=1) # count the numbers of populations with each allele npa = np.sum(pac > 0, axis=2) # locate variants with allele calls in both populations non_missing = np.all(pan > 0, axis=1) # locate variants where all alleles are only found in a single population no_shared_alleles = np.all(npa <= 1, axis=1) return non_missing & no_shared_alleles def locate_private_alleles(*acs): """Locate alleles that are found only in a single population. Parameters ---------- *acs : array_like, int, shape (n_variants, n_alleles) Allele counts arrays from each population. Returns ------- loc : ndarray, bool, shape (n_variants, n_alleles) Boolean array where elements are True if allele is private to a single population. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 1], [0, 1], [1, 1], [1, 1]], ... [[0, 0], [0, 0], [1, 1], [2, 2]], ... [[0, 0], [-1, -1], [1, 1], [-1, -1]]]) >>> ac1 = g.count_alleles(subpop=[0, 1]) >>> ac2 = g.count_alleles(subpop=[2]) >>> ac3 = g.count_alleles(subpop=[3]) >>> loc_private_alleles = allel.locate_private_alleles(ac1, ac2, ac3) >>> loc_private_alleles array([[ True, False, False], [False, False, False], [ True, False, False], [ True, True, True], [ True, True, False]]) >>> loc_private_variants = np.any(loc_private_alleles, axis=1) >>> loc_private_variants array([ True, False, True, True, True]) """ # check inputs acs = [asarray_ndim(ac, 2) for ac in acs] check_dim0_aligned(*acs) acs = ensure_dim1_aligned(*acs) # stack allele counts for convenience pac = np.dstack(acs) # count the numbers of populations with each allele npa = np.sum(pac > 0, axis=2) # locate alleles found only in a single population loc_pa = npa == 1 return loc_pa def sample_to_haplotype_selection(indices, ploidy): return [(i * ploidy) + n for i in indices for n in range(ploidy)]
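sample_to_haplotype_selection above carries no docstring; a brief, hedged illustration of what it returns. The import path is assumed from the module path listed below.

# Illustrative only: expanding sample indices into haplotype column indices.
from allel.model.util import sample_to_haplotype_selection

# For diploid data, samples 0 and 2 occupy haplotype columns 0, 1 and 4, 5.
print(sample_to_haplotype_selection([0, 2], ploidy=2))  # [0, 1, 4, 5]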
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/model/util.py
util.py
0.842053
0.557243
import numpy as np # internal imports from allel.util import contains_newaxis, check_ndim def index_genotype_vector(g, item, cls): # apply indexing operation on underlying values out = g.values[item] # decide whether to wrap the result wrap = ( hasattr(out, 'ndim') and out.ndim == 2 and # dimensionality preserved out.shape[1] == g.shape[1] and # ploidy preserved not contains_newaxis(item) ) if wrap: out = cls(out) if g.mask is not None: out.mask = g.mask[item] if g.is_phased is not None: out.is_phased = g.is_phased[item] return out def index_genotype_array(g, item, array_cls, vector_cls): # apply indexing operation to underlying values out = g.values[item] # decide whether to wrap the output, if so how wrap_array = ( hasattr(out, 'ndim') and out.ndim == 3 and # dimensionality preserved out.shape[2] == g.shape[2] and # ploidy preserved not contains_newaxis(item) ) wrap_vector = ( # single row selection isinstance(item, int) or ( # other way to make a single row selection isinstance(item, tuple) and len(item) == 2 and isinstance(item[0], int) and isinstance(item[1], (slice, list, np.ndarray, type(Ellipsis))) ) or ( # single column selection isinstance(item, tuple) and len(item) == 2 and isinstance(item[0], (slice, list, np.ndarray)) and isinstance(item[1], int) ) ) if wrap_array: out = array_cls(out) if wrap_vector: out = vector_cls(out) if wrap_array or wrap_vector: if g.mask is not None: out.mask = g.mask[item] if g.is_phased is not None: out.is_phased = g.is_phased[item] return out def index_genotype_ac_vector(g, item, cls): # apply indexing operation on underlying values out = g.values[item] # decide whether to wrap the result wrap = ( hasattr(out, 'ndim') and out.ndim == 2 and # dimensionality preserved out.shape[1] == g.shape[1] and # alleles preserved not contains_newaxis(item) ) if wrap: out = cls(out) return out def index_genotype_ac_array(g, item, array_cls, vector_cls): # apply indexing operation to underlying values out = g.values[item] # decide whether to wrap the output, if so how wrap_array = ( hasattr(out, 'ndim') and out.ndim == 3 and # dimensionality preserved out.shape[2] == g.shape[2] and # alleles preserved not contains_newaxis(item) ) wrap_vector = ( # single row selection isinstance(item, int) or ( # other way to make a single row selection isinstance(item, tuple) and len(item) == 2 and isinstance(item[0], int) and isinstance(item[1], (slice, list, np.ndarray, type(Ellipsis))) ) or ( # single column selection isinstance(item, tuple) and len(item) == 2 and isinstance(item[0], (slice, list, np.ndarray)) and isinstance(item[1], int) ) ) if wrap_array: out = array_cls(out) if wrap_vector: out = vector_cls(out) return out def index_haplotype_array(h, item, cls): # apply indexing operation on underlying values out = h.values[item] # decide whether to wrap the result as HaplotypeArray wrap_array = ( hasattr(out, 'ndim') and out.ndim == 2 and # dimensionality preserved not contains_newaxis(item) ) if wrap_array: out = cls(out) return out def index_allele_counts_array(ac, item, cls): # apply indexing operation on underlying values out = ac.values[item] # decide whether to wrap the result as HaplotypeArray wrap_array = ( hasattr(out, 'ndim') and out.ndim == 2 and # dimensionality preserved ac.shape[1] == out.shape[1] and # number of alleles preserved not contains_newaxis(item) ) if wrap_array: out = cls(out) return out def _check_condition_length(a, condition, axis): # check the length of the condition array - here we deviate from numpy behaviour, # because numpy allows 
condition to be shorter than the axis under selection, # however we've found this allows mistakes to creep through and so we'll be stricter here if axis is not None: expected_length = a.shape[axis] k = condition.shape[0] if k != expected_length: raise ValueError('bad length of condition; expected %s, found %s' % (expected_length, k)) def compress_genotypes(g, condition, axis, wrap_axes, cls, compress, **kwargs): condition = np.asarray(condition, dtype=bool) check_ndim(condition, 1) _check_condition_length(g, condition, axis) # apply compress operation on the underlying values out = compress(condition, g.values, axis=axis, **kwargs) if axis in wrap_axes: out = cls(out) if g.mask is not None: out.mask = compress(condition, g.mask, axis=axis, **kwargs) if g.is_phased is not None: out.is_phased = compress(condition, g.is_phased, axis=axis, **kwargs) return out def take_genotypes(g, indices, axis, wrap_axes, cls, take, **kwargs): # apply compress operation on the underlying values out = take(g.values, indices, axis=axis, **kwargs) if axis in wrap_axes: out = cls(out) if g.mask is not None: out.mask = take(g.mask, indices, axis=axis, **kwargs) if g.is_phased is not None: out.is_phased = take(g.is_phased, indices, axis=axis, **kwargs) return out def concatenate_genotypes(g, others, axis, wrap_axes, cls, concatenate, **kwargs): if not isinstance(others, (tuple, list)): others = others, # apply the concatenate operation on the underlying values tup = (g.values,) + tuple(o.values for o in others) out = concatenate(tup, axis=axis, **kwargs) if axis in wrap_axes: out = cls(out) if g.mask is not None: tup = (g.mask,) + tuple(o.mask for o in others) out.mask = concatenate(tup, axis=axis, **kwargs) if g.is_phased is not None: tup = (g.is_phased,) + tuple(o.is_phased for o in others) out.is_phased = concatenate(tup, axis=axis, **kwargs) return out def subset_genotype_array(g, sel0, sel1, cls, subset, **kwargs): # apply the subset operation out = subset(g.values, sel0, sel1, **kwargs) # wrap the output out = cls(out) if g.mask is not None: out.mask = subset(g.mask, sel0, sel1, **kwargs) if g.is_phased is not None: out.is_phased = subset(g.is_phased, sel0, sel1, **kwargs) return out def compress_haplotype_array(h, condition, axis, cls, compress, **kwargs): condition = np.asarray(condition, dtype=bool) check_ndim(condition, 1) _check_condition_length(h, condition, axis) out = compress(condition, h.values, axis=axis, **kwargs) return cls(out) def take_haplotype_array(h, indices, axis, cls, take, **kwargs): out = take(h.values, indices, axis=axis, **kwargs) return cls(out) def subset_haplotype_array(h, sel0, sel1, cls, subset, **kwargs): out = subset(h.values, sel0, sel1, **kwargs) return cls(out) def concatenate_haplotype_array(h, others, axis, cls, concatenate, **kwargs): if not isinstance(others, (tuple, list)): others = others, tup = (h.values,) + tuple(o.values for o in others) out = concatenate(tup, axis=axis, **kwargs) out = cls(out) return out def compress_allele_counts_array(ac, condition, axis, cls, compress, **kwargs): condition = np.asarray(condition, dtype=bool) check_ndim(condition, 1) _check_condition_length(ac, condition, axis) out = compress(condition, ac.values, axis=axis, **kwargs) if axis == 0: out = cls(out) return out def take_allele_counts_array(ac, indices, axis, cls, take, **kwargs): out = take(ac.values, indices, axis=axis, **kwargs) if axis == 0: out = cls(out) return out def concatenate_allele_counts_array(ac, others, axis, cls, concatenate, **kwargs): if not isinstance(others, 
(tuple, list)): others = others, tup = (ac.values,) + tuple(o.values for o in others) out = concatenate(tup, axis=axis, **kwargs) if axis == 0: out = cls(out) return out def compress_genotype_ac(g, condition, axis, wrap_axes, cls, compress, **kwargs): condition = np.asarray(condition, dtype=bool) check_ndim(condition, 1) _check_condition_length(g, condition, axis) out = compress(condition, g.values, axis=axis, **kwargs) if axis in wrap_axes: out = cls(out) return out def take_genotype_ac(g, indices, axis, wrap_axes, cls, take, **kwargs): out = take(g.values, indices, axis=axis, **kwargs) if axis in wrap_axes: out = cls(out) return out def concatenate_genotype_ac(g, others, axis, wrap_axes, cls, concatenate, **kwargs): if not isinstance(others, (tuple, list)): others = others, tup = (g.values,) + tuple(o.values for o in others) out = concatenate(tup, axis=axis, **kwargs) if axis in wrap_axes: out = cls(out) return out def subset_genotype_ac_array(g, sel0, sel1, cls, subset, **kwargs): out = subset(g.values, sel0, sel1, **kwargs) out = cls(out) return out
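A hedged sketch of the stricter condition-length behaviour implemented in _check_condition_length, exercised here through the public GenotypeArray.compress wrapper, which is assumed to delegate to compress_genotypes.

# Illustrative only: a condition shorter than the selected axis is rejected,
# unlike plain numpy compress.
import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 1], [1, 1]],
                         [[1, 1], [-1, -1]]])

print(g.compress([True, False, True], axis=0))  # keeps variants 0 and 2

try:
    g.compress([True, False], axis=0)  # length 2 condition vs. 3 variants
except ValueError as err:
    print(err)  # bad length of condition; expected 3, found 2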
scikit-allel
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/model/generic.py
generic.py
0.682362
0.627552
from matplotlib import pyplot as plt import numpy as np from sklearn import preprocessing from .animator import Animator class SGDRegressorAnimator(Animator): def __animation_init(self): self.ax.set_title("Degree "+str(self.deg)+" Polynomial Regression") def __animation_update(self): self.ax.scatter(self.x[:, 0], self.y, color='red') for i in range(10): self.model.partial_fit(self.scaled_x,self.y) myline = np.linspace(np.amin(self.x[:, 0]), np.amax(self.x[:, 0]), 100).reshape(-1,1) myline_scaled = preprocessing.PolynomialFeatures(degree=self.deg, include_bias=False).fit_transform(myline) myline_scaled = preprocessing.StandardScaler().fit(self.x).transform(myline_scaled) preds = self.model.predict(myline_scaled) plt.plot(myline, preds, color='blue',) self.camera.snap() def __animation_init_wc(self): self.ax[0].set_title("Degree "+str(self.deg)+" Polynomial Regression") self.ax[1].set_title("Learning Curve") self.ax[1].set_xlabel("Iteration") self.ax[1].set_ylabel("Score") self.ax[1].set_ylim([0, 1]) def __animation_update_wc(self): self.ax[0].scatter(self.x[:, 0], self.y, color='red') for i in range(10): self.model.partial_fit(self.scaled_x,self.y) myline = np.linspace(np.amin(self.x[:, 0]), np.amax(self.x[:, 0]), 100).reshape(-1,1) myline_scaled = preprocessing.PolynomialFeatures(degree=self.deg, include_bias=False).fit_transform(myline) myline_scaled = preprocessing.StandardScaler().fit(self.x).transform(myline_scaled) preds = self.model.predict(myline_scaled) self.ax[0].plot(myline, preds, color='blue') score = self.model.score(self.scaled_x, self.y) self.score_list.append(score) self.counter += 1 self.ax[1].plot(list(range(self.counter)), self.score_list, color="blue") self.camera.snap() def animate(self): if self.animate_cost: self.__animation_init_wc() for i in range(500): self.__animation_update_wc() self.animation = self.camera.animate(interval = 10, repeat = True, repeat_delay = 500) plt.show() else: self.__animation_init() for i in range(1000): self.__animation_update() self.animation = self.camera.animate(interval = 1, repeat = True, repeat_delay = 500) plt.show() def save(self, name:str, format:str="mp4"): if self.animate_cost: self.__animation_init_wc() for i in range(500): self.__animation_update_wc() self.animation = self.camera.animate(interval = 10, repeat = True, repeat_delay = 500) else: self.__animation_init() for i in range(1000): self.__animation_update() self.animation = self.camera.animate(interval = 1, repeat = True, repeat_delay = 500) filename = name + "." + format print("Creating animation...") self.animation.save(filename) print("Animation Saved")
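The update loops above assume self.scaled_x holds standardised polynomial features of degree self.deg and that self.model supports partial_fit; the preprocessing of self.x happens in the unseen Animator constructor, so the hedged sketch below only shows one plausible preparation of such inputs with scikit-learn and does not instantiate the animator.

# Illustrative input preparation matching what __animation_update consumes.
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import SGDRegressor

deg = 3
x = np.random.uniform(-3, 3, size=(100, 1))
y = x[:, 0] ** 3 - 2 * x[:, 0] + np.random.normal(scale=2.0, size=100)

# Degree-3 polynomial features, standardised before incremental fitting.
poly = preprocessing.PolynomialFeatures(degree=deg, include_bias=False)
scaled_x = preprocessing.StandardScaler().fit_transform(poly.fit_transform(x))

model = SGDRegressor()
model.partial_fit(scaled_x, y)  # the animator repeats this call each frame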
scikit-animation
/scikit_animation-0.1.0-py3-none-any.whl/scikit_animation/sgdregressor_animator.py
sgdregressor_animator.py
0.385953
0.481454
from matplotlib import pyplot as plt import numpy as np from sklearn import preprocessing from .animator import Animator class SGDClassifierAnimator(Animator): def __animation_init(self): self.ax.set_title("Degree "+str(self.deg)+" Polynomial Classification") self.score = 0.0 def __animation_update(self): labels = np.unique(self.y) self.model.partial_fit(self.x, self.y, labels) self.score = self.model.score(self.x, self.y) self.dict = dict.fromkeys(labels) for val in self.dict: self.dict[val] = [] for i in range(0,self.x.shape[0]): self.dict[self.y[i]].append(self.x[i]) colors = list("rgbcmyk") for x in self.dict.values(): x = np.array(x) plt.scatter(x[:, 0],x[:, 1],color=colors.pop()) h = .02 # step size in the mesh x_min, x_max = self.x[:, 0].min() - 1, self.x[:, 0].max() + 1 y_min, y_max = self.x[:, 1].min() - 1, self.x[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) xxx = preprocessing.PolynomialFeatures(degree=2, include_bias=False).fit_transform(xx) yyy = preprocessing.PolynomialFeatures(degree=2, include_bias=False).fit_transform(yy) Z = self.model.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contour(xx, yy, Z, cmap=plt.cm.Paired) self.camera.snap() def animate(self): self.__animation_init() for i in range(100): self.__animation_update() self.animation = self.camera.animate(interval = 100, repeat = False, repeat_delay = 500) plt.show() print(self.score) def save(self, name:str, format:str="mp4"): self.__animation_init() for i in range(1000): self.__animation_update() animation = self.camera.animate(interval = 40, repeat = True, repeat_delay = 500) filename = name + "." + format print("Creating animation...") animation.save(filename) print("Animation Saved")
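Similarly for the classifier animator: a hedged sketch of the two-feature data and partial_fit-capable model its update loop assumes, again without touching the unseen Animator constructor; make_blobs is used only to fabricate example data.

# Illustrative input preparation matching what __animation_update consumes.
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.linear_model import SGDClassifier

x, y = make_blobs(n_samples=200, centers=3, n_features=2, random_state=0)
labels = np.unique(y)

model = SGDClassifier()
model.partial_fit(x, y, classes=labels)  # repeated once per animation frame
print(model.score(x, y))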
scikit-animation
/scikit_animation-0.1.0-py3-none-any.whl/scikit_animation/sgdclassifier_animator.py
sgdclassifier_animator.py
0.624637
0.447762
"""Validate if an input is one of the allowed named object formats.""" import collections.abc from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union, overload, ) from skbase.base import BaseObject __all__: List[str] = [ "check_sequence_named_objects", "is_named_object_tuple", "is_sequence_named_objects", ] __author__: List[str] = ["RNKuhns"] def _named_baseobject_error_msg( sequence_name: Optional[str] = None, allow_dict: bool = True ): """Create error message for non-comformance with named BaseObject api.""" name_str = f"{sequence_name}" if sequence_name is not None else "Input" allowed_types = "a sequence of (string name, BaseObject instance) tuples" if allow_dict: allowed_types += " or dict[str, BaseObject instance]" msg = f"Invalid {name_str!r}, {name_str!r} should be {allowed_types}." return msg def is_named_object_tuple( obj: Any, object_type: Optional[Union[type, Tuple[type, ...]]] = None ) -> bool: """Indicate if input is a a tuple of format (str, `object_type`). Used to validate that input follows named object tuple API format. Parameters ---------- obj : Any The object to be checked to see if it is a (str, `object_type`) tuple. object_type : class or tuple of class, default=BaseObject Class(es) that all objects are checked to be an instance of. If None, then :class:``skbase.base.BaseObject`` is used as default. Returns ------- bool True if obj is (str, `object_type`) tuple, otherwise False. See Also -------- is_sequence_named_objects : Indicate (True/False) if an input sequence follows the named object API. check_sequence_named_objects : Validate input to see if it follows sequence of named objects API. An error is raised for input that does not conform to the API format. Examples -------- >>> from skbase.base import BaseObject, BaseEstimator >>> from skbase.validate import is_named_object_tuple Default checks for object to be an instance of BaseOBject >>> is_named_object_tuple(("Step 1", BaseObject())) True >>> is_named_object_tuple(("Step 2", BaseEstimator())) True If a different `object_type` is provided then it is used in the isinstance check >>> is_named_object_tuple(("Step 1", BaseObject()), object_type=BaseEstimator) False >>> is_named_object_tuple(("Step 1", BaseEstimator()), object_type=BaseEstimator) True If the input is does not follow named object tuple format then False is returned >>> is_named_object_tuple({"Step 1": BaseEstimator()}) False >>> is_named_object_tuple((1, BaseObject())) False """ if object_type is None: object_type = BaseObject if not isinstance(obj, tuple) or len(obj) != 2: return False if not isinstance(obj[0], str) or not isinstance(obj[1], object_type): return False return True def is_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, ) -> bool: """Indicate if input is a sequence of named BaseObject instances. This can be a sequence of (str, BaseObject instance) tuples or a dictionary with string names as keys and BaseObject instances as values (if ``allow_dict=True``). Parameters ---------- seq_to_check : Sequence((str, BaseObject)) or Dict[str, BaseObject] The input to check for conformance with the named object interface. 
Conforming input are: - Sequence that contains (str, BaseObject instance) tuples - Dictionary with string names as keys and BaseObject instances as values if ``allow_dict=True`` allow_dict : bool, default=True Whether a dictionary of named objects is allowed as conforming named object type. - If True, then a dictionary with string keys and BaseObject instances is allowed format for providing a sequence of named objects. - If False, then only sequences that contain (str, BaseObject instance) tuples are considered conforming with the named object parameter API. require_unique_names : bool, default=False Whether names used in the sequence of named BaseObject instances must be unique. - If True and the names are not unique, then False is always returned. - If False, then whether or not the function returns True or False depends on whether `seq_to_check` follows sequence of named BaseObject format. object_type : class or tuple[class], default=None The class type(s) that is used to ensure that all elements of named objects match the expected type. Returns ------- bool Whether the input `seq_to_check` is a sequence that follows the API for nameed base object instances. Raises ------ ValueError If `seq_to_check` is not a sequence or ``allow_dict is False`` and `seq_to_check` is a dictionary. See Also -------- is_named_object_tuple : Indicate (True/False) if input follows the named object API format for a single named object (e.g., tupe[str, expected class type]). check_sequence_named_objects : Validate input to see if it follows sequence of named objects API. An error is raised for input that does not conform to the API format. Examples -------- >>> from skbase.base import BaseObject, BaseEstimator >>> from skbase.validate import is_sequence_named_objects >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> is_sequence_named_objects(named_objects) True Dictionaries are optionally allowed as sequences of named BaseObjects >>> dict_named_objects = {"Step 1": BaseObject(), "Step 2": BaseObject()} >>> is_sequence_named_objects(dict_named_objects) True >>> is_sequence_named_objects(dict_named_objects, allow_dict=False) False Invalid format due to object names not being strings >>> incorrectly_named_objects = [(1, BaseObject()), (2, BaseObject())] >>> is_sequence_named_objects(incorrectly_named_objects) False Invalid format due to named items not being BaseObject instances >>> named_items = [("1", 7), ("2", 42)] >>> is_sequence_named_objects(named_items) False The validation can require the object elements to be a certain class type >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> is_sequence_named_objects(named_objects, object_type=BaseEstimator) False >>> named_objects = [("Step 1", BaseEstimator()), ("Step 2", BaseEstimator())] >>> is_sequence_named_objects(named_objects, object_type=BaseEstimator) True """ # Want to end quickly if the input isn't sequence or is a dict and we # aren't allowing dicts if object_type is None: object_type = BaseObject is_dict = isinstance(seq_to_check, dict) if (not is_dict and not isinstance(seq_to_check, collections.abc.Sequence)) or ( not allow_dict and is_dict ): return False all_expected_format: bool all_unique_names: bool if is_dict: if TYPE_CHECKING: # pragma: no cover assert isinstance(seq_to_check, dict) # nosec B101 elements_expected_format = [ isinstance(name, str) and isinstance(obj, object_type) for name, obj in seq_to_check.items() ] all_unique_names = True else: names = [] elements_expected_format = [] 
for it in seq_to_check: if is_named_object_tuple(it, object_type=object_type): elements_expected_format.append(True) names.append(it[0]) else: elements_expected_format.append(False) all_unique_names = len(set(names)) == len(names) all_expected_format = all(elements_expected_format) if not all_expected_format or (require_unique_names and not all_unique_names): is_expected_format = False else: is_expected_format = True return is_expected_format @overload def check_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]]: ... # pragma: no cover @overload def check_sequence_named_objects( seq_to_check: Sequence[Tuple[str, BaseObject]], allow_dict: bool, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Sequence[Tuple[str, BaseObject]]: ... # pragma: no cover @overload def check_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]]: ... # pragma: no cover def check_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]]: """Check if input is a sequence of named BaseObject instances. `seq_to_check` is returned unchanged when it follows the allowed named BaseObject convention. The allowed format includes a sequence of (str, BaseObject instance) tuples. A dictionary with string names as keys and BaseObject instances as values is also allowed if ``allow_dict is True``. Parameters ---------- seq_to_check : Sequence((str, BaseObject)) or Dict[str, BaseObject] The input to check for conformance with the named object interface. Conforming input are: - Sequence that contains (str, BaseObject instance) tuples - Dictionary with string names as keys and BaseObject instances as values if ``allow_dict=True`` allow_dict : bool, default=True Whether a dictionary of named objects is allowed as conforming named object type. - If True, then a dictionary with string keys and BaseObject instances is allowed format for providing a sequence of named objects. - If False, then only sequences that contain (str, BaseObject instance) tuples are considered conforming with the named object parameter API. require_unique_names : bool, default=False Whether names used in the sequence of named BaseObject instances must be unique. - If True and the names are not unique, then False is always returned. - If False, then whether or not the function returns True or False depends on whether `seq_to_check` follows sequence of named BaseObject format. object_type : class or tuple[class], default=None The class type(s) that is used to ensure that all elements of named objects match the expected type. sequence_name : str, default=None Optional name used to refer to the input `seq_to_check` when raising any errors. Ignored ``raise_error=False``. 
Returns ------- Sequence((str, BaseObject)) or Dict[str, BaseObject] The `seq_to_check` is returned if it is a conforming named object type. - If ``allow_dict=True`` then return type is Sequence((str, BaseObject)) or Dict[str, BaseObject] - If ``allow_dict=False`` then return type is Sequence((str, BaseObject)) Raises ------ ValueError If `seq_to_check` does not conform to the named BaseObject API. See Also -------- is_named_object_tuple : Indicate (True/False) if input follows the named object API format for a single named object (e.g., tupe[str, expected class type]). is_sequence_named_objects : Indicate (True/False) if an input sequence follows the named object API. Examples -------- >>> from skbase.base import BaseObject, BaseEstimator >>> from skbase.validate import check_sequence_named_objects >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> check_sequence_named_objects(named_objects) [('Step 1', BaseObject()), ('Step 2', BaseObject())] Dictionaries are optionally allowed as sequences of named BaseObjects >>> named_objects = {"Step 1": BaseObject(), "Step 2": BaseObject()} >>> check_sequence_named_objects(named_objects) {'Step 1': BaseObject(), 'Step 2': BaseObject()} Raises error since dictionaries are not allowed when allow_dict is False >>> check_sequence_named_objects(named_objects, allow_dict=False) # doctest: +SKIP Raises error due to invalid format due to object names not being strings >>> incorrectly_named_objects = [(1, BaseObject()), (2, BaseObject())] >>> check_sequence_named_objects(incorrectly_named_objects) # doctest: +SKIP Raises error due to invalid format since named items are not BaseObject instances >>> named_items = [("1", 7), ("2", 42)] >>> check_sequence_named_objects(named_items) # doctest: +SKIP The validation can require the object elements to be a certain class type >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> check_sequence_named_objects( \ named_objects, object_type=BaseEstimator) # doctest: +SKIP >>> named_objects = [("Step 1", BaseEstimator()), ("Step 2", BaseEstimator())] >>> check_sequence_named_objects(named_objects, object_type=BaseEstimator) [('Step 1', BaseEstimator()), ('Step 2', BaseEstimator())] """ is_expected_format = is_sequence_named_objects( seq_to_check, allow_dict=allow_dict, require_unique_names=require_unique_names, object_type=object_type, ) # Raise error is format is not expected. if not is_expected_format: msg = _named_baseobject_error_msg( sequence_name=sequence_name, allow_dict=allow_dict ) raise ValueError(msg) return seq_to_check
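The doctests above mark the error paths with # doctest: +SKIP; a hedged sketch of one of those paths with the ValueError actually caught, using only calls shown in this module.

# Illustrative only: dictionaries are rejected when allow_dict=False.
from skbase.base import BaseObject
from skbase.validate import check_sequence_named_objects

named_objects = {"Step 1": BaseObject(), "Step 2": BaseObject()}
try:
    check_sequence_named_objects(named_objects, allow_dict=False)
except ValueError as err:
    print(err)
    # Invalid 'Input', 'Input' should be a sequence of
    # (string name, BaseObject instance) tuples.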
scikit-base
/scikit_base-0.5.1-py3-none-any.whl/skbase/validate/_named_objects.py
_named_objects.py
"""Validate if an input is one of the allowed named object formats.""" import collections.abc from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union, overload, ) from skbase.base import BaseObject __all__: List[str] = [ "check_sequence_named_objects", "is_named_object_tuple", "is_sequence_named_objects", ] __author__: List[str] = ["RNKuhns"] def _named_baseobject_error_msg( sequence_name: Optional[str] = None, allow_dict: bool = True ): """Create error message for non-comformance with named BaseObject api.""" name_str = f"{sequence_name}" if sequence_name is not None else "Input" allowed_types = "a sequence of (string name, BaseObject instance) tuples" if allow_dict: allowed_types += " or dict[str, BaseObject instance]" msg = f"Invalid {name_str!r}, {name_str!r} should be {allowed_types}." return msg def is_named_object_tuple( obj: Any, object_type: Optional[Union[type, Tuple[type, ...]]] = None ) -> bool: """Indicate if input is a a tuple of format (str, `object_type`). Used to validate that input follows named object tuple API format. Parameters ---------- obj : Any The object to be checked to see if it is a (str, `object_type`) tuple. object_type : class or tuple of class, default=BaseObject Class(es) that all objects are checked to be an instance of. If None, then :class:``skbase.base.BaseObject`` is used as default. Returns ------- bool True if obj is (str, `object_type`) tuple, otherwise False. See Also -------- is_sequence_named_objects : Indicate (True/False) if an input sequence follows the named object API. check_sequence_named_objects : Validate input to see if it follows sequence of named objects API. An error is raised for input that does not conform to the API format. Examples -------- >>> from skbase.base import BaseObject, BaseEstimator >>> from skbase.validate import is_named_object_tuple Default checks for object to be an instance of BaseOBject >>> is_named_object_tuple(("Step 1", BaseObject())) True >>> is_named_object_tuple(("Step 2", BaseEstimator())) True If a different `object_type` is provided then it is used in the isinstance check >>> is_named_object_tuple(("Step 1", BaseObject()), object_type=BaseEstimator) False >>> is_named_object_tuple(("Step 1", BaseEstimator()), object_type=BaseEstimator) True If the input is does not follow named object tuple format then False is returned >>> is_named_object_tuple({"Step 1": BaseEstimator()}) False >>> is_named_object_tuple((1, BaseObject())) False """ if object_type is None: object_type = BaseObject if not isinstance(obj, tuple) or len(obj) != 2: return False if not isinstance(obj[0], str) or not isinstance(obj[1], object_type): return False return True def is_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, ) -> bool: """Indicate if input is a sequence of named BaseObject instances. This can be a sequence of (str, BaseObject instance) tuples or a dictionary with string names as keys and BaseObject instances as values (if ``allow_dict=True``). Parameters ---------- seq_to_check : Sequence((str, BaseObject)) or Dict[str, BaseObject] The input to check for conformance with the named object interface. 
Conforming input are: - Sequence that contains (str, BaseObject instance) tuples - Dictionary with string names as keys and BaseObject instances as values if ``allow_dict=True`` allow_dict : bool, default=True Whether a dictionary of named objects is allowed as conforming named object type. - If True, then a dictionary with string keys and BaseObject instances is allowed format for providing a sequence of named objects. - If False, then only sequences that contain (str, BaseObject instance) tuples are considered conforming with the named object parameter API. require_unique_names : bool, default=False Whether names used in the sequence of named BaseObject instances must be unique. - If True and the names are not unique, then False is always returned. - If False, then whether or not the function returns True or False depends on whether `seq_to_check` follows sequence of named BaseObject format. object_type : class or tuple[class], default=None The class type(s) that is used to ensure that all elements of named objects match the expected type. Returns ------- bool Whether the input `seq_to_check` is a sequence that follows the API for nameed base object instances. Raises ------ ValueError If `seq_to_check` is not a sequence or ``allow_dict is False`` and `seq_to_check` is a dictionary. See Also -------- is_named_object_tuple : Indicate (True/False) if input follows the named object API format for a single named object (e.g., tupe[str, expected class type]). check_sequence_named_objects : Validate input to see if it follows sequence of named objects API. An error is raised for input that does not conform to the API format. Examples -------- >>> from skbase.base import BaseObject, BaseEstimator >>> from skbase.validate import is_sequence_named_objects >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> is_sequence_named_objects(named_objects) True Dictionaries are optionally allowed as sequences of named BaseObjects >>> dict_named_objects = {"Step 1": BaseObject(), "Step 2": BaseObject()} >>> is_sequence_named_objects(dict_named_objects) True >>> is_sequence_named_objects(dict_named_objects, allow_dict=False) False Invalid format due to object names not being strings >>> incorrectly_named_objects = [(1, BaseObject()), (2, BaseObject())] >>> is_sequence_named_objects(incorrectly_named_objects) False Invalid format due to named items not being BaseObject instances >>> named_items = [("1", 7), ("2", 42)] >>> is_sequence_named_objects(named_items) False The validation can require the object elements to be a certain class type >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> is_sequence_named_objects(named_objects, object_type=BaseEstimator) False >>> named_objects = [("Step 1", BaseEstimator()), ("Step 2", BaseEstimator())] >>> is_sequence_named_objects(named_objects, object_type=BaseEstimator) True """ # Want to end quickly if the input isn't sequence or is a dict and we # aren't allowing dicts if object_type is None: object_type = BaseObject is_dict = isinstance(seq_to_check, dict) if (not is_dict and not isinstance(seq_to_check, collections.abc.Sequence)) or ( not allow_dict and is_dict ): return False all_expected_format: bool all_unique_names: bool if is_dict: if TYPE_CHECKING: # pragma: no cover assert isinstance(seq_to_check, dict) # nosec B101 elements_expected_format = [ isinstance(name, str) and isinstance(obj, object_type) for name, obj in seq_to_check.items() ] all_unique_names = True else: names = [] elements_expected_format = [] 
for it in seq_to_check: if is_named_object_tuple(it, object_type=object_type): elements_expected_format.append(True) names.append(it[0]) else: elements_expected_format.append(False) all_unique_names = len(set(names)) == len(names) all_expected_format = all(elements_expected_format) if not all_expected_format or (require_unique_names and not all_unique_names): is_expected_format = False else: is_expected_format = True return is_expected_format @overload def check_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]]: ... # pragma: no cover @overload def check_sequence_named_objects( seq_to_check: Sequence[Tuple[str, BaseObject]], allow_dict: bool, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Sequence[Tuple[str, BaseObject]]: ... # pragma: no cover @overload def check_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]]: ... # pragma: no cover def check_sequence_named_objects( seq_to_check: Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]], allow_dict: bool = True, require_unique_names=False, object_type: Optional[Union[type, Tuple[type]]] = None, sequence_name: Optional[str] = None, ) -> Union[Sequence[Tuple[str, BaseObject]], Dict[str, BaseObject]]: """Check if input is a sequence of named BaseObject instances. `seq_to_check` is returned unchanged when it follows the allowed named BaseObject convention. The allowed format includes a sequence of (str, BaseObject instance) tuples. A dictionary with string names as keys and BaseObject instances as values is also allowed if ``allow_dict is True``. Parameters ---------- seq_to_check : Sequence((str, BaseObject)) or Dict[str, BaseObject] The input to check for conformance with the named object interface. Conforming input are: - Sequence that contains (str, BaseObject instance) tuples - Dictionary with string names as keys and BaseObject instances as values if ``allow_dict=True`` allow_dict : bool, default=True Whether a dictionary of named objects is allowed as conforming named object type. - If True, then a dictionary with string keys and BaseObject instances is allowed format for providing a sequence of named objects. - If False, then only sequences that contain (str, BaseObject instance) tuples are considered conforming with the named object parameter API. require_unique_names : bool, default=False Whether names used in the sequence of named BaseObject instances must be unique. - If True and the names are not unique, then False is always returned. - If False, then whether or not the function returns True or False depends on whether `seq_to_check` follows sequence of named BaseObject format. object_type : class or tuple[class], default=None The class type(s) that is used to ensure that all elements of named objects match the expected type. sequence_name : str, default=None Optional name used to refer to the input `seq_to_check` when raising any errors. Ignored ``raise_error=False``. 
Returns ------- Sequence((str, BaseObject)) or Dict[str, BaseObject] The `seq_to_check` is returned if it is a conforming named object type. - If ``allow_dict=True`` then return type is Sequence((str, BaseObject)) or Dict[str, BaseObject] - If ``allow_dict=False`` then return type is Sequence((str, BaseObject)) Raises ------ ValueError If `seq_to_check` does not conform to the named BaseObject API. See Also -------- is_named_object_tuple : Indicate (True/False) if input follows the named object API format for a single named object (e.g., tupe[str, expected class type]). is_sequence_named_objects : Indicate (True/False) if an input sequence follows the named object API. Examples -------- >>> from skbase.base import BaseObject, BaseEstimator >>> from skbase.validate import check_sequence_named_objects >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> check_sequence_named_objects(named_objects) [('Step 1', BaseObject()), ('Step 2', BaseObject())] Dictionaries are optionally allowed as sequences of named BaseObjects >>> named_objects = {"Step 1": BaseObject(), "Step 2": BaseObject()} >>> check_sequence_named_objects(named_objects) {'Step 1': BaseObject(), 'Step 2': BaseObject()} Raises error since dictionaries are not allowed when allow_dict is False >>> check_sequence_named_objects(named_objects, allow_dict=False) # doctest: +SKIP Raises error due to invalid format due to object names not being strings >>> incorrectly_named_objects = [(1, BaseObject()), (2, BaseObject())] >>> check_sequence_named_objects(incorrectly_named_objects) # doctest: +SKIP Raises error due to invalid format since named items are not BaseObject instances >>> named_items = [("1", 7), ("2", 42)] >>> check_sequence_named_objects(named_items) # doctest: +SKIP The validation can require the object elements to be a certain class type >>> named_objects = [("Step 1", BaseObject()), ("Step 2", BaseObject())] >>> check_sequence_named_objects( \ named_objects, object_type=BaseEstimator) # doctest: +SKIP >>> named_objects = [("Step 1", BaseEstimator()), ("Step 2", BaseEstimator())] >>> check_sequence_named_objects(named_objects, object_type=BaseEstimator) [('Step 1', BaseEstimator()), ('Step 2', BaseEstimator())] """ is_expected_format = is_sequence_named_objects( seq_to_check, allow_dict=allow_dict, require_unique_names=require_unique_names, object_type=object_type, ) # Raise error is format is not expected. if not is_expected_format: msg = _named_baseobject_error_msg( sequence_name=sequence_name, allow_dict=allow_dict ) raise ValueError(msg) return seq_to_check
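# Usage sketch for the checkers above: wiring them into a composite object's
# ``__init__``.  ``MyPipeline`` is a hypothetical class used purely for
# illustration; it is not part of skbase.
from skbase.base import BaseObject
from skbase.validate import (
    check_sequence_named_objects,
    is_sequence_named_objects,
)


class MyPipeline(BaseObject):
    """Toy composite that stores a sequence of named BaseObject steps."""

    def __init__(self, steps):
        # Raises ValueError with an informative message if ``steps`` is not a
        # sequence of (str, BaseObject) tuples or a dict[str, BaseObject].
        self.steps = check_sequence_named_objects(
            steps, require_unique_names=True, sequence_name="steps"
        )
        super().__init__()


good_steps = [("first", BaseObject()), ("second", BaseObject())]
pipe = MyPipeline(good_steps)  # passes; ``steps`` is returned unchanged

# The boolean variant never raises; it only reports conformance.
assert is_sequence_named_objects(good_steps)
assert not is_sequence_named_objects([("first", 42)])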
0.954009
0.417093
"""Functionality for working with nested sequences.""" import collections from typing import List __author__: List[str] = ["RNKuhns", "fkiraly"] __all__: List[str] = [ "flatten", "is_flat", "_remove_single", "unflat_len", "unflatten", ] def _remove_single(x): """Remove tuple wrapping from singleton. If the input has length 1, then the single value is extracted from the input. Otherwise, the input is returned unchanged. Parameters ---------- x : Sequence The sequence to remove a singleton value from. Returns ------- Any The singleton value of x if x[0] is a singleton, otherwise x. Examples -------- >>> from skbase.utils._nested_iter import _remove_single >>> _remove_single([1]) 1 >>> _remove_single([1, 2, 3]) [1, 2, 3] """ if len(x) == 1: return x[0] else: return x def flatten(obj): """Flatten nested list/tuple structure. Converts a nested iterable or sequence to a flat output iterable/sequence with the same and order of elements. Parameters ---------- obj : Any The object to be flattened from a nested iterable/sequence structure. Returns ------- Sequence or Iterable flat iterable/sequence, containing non-list/tuple elements in obj in same order as in obj. Examples -------- >>> from skbase.utils import flatten >>> flatten([1, 2, [3, (4, 5)], 6]) [1, 2, 3, 4, 5, 6] """ if not isinstance( obj, (collections.abc.Iterable, collections.abc.Sequence) ) or isinstance(obj, str): return [obj] else: return type(obj)([y for x in obj for y in flatten(x)]) def unflatten(obj, template): """Invert flattening given given template for nested list/tuple structure. Converts an input list or tuple to a nested structure as provided in `template` while preserving the order of elements in the input. Parameters ---------- obj : list or tuple The object to be unflattened. template : nested list/tuple structure Number of non-list/tuple elements of obj and template must be equal. Returns ------- list or tuple Input coerced to have elements with nested list/tuples structure exactly as `template` and elements in sequence exactly as `obj`. Examples -------- >>> from skbase.utils import unflatten >>> unflatten([1, 2, 3, 4, 5, 6], [6, 3, [5, (2, 4)], 1]) [1, 2, [3, (4, 5)], 6] """ if not isinstance(template, (list, tuple)): return obj[0] list_or_tuple = type(template) ls = [unflat_len(x) for x in template] for i in range(1, len(ls)): ls[i] += ls[i - 1] ls = [0] + ls res = [unflatten(obj[ls[i] : ls[i + 1]], template[i]) for i in range(len(ls) - 1)] return list_or_tuple(res) def unflat_len(obj): """Return number of elements in nested iterable or sequence structure. Determines the total number of elements in a nested iterable/sequence structure. Input that is not a iterable or sequence is considered to have length 1. Parameters ---------- obj : Any Object to determine the unflat length. Returns ------- int The unflat length of the input. Examples -------- >>> from skbase.utils import unflat_len >>> unflat_len(7) 1 >>> unflat_len((1, 2)) 2 >>> unflat_len([1, (2, 3), 4, 5]) 5 """ if not isinstance( obj, (collections.abc.Iterable, collections.abc.Sequence) ) or isinstance(obj, str): return 1 else: return sum([unflat_len(x) for x in obj]) def is_flat(obj): """Check whether iterable or sequence is flat. If any elements are iterables or sequences the object is considered to not be flat. Parameters ---------- obj : Any The object to check to see if it is flat (does not have nested iterable). Returns ------- bool Whether or not the input `obj` contains nested iterables. 
Examples -------- >>> from skbase.utils import is_flat >>> is_flat([1, 2, 3, 4, 5]) True >>> is_flat([1, (2, 3), 4, 5]) False """ elements_flat = ( isinstance(x, (collections.abc.Iterable, collections.abc.Sequence)) and not isinstance(x, str) for x in obj ) return not any(elements_flat)
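# Round-trip sketch combining the helpers above: ``flatten`` removes the
# nesting and ``unflatten`` restores it from a template with the same unflat
# length.  The nested structure below is an arbitrary example.
from skbase.utils import flatten, is_flat, unflat_len, unflatten

nested = [1, 2, [3, (4, 5)], 6]

flat = flatten(nested)  # [1, 2, 3, 4, 5, 6]
assert is_flat(flat)
assert unflat_len(nested) == len(flat) == 6

# Any template with the same unflat length works; reusing ``nested`` itself
# makes the round trip reproduce the original structure exactly.
assert unflatten(flat, nested) == nested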
scikit-base
/scikit_base-0.5.1-py3-none-any.whl/skbase/utils/_nested_iter.py
_nested_iter.py
"""Functionality for working with nested sequences.""" import collections from typing import List __author__: List[str] = ["RNKuhns", "fkiraly"] __all__: List[str] = [ "flatten", "is_flat", "_remove_single", "unflat_len", "unflatten", ] def _remove_single(x): """Remove tuple wrapping from singleton. If the input has length 1, then the single value is extracted from the input. Otherwise, the input is returned unchanged. Parameters ---------- x : Sequence The sequence to remove a singleton value from. Returns ------- Any The singleton value of x if x[0] is a singleton, otherwise x. Examples -------- >>> from skbase.utils._nested_iter import _remove_single >>> _remove_single([1]) 1 >>> _remove_single([1, 2, 3]) [1, 2, 3] """ if len(x) == 1: return x[0] else: return x def flatten(obj): """Flatten nested list/tuple structure. Converts a nested iterable or sequence to a flat output iterable/sequence with the same and order of elements. Parameters ---------- obj : Any The object to be flattened from a nested iterable/sequence structure. Returns ------- Sequence or Iterable flat iterable/sequence, containing non-list/tuple elements in obj in same order as in obj. Examples -------- >>> from skbase.utils import flatten >>> flatten([1, 2, [3, (4, 5)], 6]) [1, 2, 3, 4, 5, 6] """ if not isinstance( obj, (collections.abc.Iterable, collections.abc.Sequence) ) or isinstance(obj, str): return [obj] else: return type(obj)([y for x in obj for y in flatten(x)]) def unflatten(obj, template): """Invert flattening given given template for nested list/tuple structure. Converts an input list or tuple to a nested structure as provided in `template` while preserving the order of elements in the input. Parameters ---------- obj : list or tuple The object to be unflattened. template : nested list/tuple structure Number of non-list/tuple elements of obj and template must be equal. Returns ------- list or tuple Input coerced to have elements with nested list/tuples structure exactly as `template` and elements in sequence exactly as `obj`. Examples -------- >>> from skbase.utils import unflatten >>> unflatten([1, 2, 3, 4, 5, 6], [6, 3, [5, (2, 4)], 1]) [1, 2, [3, (4, 5)], 6] """ if not isinstance(template, (list, tuple)): return obj[0] list_or_tuple = type(template) ls = [unflat_len(x) for x in template] for i in range(1, len(ls)): ls[i] += ls[i - 1] ls = [0] + ls res = [unflatten(obj[ls[i] : ls[i + 1]], template[i]) for i in range(len(ls) - 1)] return list_or_tuple(res) def unflat_len(obj): """Return number of elements in nested iterable or sequence structure. Determines the total number of elements in a nested iterable/sequence structure. Input that is not a iterable or sequence is considered to have length 1. Parameters ---------- obj : Any Object to determine the unflat length. Returns ------- int The unflat length of the input. Examples -------- >>> from skbase.utils import unflat_len >>> unflat_len(7) 1 >>> unflat_len((1, 2)) 2 >>> unflat_len([1, (2, 3), 4, 5]) 5 """ if not isinstance( obj, (collections.abc.Iterable, collections.abc.Sequence) ) or isinstance(obj, str): return 1 else: return sum([unflat_len(x) for x in obj]) def is_flat(obj): """Check whether iterable or sequence is flat. If any elements are iterables or sequences the object is considered to not be flat. Parameters ---------- obj : Any The object to check to see if it is flat (does not have nested iterable). Returns ------- bool Whether or not the input `obj` contains nested iterables. 
Examples -------- >>> from skbase.utils import is_flat >>> is_flat([1, 2, 3, 4, 5]) True >>> is_flat([1, (2, 3), 4, 5]) False """ elements_flat = ( isinstance(x, (collections.abc.Iterable, collections.abc.Sequence)) and not isinstance(x, str) for x in obj ) return not any(elements_flat)
0.951605
0.668048
"""Functionality for working with sequences.""" from typing import Any, Iterable, List, MutableMapping, Optional, Union __author__: List[str] = ["RNKuhns"] __all__: List[str] = ["subset_dict_keys"] def subset_dict_keys( input_dict: MutableMapping[Any, Any], keys: Union[Iterable, int, float, bool, str, type], prefix: Optional[str] = None, remove_prefix: bool = True, ): """Subset dictionary so it only contains specified keys. Subsets `input_dict` so that it only contains `keys`. If `prefix` is passed, subsets to `f"{prefix}__{key}"` for all `key` in `keys`. When ``remove_prefix=True`` the the prefix is removed from the keys of the return dictionary (For any keys with prefix the return is `{key}` instead of `f"{prefix}__{key}"`). Parameters ---------- input_dict : dict Dictionary to subset by keys keys : iterable, int, float, bool, str or type The keys that should be retained in the output dictionary. prefix : str, default=None An optional prefix that is added to all keys. If `prefix` is passed, the passed keys are converted to `f"{prefix}__{key}"` when subsetting the dictionary. Results in all keys being coerced to str. remove_prefix : bool, default=True Whether to remove prefix in output keys. Returns ------- `subsetted_dict` : dict `dict_to_subset` subset to keys in `keys` described as above Notes ----- Passing `prefix` will turn non-str keys into str keys. Examples -------- >>> from skbase.utils import subset_dict_keys >>> some_dict = {"some_param__a": 1, "some_param__b": 2, "some_param__c": 3} >>> subset_dict_keys(some_dict, "some_param__a") {'some_param__a': 1} >>> subset_dict_keys(some_dict, ("some_param__a", "some_param__b")) {'some_param__a': 1, 'some_param__b': 2} >>> subset_dict_keys(some_dict, ("a", "b"), prefix="some_param") {'a': 1, 'b': 2} >>> subset_dict_keys(some_dict, ("a", "b"), prefix="some_param", \ remove_prefix=False) {'some_param__a': 1, 'some_param__b': 2} >>> subset_dict_keys(some_dict, \ (c for c in ("some_param__a", "some_param__b"))) {'some_param__a': 1, 'some_param__b': 2} """ def rem_prefix(x): if not remove_prefix or prefix is None: return x prefix__ = f"{prefix}__" if x.startswith(prefix__): return x[len(prefix__) :] # The way this is used below, this else shouldn't really execute # But its here for completeness in case something goes wrong else: return x # pragma: no cover # Handle passage of certain scalar values if isinstance(keys, (str, float, int, bool, type)): keys = [keys] if prefix is not None: keys = [f"{prefix}__{key}" for key in keys] else: keys = list(keys) subsetted_dict = {rem_prefix(k): v for k, v in input_dict.items() if k in keys} return subsetted_dict
scikit-base
/scikit_base-0.5.1-py3-none-any.whl/skbase/utils/_utils.py
_utils.py
"""Functionality for working with sequences.""" from typing import Any, Iterable, List, MutableMapping, Optional, Union __author__: List[str] = ["RNKuhns"] __all__: List[str] = ["subset_dict_keys"] def subset_dict_keys( input_dict: MutableMapping[Any, Any], keys: Union[Iterable, int, float, bool, str, type], prefix: Optional[str] = None, remove_prefix: bool = True, ): """Subset dictionary so it only contains specified keys. Subsets `input_dict` so that it only contains `keys`. If `prefix` is passed, subsets to `f"{prefix}__{key}"` for all `key` in `keys`. When ``remove_prefix=True`` the the prefix is removed from the keys of the return dictionary (For any keys with prefix the return is `{key}` instead of `f"{prefix}__{key}"`). Parameters ---------- input_dict : dict Dictionary to subset by keys keys : iterable, int, float, bool, str or type The keys that should be retained in the output dictionary. prefix : str, default=None An optional prefix that is added to all keys. If `prefix` is passed, the passed keys are converted to `f"{prefix}__{key}"` when subsetting the dictionary. Results in all keys being coerced to str. remove_prefix : bool, default=True Whether to remove prefix in output keys. Returns ------- `subsetted_dict` : dict `dict_to_subset` subset to keys in `keys` described as above Notes ----- Passing `prefix` will turn non-str keys into str keys. Examples -------- >>> from skbase.utils import subset_dict_keys >>> some_dict = {"some_param__a": 1, "some_param__b": 2, "some_param__c": 3} >>> subset_dict_keys(some_dict, "some_param__a") {'some_param__a': 1} >>> subset_dict_keys(some_dict, ("some_param__a", "some_param__b")) {'some_param__a': 1, 'some_param__b': 2} >>> subset_dict_keys(some_dict, ("a", "b"), prefix="some_param") {'a': 1, 'b': 2} >>> subset_dict_keys(some_dict, ("a", "b"), prefix="some_param", \ remove_prefix=False) {'some_param__a': 1, 'some_param__b': 2} >>> subset_dict_keys(some_dict, \ (c for c in ("some_param__a", "some_param__b"))) {'some_param__a': 1, 'some_param__b': 2} """ def rem_prefix(x): if not remove_prefix or prefix is None: return x prefix__ = f"{prefix}__" if x.startswith(prefix__): return x[len(prefix__) :] # The way this is used below, this else shouldn't really execute # But its here for completeness in case something goes wrong else: return x # pragma: no cover # Handle passage of certain scalar values if isinstance(keys, (str, float, int, bool, type)): keys = [keys] if prefix is not None: keys = [f"{prefix}__{key}" for key in keys] else: keys = list(keys) subsetted_dict = {rem_prefix(k): v for k, v in input_dict.items() if k in keys} return subsetted_dict
0.948692
0.646321
## [MAILING LIST](https://groups.google.com/forum/#!forum/scikit-beam) # scikit-beam [![Build Status](https://travis-ci.org/scikit-beam/scikit-beam.svg?branch=master)](https://travis-ci.org/scikit-beam/scikit-beam) [![codecov.io](http://codecov.io/github/scikit-beam/scikit-beam/coverage.svg?branch=master)](http://codecov.io/github/scikit-beam/scikit-beam?branch=master) [![Join the chat at https://gitter.im/scikit-beam/scikit-beam](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/scikit-xray/scikit-beam?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) **[Documentation](http://scikit-beam.github.io/scikit-beam/)** ## Examples [scikit-beam-examples repository](https://github.com/scikit-beam/scikit-beam-examples) - [Powder calibration (still needs tilt correction)](https://github.com/scikit-beam/scikit-beam-examples/blob/master/demos/powder_calibration/D_estimate_demo.ipynb) - 1-time correlation - [dir](https://github.com/scikit-beam/scikit-beam-examples/tree/master/demos/1_time_correlation) - [Jupyter notebook](https://github.com/scikit-beam/scikit-beam-examples/blob/master/demos/1_time_correlation/Multi_tau_one_time_correlation_example.ipynb) - Differential Phase Contrast - [dir](https://github.com/scikit-beam/scikit-beam-examples/blob/master/demos/dpc) - [Jupyter notebook](https://github.com/scikit-beam/scikit-beam-examples/blob/master/demos/dpc/dpc_demo.ipynb) - [Fast conversion to reciprocal space](https://github.com/scikit-beam/scikit-beam-examples/blob/master/demos/reciprocal_space/recip_example.ipynb) - [X-Ray Speckle Visibility Spectroscopy](https://github.com/scikit-beam/scikit-beam-examples/blob/master/demos/speckle/speckle-plotting.ipynb) - [Basic Plotting of X-Ray Fluorescence Elemental Lines](https://github.com/scikit-beam/scikit-beam-examples/blob/master/demos/xrf/plot_xrf_spectrum.ipynb) ## Quick start ### install with conda ``` conda install scikit-beam -c nsls2forge ``` ### install development version with setuptools ``` git clone [email protected]:scikit-beam/scikit-beam.git cd scikit-beam python setup.py install ``` ### set up for development ``` git clone [email protected]:scikit-beam/scikit-beam.git cd scikit-beam python setup.py develop pip install pytest coverage setuptools ``` **make sure all the tests pass!** ``` python run_tests.py ``` **and you can check the code coverage with** ``` coverage run run_tests.py coverage report -m ```
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/README.md
README.md
conda install scikit-beam -c nsls2forge git clone [email protected]:scikit-beam/scikit-beam.git cd scikit-beam python setup.py install git clone [email protected]:scikit-beam/scikit-beam.git cd scikit-beam python setup.py develop pip install pytest coverage setuptools python run_tests.py coverage run run_tests.py coverage report -m
0.468547
0.943764
==================================
 Getting Started with Scikit-beam
==================================

Importing scikit-beam
=====================

In order to encourage consistency amongst users in importing and using
Scikit-beam functionality, we have put together the following guidelines.

Since most of the functionality in Scikit-beam resides in sub-packages,
importing scikit-beam as::

    >>> import skbeam

is not very useful. Instead, it is best to import the desired sub-package
with the syntax::

    >>> from skbeam import subpackage  # doctest: +SKIP

For example, to access the correlation-related functionality, you can import
`skbeam.core.correlation` with::

    >>> from skbeam.core import correlation as corr
    >>> g2 = corr.multi_tau_auto_corr(5, 3, labels, img_seq)

Note that for clarity, and to avoid any issues, we recommend **never**
importing any skbeam functionality using ``*``, for example::

    >>> from skbeam.core.correlation import *  # NOT recommended

Some components of Scikit-Beam started off as standalone packages
(e.g. PyFITS, PyWCS), so in cases where Scikit-Beam needs to be used as a
drop-in replacement, the following syntax is also acceptable::

    >>> from skbeam.io import foo as bar

Getting started with subpackages
================================

.. warning:: This is not implemented in skbeam yet

Because different subpackages have very different functionality, further
suggestions for getting started are in the documentation for the subpackages,
which you can reach by browsing the sections listed in the :ref:`user-docs`.

Or, if you want to dive right in, you can either look at docstrings for a
particular package or object, or access their documentation using the
:func:`~skbeam.find_api_page` function. For example, doing this::

    >>> from skbeam import find_api_page
    >>> find_api_page(corr.multi_tau_auto_corr)  # doctest: +SKIP

will bring up the documentation for the
:func:`~skbeam.core.correlation.multi_tau_auto_corr` function in your browser.
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/getting_started.rst
getting_started.rst
0.900076
0.709258
:tocdepth: 2

==================
 Scikit-beam Core
==================

The :ref:`vision<vision>` of ``scikit-beam`` is to provide simple functions
useful for the X-ray, Neutron and Electron communities. The primary goal of
the scikit-beam project is to provide a centralized repository for algorithms
that are used in these three scientific domains. ``scikit-beam`` functions
accept and return standard Python and numpy datatypes, so they integrate well
with other packages from the scientific Python community. Further, the
modular design of scikit-beam allows its components to be easily reused in
ways not envisioned by the authors. Scikit-beam is being developed at the
National Synchrotron Light Source II at Brookhaven National Lab and also in
collaboration with scientists at the LCLS-II and APS.

Supported techniques
====================

* Differential Phase Contrast (:mod:`~skbeam.core.dpc`)
* CDI (:mod:`~skbeam.core.cdi`)
* MultiTau correlation (:mod:`~skbeam.core.correlation`)
* X-Ray Speckle Visibility Spectroscopy (XSVS) (:mod:`~skbeam.core.speckle`)
* X-ray Fluorescence (:mod:`~skbeam.fluorescence`)
* Fast histograms

Other utilities
===============

* Basic constants (:mod:`~skbeam.core.constants`)
* Logical convenience functions (:mod:`~skbeam.core.arithmetic`)
* Utilities for estimating the center of a ring pattern and the
  sample-to-detector distance of a powder pattern
  (:mod:`~skbeam.core.calibration`)
* Peak extraction (:mod:`~skbeam.core.feature`)
* Mask pixels based on a threshold; as a statistical outlier within a bin;
  for proximity to canvas edge (margin). (:mod:`~skbeam.core.mask`)
* Compute reciprocal space coordinates of pixels. (:mod:`~skbeam.core.recip`)
* Draw and manipulate ROI mask; draw kymograph; compute statistics on ROIs.
  (:mod:`~skbeam.core.roi`)
* Misc. utilities (:mod:`~skbeam.core.utils`)
* A thin wrapper around ``scipy.stats.binned_statistic``
  (:mod:`~skbeam.core.stats`)

.. _installation_tl:

Installation
============

.. toctree::
   :maxdepth: 1

   install
   getting_started
   installation

.. _reporting_issues:

Reporting Issues
================

If you have found a bug in scikit-beam please report it. The preferred way is
to create a new issue on the scikit-beam `GitHub issue page
<http://github.com/scikit-beam/scikit-beam/issues>`_; that requires
`creating a free account <https://github.com>`_ on GitHub if you do not have
one. Please include an example that demonstrates the issue and that will
allow the developers to reproduce and fix the problem. You may be asked to
also provide information about your operating system and a full Python stack
trace; the Skbeam developers will walk you through obtaining a stack trace if
it is necessary.

API Docs
--------

.. toctree::
   :maxdepth: 3

   resource/api/index

.. _contributing:

Contributing
============

The scikit-beam project is made both by and for its users, so we highly
encourage contributions at all levels. This spans the gamut from sending an
email mentioning a typo in the documentation or requesting a new feature all
the way to developing a major new package. The full range of ways to be part
of the Skbeam project is described at `Contribute to scikit-beam
<http://scikit-beam.github.io/contribute.html>`_. To get started contributing
code or documentation (no git or GitHub experience necessary):

.. toctree::
   :maxdepth: 1

   development/workflow/get_devel_version
   development/workflow/development_workflow

.. _developer-docs:

Developer Documentation
=======================

The developer documentation contains instructions for how to contribute to
Skbeam or affiliated packages, as well as coding, documentation, and testing
guidelines. For the guiding vision of this process and the project as a
whole, see :doc:`development/vision`.

.. toctree::
   :maxdepth: 1

   overview
   development/workflow/development_workflow
   development/codeguide
   development/docguide
   development/testguide
   development/scripts
   development/building
   development/ccython
   development/releasing
   development/workflow/maintainer_workflow
   development/affiliated-packages
   development/vision
   resource/dev_guide/index
   api_changes
   python_versions
   warnings

other
=====

.. toctree::
   :maxdepth: 1

.. toctree::
   :maxdepth: 1

   whatsnew/index
   whats_new
   known_issues
   generated/examples/index
   introduction

Indices and Tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/index.rst
index.rst
0.835215
0.778523
.. _installation: Installing scikit-beam ---------------------- For Python Novices ^^^^^^^^^^^^^^^^^^ Installation is simple on Windows, OSX, and Linux, even for Python novices. 1. Get Scientific Python """""""""""""""""""""""" To get started with Python on any platform, download and install `Anaconda <https://store.continuum.io/cshop/anaconda/>`_. It comes with the common scientific Python packages built in. 2. Install scikit-beam """""""""""""""""""""" TODO: make this actually work! Open a command prompt. On Windows, you can use the "Anaconda Command Prompt" installed by Anaconda or Start > Applications > Command Prompt. On a Mac, look for Applications > Utilities > Terminal. Type these commands: .. code-block:: bash conda update conda conda config --add channels scikit-beam # to install the latest stable release conda install scikit-beam The above installs scikit-beam and all its requirements. Our tutorials also use the IPython notebook. To install that as well, type .. code-block:: bash conda install ipython-notebook 3. Try it out! """""""""""""" Finally, to try it out, type .. code-block:: bash ipython notebook This will automatically open a browser tab, ready to interpret Python code. To get started, check out the links to tutorials at the top of this document. More Information for Experienced Python Users ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We strongly recommend using conda install scikit-beam, as described above, but pip is also supported. Essential Dependencies: * python (both 2 and 3 are supported) * setuptools * six * numpy Optional Dependencies: * scipy * scikit-image * xraylib * lmfit * netcdf4 .. code-block:: bash git clone https://github.com/scikit-beam/scikit-beam pip install -e scikit-beam Updating Your Installation -------------------------- The code is under active development. To update to the latest **stable** release, run this in the command prompt: .. code-block:: bash conda update -c scikit-beam scikit-beam The code is under active development. To update to the latest **development** release, run this in the command prompt: .. code-block:: bash conda update -c scikit-beam/channels/dev scikit-beam
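Checking Your Installation
--------------------------

Whichever route you used, a quick way to confirm that the install worked is
to import the package from a Python session (this assumes ``skbeam`` exposes
a ``__version__`` attribute, as most scientific Python packages do):

.. code-block:: python

    import skbeam
    print(skbeam.__version__)

If the import succeeds, scikit-beam is ready to use.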
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/installation.rst
installation.rst
0.561455
0.439687
.. _python-warnings:

**********************
Python warnings system
**********************

.. doctest-skip-all

Scikit-beam uses the Python :mod:`warnings` module to issue warning messages.
The details of using the warnings module are general to Python, and apply to
any Python software that uses this system.

The user can suppress the warnings using the python command line argument
``-W"ignore"`` when starting an interactive python session. For example::

    $ python -W"ignore"

The user may also use the command line argument when running a python script
as follows::

    $ python -W"ignore" myscript.py

It is also possible to suppress warnings from within a python script. For
instance, the warnings issued from a single call to the
`scikit-beam.io.fits.writeto` function may be suppressed from within a Python
script using the `warnings.filterwarnings` function as follows::

    >>> import warnings
    >>> from scikit-beam.io import fits
    >>> warnings.filterwarnings('ignore', category=UserWarning, append=True)
    >>> fits.writeto(filename, data, clobber=True)

An equivalent way to insert an entry into the list of warning filter
specifications is to call `warnings.simplefilter`::

    >>> warnings.simplefilter('ignore', UserWarning)

Scikit-beam includes its own warning classes,
`~scikit-beam.utils.exceptions.Scikit-beamWarning` and
`~scikit-beam.utils.exceptions.Scikit-beamUserWarning`. All warnings from
Scikit-beam are based on these warning classes (see below for the distinction
between them). One can thus ignore all warnings from Scikit-beam (while still
allowing through warnings from other libraries like Numpy) by using something
like::

    >>> from scikit-beam.utils.exceptions import Scikit-beamWarning
    >>> warnings.simplefilter('ignore', category=Scikit-beamWarning)

Warning filters may also be modified just within a certain context using the
`warnings.catch_warnings` context manager::

    >>> with warnings.catch_warnings():
    ...     warnings.simplefilter('ignore', Scikit-beamWarning)
    ...     fits.writeto(filename, data, clobber=True)

As mentioned above, there are actually *two* base classes for Scikit-beam
warnings. The main distinction is that
`~scikit-beam.utils.exceptions.Scikit-beamUserWarning` is for warnings that
are *intended* for typical users (e.g. "Warning: Ambiguous unit", something
that might be because of improper input). In contrast,
`~scikit-beam.utils.exceptions.Scikit-beamWarning` warnings that are *not*
`~scikit-beam.utils.exceptions.Scikit-beamUserWarning` may be lower-level
warnings that are more useful for developers writing code that *uses*
Scikit-beam (e.g., the deprecation warnings discussed below). So if you're a
user who just wants to silence everything, the code above will suffice, but
if you are a developer and want to hide development-related warnings from
your users, you may wish to still allow through
`~scikit-beam.utils.exceptions.Scikit-beamUserWarning`.

Scikit-beam also issues warnings when deprecated API features are used. If
you wish to *squelch* deprecation warnings, you can start Python with
``-Wi::Deprecation``. This sets all deprecation warnings to ignored. There is
also a Scikit-beam-specific
`~scikit-beam.utils.exceptions.Scikit-beamDeprecationWarning` which can be
used to disable deprecation warnings from Scikit-beam only.

See `the CPython documentation
<http://docs.python.org/2/using/cmdline.html#cmdoption-W>`__ for more
information on the -W argument.
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/warnings.rst
warnings.rst
0.816516
0.552419
.. _introduction: Introduction to scikit-beam --------------------------- Targeted Techniques ^^^^^^^^^^^^^^^^^^^ The following is a list of algorithms that are currently available in scikit-beam along with some planned work for the future. If any of the planned work looks interesting to you, please jump in and contribute on `github <https://github.com/scikit-beam/scikit-beam>`_! .. See our :doc:`/example` section for curated Jupyter notebooks that walk through using these algorithms. Currently implemented ===================== * Differential Phase Contrast (:mod:`~skbeam.core.dpc`) * CDI (:mod:`~skbeam.core.cdi`) * MultiTau correlation * Fast 2-D image conversion to Q * Fast gridding of 3-D point cloud into 2-D plane * X-Ray Speckle Visibility Spectroscopy (XSVS) * X-ray Fluorescence * `Fitting GUI <https://github.com/NSLS-II/pyxrf>`_ * Fast histograms * Access to basic constants (:mod:`~skbeam.core.constants`) Under active development ======================== * Powder Diffraction * Image Segmentation * Tomography * Absorption * Fluorescence * Correlation * 4-time Planned ======= * Ptychography * Inelastic Scattering * Coherent Diffractive Imaging * GPU implementation of Multi-tau correlation * XANES (1-D, 2-D)
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/introduction.rst
introduction.rst
0.915302
0.746809
Supported versions of Python
----------------------------

The primary development target of ``scikit-beam`` is python 3.5+. Affiliated
packages are encouraged, but not required, to support legacy python when
practical. The core library will support 2.7 as long as the upstream
scientific libraries do.

Upstream (CPython) has made it very clear that python2 will not be supported
going forward and that `no new features will be added to python 2
<https://www.python.org/dev/peps/pep-0404/>`__. The EOL for python 2.7 was
already extended from `2015 to 2020
<http://legacy.python.org/dev/peps/pep-0373/>`__ to fix `critical network
security issues <https://www.python.org/dev/peps/pep-0466/>`__ for entities
that have large network facing code bases that they can not quickly migrate.
All of the core libraries of the scientific stack fully support python 3.x,
and support for python 2.6 has been dropped for matplotlib and pandas and is
scheduled to be dropped by numpy. The discussion in the community is not 'if'
to drop legacy python support, but 'when' and 'how'.

Moving to python 3.5+ will greatly simplify supporting c-extensions on
windows. Currently, compiling c-extensions for legacy python requires using
an unsupported version of visual studio (which was re-released as a
'community edition' for the sole reason of supporting python). Due to changes
in the MS c runtime, c-extensions will always be able to be compiled with the
current version of visual studio.
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/python_versions.rst
python_versions.rst
0.829077
0.414425
======================
 Project organization
======================

Here we describe a broad overview of the Scikit-Beam project and its parts.

Scikit-beam Project Concept
===========================

The "Scikit-beam Project" is distinct from the ``scikit-beam`` package. The
scikit-beam Project is a process intended to facilitate communication and
interoperability of python packages/codes in xray, neutron, and electron
facilities. The project thus encompasses the ``scikit-beam`` core package
(which provides a common framework), all "affiliated packages" (described
below in `Affiliated Packages`_), and a general community aimed at bringing
resources together and not duplicating efforts.

``scikit-beam`` Core Package
============================

The ``scikit-beam`` package (alternatively known as the "core" package)
contains various classes, utilities, and a packaging framework intended to
provide commonly-used tools. It is divided into a variety of sub-packages,
which are documented in the remainder of this documentation (see
:ref:`user-docs` for documentation of these components).

The core also provides this documentation, and a variety of utilities that
simplify starting other scientific python packages. As described in the
following section, these simplify the process of creating affiliated
packages.

Affiliated Packages
===================

The Scikit-Beam project includes the concept of "affiliated packages." An
affiliated package is a related python package that is not part of the
``scikit-beam`` core source code, but has requested to be included in the
general community effort of the Scikit-Beam project. Such a package may be a
candidate for eventual inclusion in the main ``scikit-beam`` package
(although this is not required). Until then, however, it is a separate
package, and may not be in the ``scikit-beam`` namespace.

If you are interested in starting an affiliated package, or have a package
you are interested in making more compatible with scikit-beam, the
``scikit-beam`` core package includes features that simplify and homogenize
package management. Scikit-Beam provides a package template (TODO) that
provides a common way to organize a package, to make your life simpler. You
can use this template either with a new package you are starting or an
existing package to give it most of the organizational tools Scikit-Beam
provides, including the documentation, testing, and Cython-building tools.
See the usage instructions in the template (TODO) for further details.

To then get your package listed on the registry, take a look at the
guidelines for becoming an affiliated package (TODO) and then post your
intent on the (WE NEED A MAILING LIST). The Scikit-Beam coordination
committee, in consultation with the community, will provide you feedback on
the package, and will add it to the registry when it is approved.

Community
=========

Aside from the actual code, Scikit-Beam is also a community of beam
line/facility-associated users and developers that agree that sharing
utilities is healthy for the community and the science it produces. This
community is of course central to accomplishing anything with the code
itself. We follow the `Python Software Foundation Code of Conduct
<http://www.python.org/psf/codeofconduct/>`_ and welcome anyone who wishes to
contribute to the project.
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/overview.rst
overview.rst
0.8488
0.928603
scikit-xray v0.0.5
------------------

New Functionality
=================

* X-Ray Speckle Visibility Spectroscopy,
  `PR 293 <https://github.com/scikit-xray/scikit-xray/pull/293>`_
* Fitting 1-time correlation data to ISF equation,
  `PR 295 <https://github.com/scikit-xray/scikit-xray/pull/295>`_
* Kymograph (aka waterfall plot),
  `PR 306 <https://github.com/scikit-xray/scikit-xray/pull/306>`_

API Changes
===========

* :func:`weighted_nnls_fit` was removed from
  :mod:`skxray.core.fitting.xrf_model`. Weighted nnls fitting was combined
  into :func:`nnls_fit`, which includes weights as a new argument.
* :func:`extract_label_indices` is a helper function for labeled arrays and
  was moved to its new home in `skxray.core.roi` from `skxray.core.correlation`

Other updates
=============

* `PR 316 <https://github.com/scikit-xray/scikit-xray/pull/316>`_: Do a better
  job isolating dependencies so that our "optional" packages truly are optional
* `PR 319 <https://github.com/scikit-xray/scikit-xray/pull/319>`_: Use latest
  lmfit version published to scikit-xray anaconda.org channel in travis build
* `PR 326 <https://github.com/scikit-xray/scikit-xray/pull/326>`_: Add quick
  start guide and note about testing
* `PR 327 <https://github.com/scikit-xray/scikit-xray/pull/327>`_: Pin to
  lmfit 0.8.3 in conda recipe
* `PR 332 <https://github.com/scikit-xray/scikit-xray/pull/332>`_: Correct the
  equation in the one-time correlation docstring
* `PR 333 <https://github.com/scikit-xray/scikit-xray/pull/333>`_: Update
  readme with new examples in `scikit-xray-examples
  <https://github.com/scikit-xray/scikit-xray-examples>`_
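For the :func:`extract_label_indices` relocation noted above, only the import
path changes; a minimal before/after sketch:

.. code-block:: python

    # Old import path (as stated in the API change above):
    # from skxray.core.correlation import extract_label_indices

    # New import path as of this release:
    from skxray.core.roi import extract_label_indices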
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/whatsnew/0.5.rst
0.5.rst
0.934657
0.710729
.. _vision:

Vision for a Common Python Package for x-ray, electron and neutrons
====================================================================

The following document summarizes a vision for a common Python package used
across the whole community, and how we can best all work together to achieve
this. In the following document, this common package will be referred to as
the core package. This vision is not set in stone, and we are committed to
adapting it to whatever process and guidelines work in practice.

The ultimate goal that we seek is a package that would contain much of the
core functionality and some common tools required across the community, but
not *everything* a domain scientist will ever need. The aim is primarily to
avoid duplication for common core tasks, and to provide a robust framework
upon which to build more complex tools.

Such a common package should not preclude any other package from existing,
because there will always be more complex and/or specialized tools required.
These tools will be able to rely on a single core library for many tasks, and
thus reduce the number of dependencies, reduce duplication of functionality,
and increase consistency of their interfaces.

Procedure
---------

With the help of the community, the coordination committee will start by
identifying a few key areas where initial development/consolidation will be
needed (such as conversion to reciprocal space, non-linear fitting, detector
corrections, time correlations, etc.) and will encourage teams to be formed to
build standalone packages implementing this functionality. These packages will
be referred to as affiliated packages (meaning that they are intended for
future integration in the core package).

A set of requirements will be set out concerning the interfaces and
classes/methods that affiliated packages will need to make available in order
to ensure consistency between the different components. As the core package
grows, new potential areas/components for the core package will be identified.
Competition cannot be avoided, and will not be actively discouraged, but
whenever possible, developers should strive to work as a team to provide a
single and robust affiliated package, for the benefit of the community.

The affiliated packages will be developed outside the core package in
independent repositories, which will allow the teams the choice of tool and
organization. Once an affiliated package has implemented the desired
functionality, and satisfies quality criteria for coding style, documentation,
and testing, it will be considered for inclusion in the core package, and
further development will be done directly in the core package either via
direct access to the repository, or via patches/pull requests.

To ensure uniformity across affiliated packages, and to facilitate integration
with the core package, developers who wish to submit their affiliated packages
for inclusion in the core will need to follow the layout of a 'template'
package that will be provided before development starts.

Dependencies
------------

Affiliated packages should be able to be imported with only the following
dependencies:

- The Python Standard Library, NumPy, SciPy, pandas, and Matplotlib
- Components already in the core package

Other packages may be used, but must be imported as needed rather than during
the initial import of the package (see the sketch below).

If a dependency is needed, but is an affiliated package, the dependent package
will need to wait until the dependency is integrated into the core package
before being itself considered for inclusion. In the meantime, it can make use
of the other affiliated package in its current form, or other packages, so as
not to stall development. Thus, the first packages to be included in the core
will be those only requiring the standard library, NumPy, SciPy, pandas, and
Matplotlib.

If the required dependency will never be part of a main package, then by
default the dependency can be included but should be imported as needed
(meaning that it only prevents the importing of that component, not the entire
core package), unless a strong case is made and a general consensus is reached
by the community that this dependency is important enough to be required at a
higher level.

This system means that packages will be integrated into the core package in an
order depending on the dependency tree, and also ensures that the interfaces
of packages being integrated into the core package are consistent with those
already in the core package.

Initially, no dependency on GUI toolkits will be allowed in the core package.
If the community agrees on a single toolkit that could be used, then this
toolkit will be allowed (but will only be imported as needed).

Keeping track of affiliated packages
------------------------------------

Affiliated packages will be listed in a central location (in addition to
PyPI/conda-forge).

Existing Packages
-----------------

Developers who already have existing packages will be encouraged to continue
supporting them for the benefit of users until the core library is considered
stable, contains this functionality, and is released to the community.
Thereafter, developers should encourage users to transition to using the
functionality in the core package, and eventually phase out their own
packages, unless they provide added value over the core package.
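The deferred-import rule above can be illustrated with a short sketch. The
function and the choice of ``lmfit`` as the optional dependency are
illustrative only, not part of any required interface:

.. code-block:: python

    # Module-level imports are limited to the allowed core dependencies.
    import numpy as np


    def fit_gaussian(y):
        """Fit a Gaussian using the optional ``lmfit`` package.

        ``lmfit`` is imported inside the function, so a missing optional
        dependency only breaks this function, not import of the package.
        """
        import lmfit  # deferred import of an optional dependency

        x = np.arange(len(y), dtype=float)
        model = lmfit.models.GaussianModel()
        params = model.guess(y, x=x)
        return model.fit(y, params, x=x)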
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/vision.rst
vision.rst
0.830353
0.878314
============================
Writing Command-Line Scripts
============================

.. warning ::

    We currently have no CL scripts. Whether we want to pick any up is an open
    question. Leaving this text mostly un-edited from astropy.

Command-line scripts in Scikit-beam should follow a consistent scheme to
promote readability and compatibility. Setuptools' `"entry points"`_ are used
to automatically generate wrappers with the correct extension.

The scripts can live in their own module, or be part of a larger module that
implements a class or function for scikit-beam library use. They should have a
``main`` function to parse the arguments and pass those arguments on to some
library function so that the library function can be used programmatically
when needed. The ``main`` function should accept an optional single argument
that holds the ``sys.argv`` list, except for the script name (e.g.,
``argv[1:]``). It must then be added to the list of entry points in the
``setup.py`` file (see the example below).

Command-line options can be parsed however desired, but the :mod:`argparse`
module is recommended when possible, due to its simpler and more flexible
interface relative to the older :mod:`optparse`. :mod:`argparse` is only
available in python >=2.7 and >=3.2, however, so it should be imported as
``from scikit-beam.utils.compat import argparse``.

.. _"entry points": https://pythonhosted.org/setuptools/setuptools.html#automatic-script-creation

Examples
--------

Contents of ``/scikit-beam/somepackage/somemod.py`` ::

    def do_something(args, option=False):
        for a in args:
            if option:
                ...do something...
            else:
                ...do something else...

    def main(args=None):
        from scikit-beam.utils.compat import argparse

        parser = argparse.ArgumentParser(description='Process some integers.')
        parser.add_argument('-o', '--option', dest='op', action='store_true',
                            help='Some option that turns something on.')
        parser.add_argument('stuff', metavar='S', nargs='+',
                            help='Some input I should be able to get lots of.')

        res = parser.parse_args(args)
        do_something(res.stuff, res.op)

Then add the script to the ``setup.py`` ::

    entry_points['console_scripts'] = [
        'somescript = scikit-beam.somepackage.somemod:main',
        ...
    ]
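Because ``main`` accepts an ``argv``-style list, the same code path can be
exercised from Python as well as from the shell. A minimal sketch, assuming
the hypothetical ``somemod`` module above is importable:

.. code-block:: python

    # Programmatic use of the hypothetical script defined above;
    # equivalent to running "somescript -o input1 input2" from the shell.
    from somemod import main

    main(['-o', 'input1', 'input2'])

    # Calling main() with no argument lets argparse fall back to
    # sys.argv[1:], which is what the console-script wrapper relies on.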
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/scripts.rst
scripts.rst
0.817938
0.373533
.. _documentation-guidelines:

=======================
 Writing Documentation
=======================

High-quality, consistent documentation for the science code is one of the
major goals of the Scikit-beam project. Hence, we describe our documentation
procedures and rules here. For the scikit-beam core project we try to keep to
these as closely as possible, while the standards for affiliated packages are
somewhat looser. (These procedures and guidelines are still recommended for
affiliated packages, as they encourage useful documentation, a characteristic
often lacking in scientific software.)

Building the Documentation from source
======================================

For information about building the documentation from source, see the
:ref:`builddocs` section in the installation instructions.

Scikit-beam Documentation Rules and Guidelines
==============================================

This section describes the documentation standards that affiliated packages
must follow to be considered for integration into the core module, as well as
the standard Scikit-beam docstring format.

* All documentation should be written using the Sphinx documentation tool.

* The template package will provide a recommended general structure for
  documentation. [TODO]

* Docstrings must be provided for all public classes, methods, and functions.

* Docstrings will be incorporated into the documentation using a version of
  numpydoc included with Scikit-beam, and should follow the :doc:`docrules`.
  [TODO do we want to vendor numpydoc or use napoleon?]

* Examples and/or tutorials are strongly encouraged for typical use-cases of a
  particular module or class.

* Any external package dependencies aside from NumPy_, SciPy_, or Matplotlib_
  must be explicitly mentioned in the documentation.

* Configuration options using the :mod:`scikit-beam.config` mechanisms must be
  explicitly mentioned in the documentation. [TODO do we want to keep config]

The details of the docstring format are described on a separate page:

.. toctree::

    docrules

numpydoc Extension
------------------

This extension (and some related extensions) are a port of the
`numpydoc <http://pypi.python.org/pypi/numpydoc/0.3.1>`_ extension written by
the NumPy_ and SciPy_ projects, with some tweaks for Scikit-beam. Its main
purpose is to reprocess docstrings from code into a form sphinx understands.
Generally, there's no need to interact with it directly, as docstrings
following the :doc:`docrules` will be processed automatically.

.. _NumPy: http://numpy.scipy.org/
.. _numpydoc: http://pypi.python.org/pypi/numpydoc/0.3.1
.. _Matplotlib: http://matplotlib.sourceforge.net/
.. _SciPy: http://www.scipy.org
.. _Sphinx: http://sphinx.pocoo.org
.. _scikit-beam-helpers: https://github.com/scikit-beam/scikit-beam-helpers
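A short sketch of a docstring in the numpydoc style that the rules above call
for. The ``rebin`` function and its parameters are invented for illustration
and are not part of the scikit-beam API; see :doc:`docrules` for the
authoritative format:

.. code-block:: python

    def rebin(image, factor):
        """Downsample an image by an integer factor.

        Parameters
        ----------
        image : ndarray
            Two-dimensional input image.
        factor : int
            Integer binning factor along both axes.

        Returns
        -------
        binned : ndarray
            The rebinned image; each output pixel is the mean of a
            ``factor x factor`` block of input pixels.

        Examples
        --------
        >>> import numpy as np
        >>> rebin(np.ones((4, 4)), 2).shape
        (2, 2)
        """
        h, w = image.shape[0] // factor, image.shape[1] // factor
        trimmed = image[:h * factor, :w * factor]
        return trimmed.reshape(h, factor, w, factor).mean(axis=(1, 3))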
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/docguide.rst
docguide.rst
0.577853
0.65247
=============================================
 Emacs setup for following coding guidelines
=============================================

.. _flymake: http://www.emacswiki.org/emacs/FlyMake
.. _pyflakes: http://pypi.python.org/pypi/pyflakes
.. _pep8: http://pypi.python.org/pypi/pep8

.. include:: workflow/known_projects.inc

The Scikit-beam coding guidelines are listed in :doc:`codeguide`. This
document describes some configuration options for Emacs that will help in
ensuring that Python code satisfies the guidelines. Emacs can be configured in
several different ways, so instead of providing a drop-in configuration file,
only the individual configurations are presented below.

For this setup we will need flymake_, pyflakes_ and the pep8_ Python script,
in addition to ``python-mode``. Flymake comes with Emacs 23. The rest can be
obtained from their websites, or can be installed using `pip`_ (see the
example at the end of this page).

Global settings
===============

No tabs
-------

This setting will cause all tabs to be replaced with spaces. The number of
spaces to use is set in the :ref:`basic settings` section below.

.. code-block:: scheme

    ;; Don't use TABS for indentations.
    (setq-default indent-tabs-mode nil)

Maximum number of characters in a line
--------------------------------------

Emacs will automatically insert a new line after "fill-column" number of
columns. PEP8 specifies a maximum of 79, but this can be set to a smaller
value also, for example 72.

.. code-block:: scheme

    ;; Set the number to the number of columns to use.
    (setq-default fill-column 79)

    ;; Add Autofill mode to mode hooks.
    (add-hook 'text-mode-hook 'turn-on-auto-fill)

    ;; Show line number in the mode line.
    (line-number-mode 1)

    ;; Show column number in the mode line.
    (column-number-mode 1)

Syntax highlighting
-------------------

Enable syntax highlighting. This will also highlight lines that form a region.

.. code-block:: scheme

    (global-font-lock-mode 1)

Python specific settings
========================

.. _`basic settings`:

Basic settings
--------------

Indentation is automatically added. When a tab is pressed it is replaced with
4 spaces. When backspace is pressed on an empty line, the cursor will jump to
the previous indentation level.

.. code-block:: scheme

    (load-library "python")

    (autoload 'python-mode "python-mode" "Python Mode." t)
    (add-to-list 'auto-mode-alist '("\\.py\\'" . python-mode))
    (add-to-list 'interpreter-mode-alist '("python" . python-mode))

    (setq interpreter-mode-alist
          (cons '("python" . python-mode) interpreter-mode-alist)
          python-mode-hook
          '(lambda () (progn
                        (set-variable 'py-indent-offset 4)
                        (set-variable 'indent-tabs-mode nil))))

Highlight the column where a line must stop
-------------------------------------------

The "fill-column" column is highlighted in red. For this to work, download
`column-marker.el <http://www.emacswiki.org/emacs/column-marker.el>`_ and
place it in the Emacs configuration directory.

.. code-block:: scheme

    ;; Highlight character at "fill-column" position.
    (require 'column-marker)
    (set-face-background 'column-marker-1 "red")
    (add-hook 'python-mode-hook
              (lambda () (interactive) (column-marker-1 fill-column)))

Flymake
-------

Flymake will mark lines that do not satisfy syntax requirements in red. When
the cursor is on such a line a message is displayed in the mini-buffer. When
the mouse pointer is on such a line a "tool tip" message is also shown.

For flymake to work with pep8_ and pyflakes_, create an executable file named
``pychecker`` with the following contents. This file must be in the system
path.

.. code-block:: sh

    #!/bin/bash

    pyflakes "$1"
    pep8 --ignore=E221,E701,E202 --repeat "$1"
    true

Also download `flymake-cursor.el
<http://www.emacswiki.org/emacs/flymake-cursor.el>`_ and place it in the Emacs
configuration directory. Then add the following code to the Emacs
configuration:

.. code-block:: scheme

    ;; Setup for Flymake code checking.
    (require 'flymake)
    (load-library "flymake-cursor")

    ;; Script that flymake uses to check code. This script must be
    ;; present in the system path.
    (setq pycodechecker "pychecker")

    (when (load "flymake" t)
      (defun flymake-pycodecheck-init ()
        (let* ((temp-file (flymake-init-create-temp-buffer-copy
                           'flymake-create-temp-inplace))
               (local-file (file-relative-name
                            temp-file
                            (file-name-directory buffer-file-name))))
          (list pycodechecker (list local-file))))
      (add-to-list 'flymake-allowed-file-name-masks
                   '("\\.py\\'" flymake-pycodecheck-init)))

    (add-hook 'python-mode-hook 'flymake-mode)

.. note::

    Flymake will save files with suffix *_flymake* in the current directory.
    If it crashes for some reason, then these files will not get deleted.

    Sometimes there is a delay in refreshing the results.

Delete trailing white spaces and blank lines
--------------------------------------------

To manually delete trailing whitespaces, press ``C-t C-w``, which will run the
command ``delete-trailing-whitespace``. This command is also run when a file
is saved, and hence all trailing whitespaces will be deleted on saving a
Python file.

To make sure that all "words" are separated by only one space, type ``M-SPC``
(use the ALT key, since ``M-SPC`` sometimes brings up a context menu).

To collapse a set of blank lines to one blank line, place the cursor on one of
these and press ``C-x C-o``. This is useful for deleting multiple blank lines
at the end of a file.

.. code-block:: scheme

    ;; Remove trailing whitespace manually by typing C-t C-w.
    (add-hook 'python-mode-hook
              (lambda ()
                (local-set-key (kbd "C-t C-w") 'delete-trailing-whitespace)))

    ;; Automatically remove trailing whitespace when file is saved.
    (add-hook 'python-mode-hook
              (lambda()
                (add-hook 'local-write-file-hooks
                          '(lambda()
                             (save-excursion
                               (delete-trailing-whitespace))))))

    ;; Use M-SPC (use ALT key) to make sure that words are separated by
    ;; just one space. Use C-x C-o to collapse a set of empty lines
    ;; around the cursor to one empty line. Useful for deleting all but
    ;; one blank line at end of file. To do this go to end of file (M->)
    ;; and type C-x C-o.

.. LocalWords:  whitespaces
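As mentioned near the top of this page, pyflakes_ and pep8_ can be installed
with pip. A minimal sketch (package names taken from the links above; run
inside whatever environment you use for development):

.. code-block:: sh

    # Install the two checkers used by the flymake setup on this page.
    $ pip install pyflakes pep8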
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/codeguide_emacs.rst
codeguide_emacs.rst
0.881863
0.328287
.. _building-c-or-cython-extensions:

======================
C or Cython Extensions
======================

.. warning::

    This page has not been fully adapted from astropy

Scikit-beam supports using C extensions for wrapping C libraries and Cython
for speeding up computationally-intensive calculations. Both Cython and C
extension building can be customized using the ``get_extensions`` function of
the ``setup_package.py`` file. If defined, this function must return a list of
:class:`distutils.core.Extension` objects. The creation process is left to the
subpackage designer, and can be customized however is relevant for the
extensions in the subpackage.

While C extensions must always be defined through the ``get_extensions``
mechanism, Cython files (ending in ``.pyx``) are automatically located and
loaded in separate extensions if they are not in ``get_extensions``. For
Cython extensions located in this way, headers for numpy C functions are
included in the build, but no other external headers are included. ``.pyx``
files present in the extensions returned by ``get_extensions`` are not
included in the list of automatically generated extensions.

Note that this allows disabling a Cython file by providing an extension that
includes the Cython file, but giving it the special ``name`` 'cython_skip'.
Any extension with this package name will not be built by ``setup.py``.

.. note::

    If an :class:`~distutils.core.Extension` object is provided for Cython
    source files using the ``get_extensions`` mechanism, it is very important
    that the ``.pyx`` files be given as the ``source``, rather than the ``.c``
    files generated by Cython.

Installing C header files
-------------------------

If your C extension needs to be linked from other third-party C code, you
probably want to install its header files alongside the Python module.

1) Create an ``include`` directory inside of your package for all of the
   header files.

2) Use the ``get_package_data`` hook in ``setup_package.py`` to install those
   header files. For example, the ``scikit-beam.wcs`` package has this::

       def get_package_data():
           return {'scikit-beam.wcs': ['include/*.h']}

Preventing importing at build time
----------------------------------

In rare cases, some packages may need to be imported at build time.
Unfortunately, anything that requires a C or Cython extension will fail to
import until the build phase has completed. In these cases, the
``_ASTROPY_SETUP_`` variable can be used to determine if the package is being
imported as part of the build and choose to not import problematic modules.
``_ASTROPY_SETUP_`` is inserted into the builtins, and is `True` when inside
of scikit-beam's ``setup.py`` script, and `False` otherwise.

For example, suppose there is a subpackage ``foo`` that needs to import a
module called ``version.py`` at build time in order to set some version
information, and also has a C extension, ``process``, that will not be
available in the source tree. In this case, ``scikit-beam/foo/__init__.py``
would probably want to check the value of ``_ASTROPY_SETUP_`` before importing
the C extension::

    try:
        from . import process
    except ImportError:
        if not _ASTROPY_SETUP_:
            raise

    from . import version
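A minimal sketch of what a ``get_extensions`` hook for a Cython module might
look like. The module name and source path (``skbeam.core._fastcorr``) are
hypothetical placeholders; note that the ``.pyx`` file, not the generated
``.c`` file, is listed as the source, per the note above:

.. code-block:: python

    # setup_package.py -- illustrative sketch only
    from distutils.core import Extension


    def get_extensions():
        import numpy as np  # for the numpy C headers

        # The .pyx file is given as the source, not the Cython-generated .c.
        ext = Extension(
            name='skbeam.core._fastcorr',            # hypothetical module
            sources=['skbeam/core/_fastcorr.pyx'],   # hypothetical source
            include_dirs=[np.get_include()],
        )
        return [ext]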
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/ccython.rst
ccython.rst
0.930443
0.485905
==========================================
 Building Scikit-beam and its Subpackages
==========================================

.. warning::

    This page still needs to be adapted from astropy

The build process currently uses the
`setuptools <https://bitbucket.org/pypa/setuptools>`_ package to build and
install the scikit-beam core (and any affiliated packages that use the
template).

Customizing setup/build for subpackages
=======================================

As is typical, there is a single ``setup.py`` file that is used for the whole
``scikit-beam`` package. To customize setup parameters for a given
sub-package, a ``setup_package.py`` file can be defined inside a package, and
if it is present, the setup process will look for the following functions to
customize the build process:

* ``get_package_data``
    This function, if defined, should return a dictionary mapping the name of
    the subpackage(s) that need package data to a list of data file paths
    (possibly including wildcards) relative to the path of the package's
    source code. e.g. if the source distribution has a needed data file
    ``scikit-beam/wcs/tests/data/3d_cd.hdr``, this function should return
    ``{'scikit-beam.wcs.tests':['data/3d_cd.hdr']}``. See the ``package_data``
    option of the :func:`distutils.core.setup` function.

    It is recommended that all such data be in a directory named ``data``
    inside the package within which it is supposed to be used. This package
    data should be accessed via the
    ``scikit-beam.utils.data.get_pkg_data_filename`` and
    ``scikit-beam.utils.data.get_pkg_data_fileobj`` functions.

* ``get_extensions``
    This provides information for building C or Cython extensions. If defined,
    it should return a list of :class:`distutils.core.Extension` objects
    controlling the Cython/C build process (see below for more detail).

* ``get_build_options``
    This function allows a package to add extra build options. It should
    return a list of tuples, where each element has:

    - *name*: The name of the option as it would appear on the commandline or
      in the ``setup.cfg`` file.

    - *doc*: A short doc string for the option, displayed by
      ``setup.py build --help``.

    - *is_bool* (optional): When `True`, the option is a boolean option and
      doesn't have an associated value.

* ``get_external_libraries``
    This function declares that the package uses libraries that are included
    in the scikit-beam distribution that may also be distributed elsewhere on
    the user's system. It should return a list of library names. For each
    library, a new build option is created, ``'--use-system-X'``, which allows
    the user to request to use the system's copy of the library.

A sketch of a ``setup_package.py`` combining several of these hooks is shown
below.
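The following sketch illustrates the hooks described above. The subpackage
name (``skbeam.foo``), data patterns, option names, and library name are
hypothetical and only show the expected return types:

.. code-block:: python

    # setup_package.py -- illustrative sketch only


    def get_package_data():
        # Ship data and header files found under skbeam/foo/.
        return {'skbeam.foo': ['data/*.dat', 'include/*.h']}


    def get_build_options():
        # (name, doc, is_bool) tuples as described above.
        return [
            ('use-fast-corr',
             'Build the optional fast correlation extension.',
             True),
        ]


    def get_external_libraries():
        # Declares a bundled library; adds a --use-system-fftw build option.
        return ['fftw']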
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/building.rst
building.rst
0.855036
0.668432
:orphan:

.. _using-virtualenv:

Using virtualenv
================

`virtualenv`_ is a tool for creating and activating isolated Python
environments that allow installing and experimenting with Python packages
without disrupting your production Python environment. When using commands
such as ``python setup.py develop``, for example, it is strongly recommended
to do so within a virtualenv. This is generally preferable to installing a
development version of Scikit-beam into your system site-packages and having
to keep track of whether or not your environment is in a "known good"
configuration for production/science use.

Using a virtualenv is also a good way to try out new versions of software that
you're not actively doing development work on without disrupting your normal
production environment.

We won't provide a full tutorial on using virtualenv here |emdash| the
virtualenv documentation linked to above is a better place to start. But here
is a quick overview on how to set up a virtualenv for Scikit-beam development
with your default Python version:

#. Install virtualenv::

       $ pip install virtualenv

   or (on Debian/Ubuntu)::

       $ sudo apt-get install python-virtualenv

   etc.

#. (Recommended) Create a root directory for all your virtualenvs under a path
   you have write access to. For example::

       $ mkdir ~/.virtualenvs

#. Create the Scikit-beam virtualenv::

       $ virtualenv --distribute --system-site-packages ~/.virtualenvs/scikit-beam-dev

   The ``--system-site-packages`` option inherits all packages already
   installed in your system site-packages directory; this frees you from
   having to reinstall packages like Numpy and Scipy in the virtualenv.
   However, if you would like your virtualenv to use a development version of
   Numpy, for example, you can still install Numpy into the virtualenv and it
   will take precedence over the version installed in site-packages.

#. Activate the virtualenv::

       $ source ~/.virtualenvs/scikit-beam-dev/bin/activate

   or if you're using a csh-variant::

       $ source ~/.virtualenvs/scikit-beam-dev/bin/activate.csh

   virtualenv works on Windows too |emdash| see the documentation for details.

#. If the virtualenv successfully activated, its name should appear in your
   shell prompt::

       (scikit-beam-dev) $

   The virtualenv can be disabled at any time by entering::

       (scikit-beam-dev) $ deactivate

#. Now as long as the virtualenv is activated, packages you install with
   ``pip``, or by manually running ``python setup.py install``, will
   automatically install into your virtualenv instead of the system
   site-packages. Consider installing Scikit-beam in develop mode into the
   virtualenv as described in :ref:`activate_development_scikit-beam`.

Using virtualenv with IPython
-----------------------------

.. note::

    As of IPython 0.13 this functionality is built into IPython and these
    steps are not necessary for IPython to recognize that it's running with a
    virtualenv enabled.

Each virtualenv has its own ``bin/``, and as IPython is written in pure Python
one can always install IPython directly into a virtualenv. However, if you
would rather not have to install IPython every time you create a virtualenv,
it also suffices to make IPython virtualenv-aware.

1. Check to see if you already have an IPython profile in
   ``~/.ipython/profile_default/``; if not, create one::

       $ ipython profile create

2. Edit ``~/.ipython/profile_default/ipython_config.py`` and add the following
   to the end::

       import os
       execfile(os.path.join(os.environ['HOME'], '.ipython', 'virtualenv.py'))

3. Finally, create the ``~/.ipython/virtualenv.py`` module::

       import site
       from os import environ
       from os.path import join
       from sys import version_info

       if 'VIRTUAL_ENV' in environ:
           virtual_env = join(environ.get('VIRTUAL_ENV'),
                              'lib',
                              'python%d.%d' % version_info[:2],
                              'site-packages')
           site.addsitedir(virtual_env)
           print 'VIRTUAL_ENV ->', virtual_env
           del virtual_env
       del site, environ, join, version_info

   Now IPython will import all packages from your virtualenv where applicable.

   .. note::

       This is not magic. If you switch to a virtualenv that uses a different
       Python version from your main IPython installation this won't help you
       |emdash| instead use the appropriate IPython installation for the
       Python version in question.

virtualenvwrapper
-----------------

`virtualenvwrapper`_ is a set of enhancements to virtualenv mostly implemented
through simple shell scripts and aliases. It automatically organizes all your
virtualenvs under a single directory (as suggested above). To create a new
virtualenv you can just use the ``'mkvirtualenv <env_name>'`` command and it
will automatically create a new virtualenv of that name in the default
location.

To activate a virtualenv with virtualenvwrapper you don't need to think about
the environment's location on the filesystem or which activate script to run.
Simply run ``'workon <env_name>'``. You can also list all virtualenvs with
``lsvirtualenv``. That just scratches the surface of the goodies included with
virtualenvwrapper; a typical session is sketched at the end of this page.

The one caveat is that it does not support csh-like shells. For csh-like
shells there exists `virtualenvwrapper-csh`_, which implements most of the
virtualenvwrapper functionality and is otherwise compatible with the original.
There also exists `virtualenvwrapper-win`_, which ports virtualenvwrapper to
Windows batch scripts.

venv
----

virtualenv is so commonly used in the Python development community that its
functionality was finally added to the standard library in Python 3.3 under
the name `venv`_. venv has not gained wide use yet and is not explicitly
supported by tools like virtualenvwrapper, but it is expected to see wider
adoption in the future.

.. include:: links.inc
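The virtualenvwrapper commands named above can be combined into a short
session; a sketch (the environment name is arbitrary):

.. code-block:: sh

    $ mkvirtualenv scikit-beam-dev   # create and activate a new environment
    $ lsvirtualenv                   # list all managed environments
    $ deactivate                     # leave the environment
    $ workon scikit-beam-dev         # re-activate it later by name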
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/workflow/virtualenv_detail.rst
virtualenv_detail.rst
:orphan: .. _using-virtualenv: Using virtualenv ================ `virtualenv`_ is a tool for creating and activating isolated Python environments that allow installing and experimenting with Python packages without disrupting your production Python environment. When using commands such as ``python setup.py develop``, for example, it is strongly recommended to do so within a virtualenv. This is generally preferable to installing a development version of Scikit-beam into your system site-packages and having to keep track of whether or not your environment is in a "known good" configuration for production/science use. Using a virtualenv is also a good way to try out new versions of software that you're not actively doing development work on without disrupting your normal production environment. We won't provide a full tutorial on using virtualenv here |emdash| the virtualenv documentation linked to above is a better place to start. But here is a quick overview on how to set up a virtualenv for Scikit-beam development with your default Python version: #. Install virtualenv:: $ pip install virtualenv or (on Debian/Ubuntu):: $ sudo apt-get install python-virtualenv etc. #. (Recommended) Create a root directory for all your virtualenvs under a path you have write access to. For example:: $ mkdir ~/.virtualenvs #. Create the Scikit-beam virtualenv:: $ virtualenv --distribute --system-site-packages ~/.virtualenvs/scikit-beam-dev The ``--system-site-packages`` option inherits all packages already installed in your system site-packages directory; this frees you from having to reinstall packages like Numpy and Scipy in the virtualenv. However, if you would like your virtualenv to use a development version of Numpy, for example, you can still install Numpy into the virtualenv and it will take precedence over the version installed in site-packages. #. Activate the virtualenv:: $ source ~/.virtualenvs/scikit-beam-dev/bin/activate or if you're using a csh-variant:: $ source ~/.virtualenvs/scikit-beam-dev/bin/activate.csh virtualenv works on Windows too |emdash| see the documentation for details. #. If the virtualenv successfully activated its name should appear in your shell prompt:: (scikit-beam-dev) $ The virtualenv can be disabled at any time by entering:: (scikit-beam-dev) $ deactivate #. Now as long as the virtualenv is activated, packages you install with ``pip``, or by manually running ``python setup.py install`` will automatically install into your virtualenv instead of the system site-packages. Consider installing Scikit-beam in develop mode into the virtualenv as described :ref:`activate_development_scikit-beam`. Using virtualenv with IPython ----------------------------- .. note:: As of IPython 0.13 this functionality is built into IPython and these steps are not necessary for IPython to recognize that it's running with a virtualenv enabled. Each virtualenv has its own ``bin/``, and as IPython is written in pure Python one can always install IPython directly into a virtualenv. However, if you would rather not have to install IPython every time you create a virtualenv, it also suffices to make IPython virtualenv-aware. 1. Check to see if you already have an IPython profile in ``~/.ipython/profile_default/``; if not, create one:: $ ipython profile create 2. Edit ``~/.ipython/profile_default/ipython_config.py`` and add the following to the end:: import os execfile(os.path.join(os.environ['HOME'], '.ipython', 'virtualenv.py')) 3. 
Finally, create the ``~/.ipython/virtualenv.py`` module:: import site from os import environ from os.path import join from sys import version_info if 'VIRTUAL_ENV' in environ: virtual_env = join(environ.get('VIRTUAL_ENV'), 'lib', 'python%d.%d' % version_info[:2], 'site-packages') site.addsitedir(virtual_env) print 'VIRTUAL_ENV ->', virtual_env del virtual_env del site, environ, join, version_info Now IPython will import all packages from your virtualenv where applicable. .. note:: This is not magic. If you switch to a virtualenv that uses a different Python version from your main IPython installation this won't help you |emdash| instead use the appropriate IPython installation for the Python version in question. virtualenvwrapper ----------------- `virtualenvwrapper`_ is a set of enhancements to virtualenv mostly implemented through simple shell scripts and aliases. It automatically organizes all your virtualenvs under a single directory (as suggested above). To create a new virtualenv you can just use the ``'mkvirtualenv <env_name>'`` command and it will automatically create a new virtualenv of that name in the default location. To activate a virtualenv with virtualenvwrapper you don't need to think about the environment's location of the filesystem or which activate script to run. Simply run ``'workon <env_name>'``. You can also list all virtualenvs with ``lsvirtualenv``. That just scratches the surface of the goodies included with virtualenvwrapper. The one caveat is that it does not support csh-like shells. For csh-like shells there exists `virtualenvwrapper-csh`_, which implements most of the virtualenvwrapper functionality and is otherwise compatible with the original. There also exists `virtualenvwrapper-win`_, which ports virtualenvwrapper to Windows batch scripts. venv ---- virtualenv is so commonly used in the Python development community that its functionality was finally added to the standard library in Python 3.3 under the name `venv`_. venv has not gained wide use yet and is not explicitly supported by tools like virtualenvwrapper, but it is expected to see wider adoption in the future. .. include:: links.inc
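The ``virtualenv.py`` module shown above is Python 2 code (note the ``print`` statement) and is loaded through ``execfile``, which no longer exists in Python 3. As the note earlier in this section says, IPython 0.13 and later handle virtualenvs on their own, so none of this is needed there. If you do want an equivalent hook on Python 3, a minimal sketch is to place a small script in your IPython profile's ``startup/`` directory; the file name below is just an example, not part of the official instructions::

    # ~/.ipython/profile_default/startup/00-virtualenv.py
    # Illustrative Python 3 sketch of the hook above: if a virtualenv is
    # active, add its site-packages directory to this session's import path.
    import os
    import site
    import sys

    virtual_env = os.environ.get('VIRTUAL_ENV')
    if virtual_env:
        site_packages = os.path.join(
            virtual_env,
            'lib',
            'python%d.%d' % sys.version_info[:2],
            'site-packages',
        )
        site.addsitedir(site_packages)
        print('VIRTUAL_ENV ->', site_packages)

As with the original recipe, this only helps if the virtualenv was built with the same Python version as the IPython installation you are running.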
:orphan: .. include:: links.inc .. _scikit-beam-fix-example: ==================================================== Contributing code to Scikit-beam, a worked example ==================================================== .. warning:: This still needs to be adapted for skbeam This example is based on fixing `Issue 1761`_ from the list of `Scikit-beam issues on GitHub <https://github.com/scikit-beam/scikit-beam/issues>`_. It resulted in `pull request 1917`_. The issue title was "len() does not work for coordinates" with description "It would be nice to be able to use ``len`` on coordinate arrays to know how many coordinates are present." This particular example was chosen because it was tagged as easy in GitHub; seemed like the best place to start out! Short on time? Don't want to read a long tutorial? ================================================== There is a minimalist, command-only version of this at :ref:`command_history`. You should have `pull request 1917`_ open as you read the commands so you can see the edits made to the code. There is also a very exciting terminal-cast at :ref:`terminal_cast`. Before you begin ================ Make sure you have a local copy of scikit-beam set up as described in :ref:`get_devel`. In a nutshell, the output of ``git remote -v``, run in the directory where your local of Scikit-beam resides, should be something like this:: scikit-beam git://github.com/scikit-beam/scikit-beam.git (fetch) scikit-beam git://github.com/scikit-beam/scikit-beam.git (push) your-user-name [email protected]:your-user-name/scikit-beam.git (fetch) your-user-name [email protected]:your-user-name/scikit-beam.git (push) The precise form of the URLs for ``your-user-name`` depends on the authentication method you set up with GitHub. The important point is that ``scikit-beam`` should point to the official Scikit-beam repo and ``your-user-name`` should point to *your* copy of Scikit-beam on GitHub. Grab the latest updates to scikit-beam ====================================== A few steps in this tutorial take only a single command. They are broken out separately to outline the process in words as well as code. Inform your local copy of Scikit-beam about the latest changes in the development version with:: git fetch scikit-beam Set up an isolated workspace ============================ + Make a new `git`_ branch for fixing this issue, check it out, and let my GitHub account know about this branch:: git branch fix-1761 scikit-beam/master # branch based on latest from GitHub git checkout fix-1761 # switch to this branch git push --set-upstream origin fix-1761 # tell my github acct about it + Make a python environment just for this fix and switch to that environment. The example below shows the necessary steps in the Anaconda python distribution:: conda create -n apy-1761 --clone root # Anaconda distribution only source activate apy-1761 If you are using a different distribution, see :ref:`virtual_envs` for instructions for creating and activating a new environment. + Install our branch in this environment (will not work in python 3) with:: python setup.py develop Do you really have to set up a separate python environment for each fix? No, but you definitely want to have a python environment for your work on code contributions. Making new environments is fast, doesn't take much space and provide a way to keep your work organized. Tests first, please =================== It would be hard to overstate the importance of testing to Scikit-beam. 
Tests are what give you confidence that new code does what it should and that it doesn't break old code. You should at least run the relevant tests before you make any changes to make sure that your python environment is set up properly. The first challenge is figuring out where to look for relevant tests. `Issue 1761`_ is a problem in the :obj:`~scikit-beam.coordinates` package, so the tests for it are in ``scikit-beam/coordinates/tests``. The rest of Scikit-beam has a similar layout, described at :ref:`testing-guidelines`. Change to that directory and run the current tests with:: cd scikit-beam/coordinates/tests py.test The tests all pass, so I need to write a new test to expose this bug. There are several files with tests in them, though:: $ ls test_angles.py test_angular_separation.py test_api.py test_arrays.py test_distance.py test_formatting.py test_matching.py test_name_resolve.py test_transformations.py `Issue 1761`_ affects arrays of coordinates, so it seems sensible to put the new test in ``test_arrays.py``. As with all of the steps, if you are not sure, ask on the scikit-beam-dev mailing list. The goal at this point may be a little counter-intuitive: write a test that we know will fail with the current code. This test allows Scikit-beam to check, in an automated way, whether our fix actually works and to make sure future changes to the code do not break our fix. Looking over the existing code in ``test_arrays.py``, each test is a function whose name starts with ``test_``; the last test in the file is ``test_array_indexing``, so an appropriate place to add the new test is right after that. Give the test a reasonably clear name; I chose ``test_array_len``. The easiest way to figure out what you need to import and how to set up the test is to look at other tests. The full test is in the traceback below and in `pull request 1917`_. Write the test, then see if it works as expected--remember, in this case we expect it to *fail*. Running ``py.test test_arrays.py`` gives the expected result; an excerpt from the output is:: ================= FAILURES ============================= ______________ test_array_len __________________________ def test_array_len(): from .. import ICRS input_length = 5 ra = np.linspace(0, 360, input_length) dec = np.linspace(0, 90, input_length) c = ICRS(ra, dec, unit=(u.degree, u.degree)) > assert len(c) == input_length E TypeError: object of type 'ICRS' has no len() test_arrays.py:291: TypeError Success! Add this test to your local `git`_ repo ======================================= Keep `git`_ commits small and focused on one logical piece at a time. The test we just wrote is one logical change, so we will commit it. You could, if you prefer, wait and commit this test along with your fix. For this tutorial I'll commit the test separately. If you aren't sure what to do, ask on the scikit-beam-dev mailing list. Check what was changed ---------------------- We can see what has changed with ``git status``:: $ git status On branch fix-1761 Your branch is up-to-date with 'origin/fix-1761'. Changes not staged for commit: (use "git add <file>..." to update what will be committed) (use "git checkout -- <file>..." to discard changes in working directory) modified: test_arrays.py no changes added to commit (use "git add" and/or "git commit -a") There are two bits of information here: + one file changed, ``test_arrays.py`` + we have not added our changes to git yet, so the file is listed under ``Changes not staged for commit``.
For more extensive changes it can be useful to use ``git diff`` to see what changes have been made:: $ git diff diff --git a/scikit-beam/coordinates/tests/test_arrays.py b/scikit-beam/coordinates/test index 2785b59..7eecfbb 100644 --- a/scikit-beam/coordinates/tests/test_arrays.py +++ b/scikit-beam/coordinates/tests/test_arrays.py @@ -278,3 +278,14 @@ def test_array_indexing(): assert c2.equinox == c1.equinox assert c3.equinox == c1.equinox assert c4.equinox == c1.equinox + +def test_array_len(): + from .. import ICRS + + input_length = 5 + ra = np.linspace(0, 360, input_length) + dec = np.linspace(0, 90, input_length) + + c = ICRS(ra, dec, unit=(u.degree, u.degree)) + + assert len(c) == input_length A graphical interface to git makes keeping track of these sorts of changes even easier; see :ref:`git_gui_options` if you are interested. Stage the change ---------------- `git`_ requires you to add changes in two steps: + stage the change with ``git add test_arrays.py``; this adds the file to the list of items that will be added to the repo when you are ready to commit. + commit the change with ``git commit``; this actually adds the changes to your repo. These can be combined into one step; the advantage of doing it in two steps is that it is easier to undo staging than committing. As we will see later, ``git status`` even tells you how to do it. Staging can be very handy if you are making changes in a couple of different places that you want to commit at the same time. Make your first changes, stage it, then make your second change and stage that. Once everything is staged, commit the changes as one commit. In this case, first stage the change:: git add test_arrays.py You get no notice at the command line that anything has changed, but ``git status`` will let you know:: $ git status On branch fix-1761 Your branch is up-to-date with 'origin/fix-1761'. Changes to be committed: (use "git reset HEAD <file>..." to unstage) modified: test_arrays.py Note that `git`_ helpfully includes the command necessary to unstage the change if you want to. Commit your change ------------------ I prefer to make commits frequently, so I'll commit the test without the fix:: $ git commit -m'Add test for array coordinate length (issue #1761)' [fix-1761 dd4ef8c] Add test for array coordinate length (issue #1761) 1 file changed, 11 insertions(+) Commit messages should be short and descriptive. Including the GitHub issue number allows GitHub to automatically create links to the relevant issue. Use ``git status`` to get a recap of where we are so far:: $ git status On branch fix-1761 Your branch is ahead of 'origin/fix-1761' by 1 commit. (use "git push" to publish your local commits) nothing to commit, working directory clean In other words, we have made a change to our local copy of scikit-beam but we have not pushed (transferred) that change to our GitHub account. Fix the issue ============= Write the code -------------- Now that we have a test written, we'll fix the issue. A full discussion of the fix is beyond the scope of this tutorial, but the fix is to add a ``__len__`` method to ``scikit-beam.coordinates.SphericalCoordinatesBase`` in ``coordsystems.py``. All of the spherical coordinate systems inherit from this base class and it is this base class that implements the ``__getitem__`` method that allows indexing of coordinate arrays. See `pull request 1917`_ to view the changes to the code. .. 
_test_changes: Test your change ---------------- There are a few levels at which you want to test: + Does this code change make the test we wrote succeed now? Check by running ``py.test tests/test_arrays.py`` in the ``coordinates`` directory. In this case, yes! + Do the rest of the coordinate tests still pass? Check by running ``py.test`` in the ``coordinates`` directory. In this case, yes--we have not broken anything! + Do all of the scikit-beam tests still succeed? Check by moving to the top level directory (the one that contains ``setup.py``) and run ``python setup.py test``. This may take several minutes depending on the speed of your system. Success again! .. note:: Tests that are skipped or xfailed are fine. A fail or an error is not fine. If you get stuck, ask on scikit-beam-dev mailing list for help! Stage and commit your change ---------------------------- Add the file to your git repo in two steps: stage, then commit. To make this a little different than the commit we did above, make sure you are still in the top level directory and check the ``git status``:: $ git status On branch fix-1761 Your branch is ahead of 'origin/fix-1761' by 1 commit. (use "git push" to publish your local commits) Changes not staged for commit: (use "git add <file>..." to update what will be committed) (use "git checkout -- <file>..." to discard changes in working directory) modified: scikit-beam/coordinates/coordsystems.py no changes added to commit (use "git add" and/or "git commit -a") Note that git knows what has changed no matter what directory you are in (as long as you are in one of the directories in the repo, that is). Stage the change with:: git add scikit-beam/coordinates/coordsystems.py For this commit it is helpful to use a multi-line commit message that will automatically close the issue on GitHub when this change is accepted. The snippet below accomplishes that in bash (and similar shells):: $ git commit -m" > Add len() to coordinates > > Closes #1761" [fix-1761 f196771] Add len() to coordinates 1 file changed, 4 insertions(+) If this was not a tutorial I would write the commit message in a git gui or run ``git commit`` without a message and git would put me in an editor. However you do it, the message after committing should look like this:: Add len() to coordinates Closes #1761 You can check the commit messages by running ``git log``. If the commit message doesn't look right, ask about fixing it at scikit-beam-dev mailing list. Push your changes to your GitHub fork of scikit-beam ==================================================== This one is easy: ``git push`` This copies the changes made on your computer to your copy of Scikit-beam on GitHub. At this point none of the Scikit-beam maintainers know anything about your change. We'll take care of that in a moment with a "pull request", but first... Stop and think: any more tests or other changes? ================================================ It never hurts to pause at this point and review whether your proposed changes are complete. In this case I realized there were some tests I could have included but didn't: + What happens when ``len()`` is called on a coordinate that is *not* an array? + Does ``len()`` work when the coordinate is an array with one entry? Both of these are mentioned in the pull request so it doesn't hurt to check them. In this case they also provide an opportunity to illustrate a feature of the `py.test`_ framework. 
I'll move back to the directory containing the tests with ``cd scikit-beam/coordinates/tests`` to make it a bit easier to run just the test I want. The second case is easier, so I'll handle that one first, following the cycle we used above: + Make the change in ``test_arrays.py`` + Test the change The test passed; rather than committing this one change I'll also implement the check for the scalar case. One could imagine two different desirable outcomes here: + ``len(scalar_coordinate)`` behaves just like ``len(scalar_angle)``, raising a `TypeError` for a scalar coordinate. + ``len(scalar_coordinate)`` returns 1 since there is one coordinate. If you encounter a case like this and are not sure what to do, ask. The best place to ask is in GitHub on the page for the issue you are fixing. Alternatively, make a choice and be clear in your pull request on GitHub what you chose and why; instructions for that are below. Testing for an expected error ----------------------------- In this case I opted for raising a `TypeError`, because the user needs to know that the coordinate they created is not going to behave like an array of one coordinate if they try to index it later on. It also provides an opportunity to demonstrate a test when the desired result is an error. The `py.test`_ framework makes testing for an exception relatively easy; you put the code you expect to fail in a ``with`` block:: with pytest.raises(TypeError): c = ICRS(0, 0, unit=(u.degree, u.degree)) len(c) I added this to ``test_array_len`` in ``test_arrays.py`` and re-ran the test to make sure it works as desired. Aside: Python lesson--let others do your work --------------------------------------------- The actual fix to this issue was very, very short. In ``coordsystems.py`` two lines were added:: def __len__(self): return len(self.lonangle) ``lonangle`` contains the ``Angle`` objects that represent longitude (sometimes this is an RA, sometimes a longitude). By simply calling ``len()`` on one of the angles in the array you get, for free, whatever behavior has been defined in the ``Angle`` class for handling the case of a scalar. Adding an explicit check for the case of a scalar here would have the very big downside of having two things that need to be kept in sync: handling of scalars in ``Angle`` and in coordinates. Commit any additional changes ============================= Follow the cycle you saw above: + Check that **all** Scikit-beam tests still pass; see :ref:`test_changes` + ``git status`` to see what needs to be staged and committed + ``git add`` to stage the changes + ``git commit`` to commit the changes The `git`_ commands, without their output, are:: git status git add scikit-beam/coordinates/tests/test_arrays.py git commit -m"Add tests of len() for scalar coordinate and length 1 coordinate" Edit the changelog ================== Keeping the list of changes up to date is nearly impossible unless each contributor makes the appropriate updates as they propose changes. Changes are in the file ``CHANGES.rst`` in the top-level directory (the directory where ``setup.py`` is). Put the change under the list that matches the milestone (aka release) that is set for the issue in GitHub. If you are proposing a new feature in a pull request you may need to wait on this change until the pull request is discussed. This issue was tagged for 0.3.1, as shown in the image below, so the changelog entry went there. .. image:: milestone.png The entry in ``CHANGES.rst`` should summarize what you did and include the issue number.
For writing changelog entries you don't need to know much about the markup language being used (though you can read as much as you want about it at the `Sphinx primer`_); look at other entries and imitate. For this issue the entry was the line that starts ``- Implemented``:: - ``scikit-beam.coordinates`` - Implemented `len()` for coordinate objects. [#1761] Starting the line with a ``-`` makes a bulleted list item, indenting it makes it a sublist of ``scikit-beam.coordinates`` and putting ``len()`` in single backticks makes that text render in a typewriter font. Commit your changes to the CHANGES.rst -------------------------------------- You can use ``git status`` as above or jump right to staging and committing:: git add CHANGES.rst git commit -m"Add changelog entry for 1761" Push your changes to GitHub =========================== One last push to GitHub with these changes before asking for the changes to be reviewed:: git push Ask for your changes to be merged with a pull request ===================================================== This stage requires to go to your GitHub account and navigate to *your* copy of scikit-beam; the url will be something like ``https://github.com/your-user-name/scikit-beam``. Once there, select the branch that contains your fix from the branches dropdown: .. image:: worked_example_switch_branch.png After selecting the correct branch click on the "Pull Request" button, like that in the image below: .. image:: pull_button.png Name your pull request something sensible. Include the issue number with a leading ``#`` in the description of the pull request so that a link is created to the original issue. Please see `pull request 1917`_ for the pull request from this example. Revise and push as necessary ============================ You may be asked to make changes in the discussion of the pull request. Make those changes in your local copy, commit them to your local repo and push them to GitHub. GitHub will automatically update your pull request. .. _Issue 1761: https://github.com/scikit-beam/scikit-beam/issues/1917 .. _pull request 1917: https://github.com/scikit-beam/scikit-beam/issues/1917 .. _Sphinx primer: http://sphinx-doc.org/rest.html .. _test commit: https://github.com/mwcraig/scikit-beam/commit/cf7d5ac15d7c63ae28dac638c6484339bac5f8de
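Pulling together the test fragments shown in this example, the finished ``test_array_len`` might look roughly like the sketch below. It is a reconstruction for illustration only; the authoritative version is the one in `pull request 1917`_. Like the excerpts above, it assumes ``np`` (numpy), ``u`` (the units module), and ``pytest`` are already imported at the top of ``test_arrays.py``, and the length-1 construction ``ICRS([0.0], [0.0], ...)`` is an assumed calling pattern rather than something spelled out in the text::

    def test_array_len():
        from .. import ICRS

        # An array of coordinates reports how many coordinates it holds.
        input_length = 5
        ra = np.linspace(0, 360, input_length)
        dec = np.linspace(0, 90, input_length)
        c = ICRS(ra, dec, unit=(u.degree, u.degree))
        assert len(c) == input_length

        # A length-1 array is still an array, so len() is 1, not an error.
        c_one = ICRS([0.0], [0.0], unit=(u.degree, u.degree))
        assert len(c_one) == 1

        # A scalar coordinate cannot be indexed, so len() raises TypeError,
        # mirroring the behavior of a scalar Angle.
        with pytest.raises(TypeError):
            c_scalar = ICRS(0, 0, unit=(u.degree, u.degree))
            len(c_scalar)

The scalar case needs no special handling in the fix itself because ``__len__`` simply defers to ``len(self.lonangle)``, as described in the aside above.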
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/workflow/git_edit_workflow_examples.rst
git_edit_workflow_examples.rst
:orphan: .. _basic-workflow: ================ Creating patches ================ Overview -------- If you haven't already configured git:: git config --global user.name "Your Name" git config --global user.email [email protected] Then, the workflow is the following:: # Get the repository if you don't have it git clone git://github.com/scikit-beam/scikit-beam.git # Make a branch for your patching cd scikit-beam git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # Commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack # Commit work git commit -am 'BF - added fix for Funny bug' # Make the patch files git format-patch -M -C master Then, send the generated patch files to the scikit-beam-dev mailing list |emdash| where we will thank you warmly. In detail --------- #. Tell git who you are so it can label the commits you've made:: git config --global user.name "Your Name" git config --global user.email [email protected] This is only necessary if you haven't already done this, and you haven't checked to :ref:`check_git_install`. #. If you don't already have one, clone a copy of the Scikit-beam_ repository:: git clone git://github.com/scikit-beam/scikit-beam.git cd scikit-beam #. Make a 'feature branch'. This will be where you work on your bug fix. It's nice and safe and leaves you with access to an unmodified copy of the code in the main branch:: git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of #. Do some edits, and commit them as you go:: # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # Commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack # Commit work git commit -am 'BF - added fix for Funny bug' Note the ``-am`` options to ``commit``. The ``m`` flag just signals that you're going to type a message on the command line. The ``a`` flag |emdash| you can just take on faith |emdash| or see `why the -a flag?`_. #. When you have finished, check you have committed all your changes:: git status #. Finally, make your commits into patches. You want all the commits since you branched from the ``master`` branch:: git format-patch -M -C master You will now have several files named for the commits:: 0001-BF-added-tests-for-Funny-bug.patch 0002-BF-added-fix-for-Funny-bug.patch Send these files to the scikit-beam-dev mailing list. When you are done, to switch back to the main copy of the code, just return to the ``master`` branch:: git checkout master .. include:: links.inc
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/workflow/patches.rst
patches.rst
:orphan: .. include:: links.inc .. _virtual_envs: =========================== Python virtual environments =========================== If you plan to do regular work on scikit-beam you should do your development in a python virtual environment. Conceptually a virtual environment is a duplicate of the python environment you normally work in with as many (or as few) of the packages from your normal environment included in that virtual environment. It is sandboxed from your normal python environment in the sense that packages installed in the virtual environment do not affect your normal environment in any way. .. note:: "Normal python environment" means whatever python you are using when you log in. There are two options for using virtual environments; the choice of method is dictated by the python distribution you use: * If you use the anaconda python distribution you must use `conda`_ to make and manage your virtual environments. * If you use any other distribution you use `virtualenvwrapper`_; you *can not* use `conda`_. As the name suggests, `virtualenvwrapper`_ is a wrapper around `virtualenv`_. In both cases you will go through the same basic steps; the commands to accomplish each step are given for both `conda`_ and `virtualenvwrapper`_: * :ref:`setup_for_env` * :ref:`list_env` * :ref:`create_env` * :ref:`activate_env` * :ref:`deactivate_env` * :ref:`delete_env` .. note:: + You **cannot** use `virtualenvwrapper`_ or `virtualenv`_ within anaconda. + `virtualenvwrapper`_ works with bash and bash-like shells; see :ref:`using-virtualenv` for alternatives. .. _setup_for_env: Set up for virtual environments ------------------------------- * `conda`_: No setup is necessary beyond installing the anaconda python distribution. * `virtualenvwrapper`_: + First, install `virtualenvwrapper`_, which will also install `virtualenv`_, with ``pip install virtualenvwrapper``. + From the `documentation for virtualenvwrapper`_, you also need to:: export WORKON_HOME=$HOME/.virtualenvs export PROJECT_HOME=$HOME/ source /usr/local/bin/virtualenvwrapper.sh .. _list_env: List virtual environments ------------------------- You do not need to list the virtual environments you have created before using them...but sooner or later you will forget what environments you have defined and this is the easy way to find out. * `conda`_: ``conda info -e`` + you will always have at least one environment, called ``root`` + your active environment is indicated by a ``*`` * `virtualenvwrapper`_: ``workon`` + If this displays nothing you have no virtual environments + If this displays ``workon: command not found`` then you haven't done the :ref:`setup_for_env`; do that. + For more detailed information about installed environments use ``lsvirtualenv``. .. _create_env: Create a new virtual environment -------------------------------- This needs to be done once for each virtual environment you want. There is one important choice you need to make when you create a virtual environment: which, if any, of the packages installed in your normal python environment do you want in your virtual environment? Including them in your virtual environment doesn't take much extra space--they are linked into the virtual environment instead of being copied. Within the virtual environment you can install new versions of packages like Numpy or Scikit-beam that override the versions installed in your normal python environment. 
The easiest way to get started is to include in your virtual environment the packages installed in your normal python environment; the instructions below do that. In everything that follows, ``ENV`` represents the name you give your virtual environment. **The name you choose cannot have spaces in it.** * `conda`_: + Make an environment called ``ENV`` with all of the packages in your main anaconda environment:: ``conda create -n ENV anaconda`` + More details, and examples that start with none of the packages from your normal python environment, are in the `documentation for the conda command`_ and the `blog post announcing anaconda environments`_. * `virtualenvwrapper`_: + Make an environment called ``ENV`` with all of the packages in your normal python environment:: ``mkvirtualenv --system-site-packages ENV`` + Omit the option ``--system-site-packages`` to create an environment without the python packages installed in your normal python environment. + Environments created with `virtualenvwrapper`_ always include `pip`_ and `setuptools <https://pythonhosted.org/setuptools/>`_ so that you can install packages within the virtual environment. + More details and examples are in the `virtualenvwrapper command documentation`_. .. _activate_env: Activate a virtual environment ------------------------------ To use a new virtual environment you may need to activate it; `virtualenvwrapper`_ will try to automatically activate your new environment when you create it. Activation does three things (which you could do manually, though it would be inconvenient): * Put the ``bin`` directory for the virtual environment at the front of your ``$PATH``. * Add the name of the virtual environment to your command prompt. If you have successfully switched to a new environment called ``ENV`` your prompt should look something like this: ``(ENV)[~] $`` * conda provides a mechanism for packages to install scripts or export environment variables on activation. The commands below allow you to switch between virtual environments in addition to activating new ones. * `conda`_: Activate the environment ``ENV`` with:: source activate ENV * `virtualenvwrapper`_: Activate the environment ``ENV`` with:: workon ENV .. _deactivate_env: Deactivate a virtual environment -------------------------------- At some point you may want to go back to your normal python environment. Do that with: * `conda`_: ``source deactivate`` * `virtualenvwrapper`_: ``deactivate`` + Note that in ``virtualenvwrapper 4.1.1`` the output of ``mkvirtualenv`` says you should use ``source deactivate``; that does not seem to actually work. .. _delete_env: Delete a virtual environment ---------------------------- In both `virtualenvwrapper`_ and `conda`_ you can simply delete the directory in which the ``ENV`` is located; both also provide commands to make that a bit easier. * `conda`_: ``conda remove --all -n ENV`` * `virtualenvwrapper`_: ``rmvirtualenv ENV`` .. _documentation for virtualenvwrapper: http://virtualenvwrapper.readthedocs.org/en/latest/install.html .. _virtualenvwrapper command documentation: http://virtualenvwrapper.readthedocs.org/en/latest/command_ref.html .. _documentation for the conda command: http://docs.continuum.io/conda/examples/create.html .. _blog post announcing anaconda environments: http://www.continuum.io/blog/conda
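Whichever tool you use, it is easy to lose track of which environment is currently active. The short script below is purely illustrative (it is not part of scikit-beam, conda, or virtualenvwrapper); it prints the interpreter actually being used plus the ``VIRTUAL_ENV`` and ``CONDA_DEFAULT_ENV`` variables that the two tools set on activation::

    # check_env.py -- illustrative helper for checking the active environment
    import os
    import sys

    print('python executable:', sys.executable)
    print('sys.prefix:', sys.prefix)
    # virtualenv/virtualenvwrapper export VIRTUAL_ENV on activation;
    # conda's `source activate ENV` exports CONDA_DEFAULT_ENV.
    print('VIRTUAL_ENV:', os.environ.get('VIRTUAL_ENV', '<not set>'))
    print('conda env:', os.environ.get('CONDA_DEFAULT_ENV', '<not set>'))

Run it with ``python check_env.py`` after ``workon ENV`` or ``source activate ENV``; the executable path should point into the environment's own directory if activation worked.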
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/workflow/virtual_pythons.rst
virtual_pythons.rst
:orphan: .. include:: links.inc .. _virtual_envs: =========================== Python virtual environments =========================== If you plan to do regular work on scikit-beam you should do your development in a python virtual environment. Conceptually a virtual environment is a duplicate of the python environment you normally work in with as many (or as few) of the packages from your normal environment included in that virtual environment. It is sandboxed from your normal python environment in the sense that packages installed in the virtual environment do not affect your normal environment in any way. .. note:: "Normal python environment" means whatever python you are using when you log in. There are two options for using virtual environments; the choice of method is dictated by the python distribution you use: * If you use the anaconda python distribution you must use `conda`_ to make and manage your virtual environments. * If you use any other distribution you use `virtualenvwrapper`_; you *can not* use `conda`_. As the name suggests, `virtualenvwrapper`_ is a wrapper around `virtualenv`_. In both cases you will go through the same basic steps; the commands to accomplish each step are given for both `conda`_ and `virtualenvwrapper`_: * :ref:`setup_for_env` * :ref:`list_env` * :ref:`create_env` * :ref:`activate_env` * :ref:`deactivate_env` * :ref:`delete_env` .. note:: + You **cannot** use `virtualenvwrapper`_ or `virtualenv`_ within anaconda. + `virtualenvwrapper`_ works with bash and bash-like shells; see :ref:`using-virtualenv` for alternatives. .. _setup_for_env: Set up for virtual environments ------------------------------- * `conda`_: No setup is necessary beyond installing the anaconda python distribution. * `virtualenvwrapper`_: + First, install `virtualenvwrapper`_, which will also install `virtualenv`_, with ``pip install virtualenvwrapper``. + From the `documentation for virtualenvwrapper`_, you also need to:: export WORKON_HOME=$HOME/.virtualenvs export PROJECT_HOME=$HOME/ source /usr/local/bin/virtualenvwrapper.sh .. _list_env: List virtual environments ------------------------- You do not need to list the virtual environments you have created before using them...but sooner or later you will forget what environments you have defined and this is the easy way to find out. * `conda`_: ``conda info -e`` + you will always have at least one environment, called ``root`` + your active environment is indicated by a ``*`` * `virtualenvwrapper`_: ``workon`` + If this displays nothing you have no virtual environments + If this displays ``workon: command not found`` then you haven't done the :ref:`setup_for_env`; do that. + For more detailed information about installed environments use ``lsvirtualenv``. .. _create_env: Create a new virtual environment -------------------------------- This needs to be done once for each virtual environment you want. There is one important choice you need to make when you create a virtual environment: which, if any, of the packages installed in your normal python environment do you want in your virtual environment? Including them in your virtual environment doesn't take much extra space--they are linked into the virtual environment instead of being copied. Within the virtual environment you can install new versions of packages like Numpy or Scikit-beam that override the versions installed in your normal python environment. 
The easiest way to get started is to include in your virtual environment the packages installed in your normal python environment; the instructions below do that. In everything that follows, ``ENV`` represents the name you give your virtual environment. **The name you choose cannot have spaces in it.** * `conda`_: + Make an environment called ``ENV`` with all of the packages in your main anaconda environment:: ``conda create -n ENV anaconda`` + More details, and examples that start with none of the packages from your normal python environment, are in the `documentation for the conda command`_ and the `blog post announcing anaconda environments`_. * `virtualenvwrapper`_: + Make an environment called ``ENV`` with all of the packages in your normal python environment:: ``mkvirtualenv --system-site-packages ENV`` + Omit the option ``--system-site-packages`` to create an environment without the python packages installed in your normal python environment. + Environments created with `virtualenvwrapper`_ always include `pip`_ and `setuptools <https://pythonhosted.org/setuptools/>`_ so that you can install packages within the virtual environment. + More details and examples are in the `virtualenvwrapper command documentation`_. .. _activate_env: Activate a virtual environment ------------------------------ To use a new virtual environment you may need to activate it; `virtualenvwrapper`_ will try to automatically activate your new environment when you create it. Activation does three things (which you could do manually, though it would be inconvenient): * Put the ``bin`` directory for the virtual environment at the front of your ``$PATH``. * Add the name of the virtual environment to your command prompt. If you have successfully switched to a new environment called ``ENV`` your prompt should look something like this: ``(ENV)[~] $`` * conda provides a mechanism for packages to install scripts or export environment variables on activation. The commands below allow you to switch between virtual environments in addition to activating new ones. * `conda`_: Activate the environment ``ENV`` with:: source activate ENV * `virtualenvwrapper`_: Activate the environment ``ENV`` with:: workon ENV .. _deactivate_env: Deactivate a virtual environment -------------------------------- At some point you may want to go back to your normal python environment. Do that with: * `conda`_: ``source deactivate`` * `virtualenvwrapper`_: ``deactivate`` + Note that in ``virtualenvwrapper 4.1.1`` the output of ``mkvirtualenv`` says you should use ``source deactivate``; that does not seem to actually work. .. _delete_env: Delete a virtual environment ---------------------------- In both `virtualenvwrapper`_ and `conda`_ you can simply delete the directory in which the ``ENV`` is located; both also provide commands to make that a bit easier. * `conda`_: ``conda remove --all -n ENV`` * `virtualenvwrapper`_: ``rmvirtualenv ENV`` .. _documentation for virtualenvwrapper: http://virtualenvwrapper.readthedocs.org/en/latest/install.html .. _virtualenvwrapper command documentation: http://virtualenvwrapper.readthedocs.org/en/latest/command_ref.html .. _documentation for the conda command: http://docs.continuum.io/conda/examples/create.html .. _blog post announcing anaconda environments: http://www.continuum.io/blog/conda
0.821438
0.469946
:orphan: .. include:: links.inc .. _install-git: ========================== Install and configure git ========================== Get git ------- Installers and instructions for all platforms are available at http://git-scm.com/downloads .. _essential_config: Essential configuration ----------------------- Though technically not required to install `git`_ and get it running, configure `git`_ so that you get credit for your contributions:: git config --global user.name "Your Name" git config --global user.email [email protected] .. note:: Use the same email address here that you used for setting up your GitHub account to save yourself a couple of steps later, when you connect your git to GitHub. Check it with:: $ git config --list user.name=Your Name [email protected] # ...likely followed by many other configuration values .. _git_gui_options: Get a git GUI (optional) ------------------------ There are several good, free graphical interfaces for git. Even if you are proficient with `git`_ at the command line a GUI can be useful. Mac and Windows: + `SourceTree`_ + The github client for `Mac`_ or `Windows`_ Linux, Mac and Windows: + `git-cola`_ There is a more extensive list of `git GUIs`_, including non-free options, for all platforms. .. _git GUIs: http://git-scm.com/downloads/guis .. _SourceTree: http://www.sourcetreeapp.com/ .. _Mac: http://mac.github.com/ .. _Windows: http://windows.github.com/ .. _git-cola: http://git-cola.github.io/
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/workflow/git_install.rst
git_install.rst
0.64232
0.172067
:orphan: .. _command_history: ======================================================= Condensed example of contributing code to Scikit-beam ======================================================= There are two versions: * :ref:`with output` (easier to read along with) * :ref:`no output` (easier to cut and paste from) .. _with output: Example, with output ==================== In this version the commands are on the lines that begin with ``$``; the other lines are comments or output. .. literalinclude:: command_history_with_output.sh :language: bash .. _no output: Example, no output ================== .. literalinclude:: command_history.sh :language: bash
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/workflow/command_history.rst
command_history.rst
0.918403
0.437523
:orphan: .. _git-resources: ============= Git resources ============= Tutorials and summaries ======================= * `GitHub Help`_ has an excellent series of how-to guides. * `learn.github`_ has an excellent series of tutorials * The `pro git book`_ is a good in-depth book on git. * A `git cheat sheet`_ is a page giving summaries of common commands. * The `git user manual`_ * The `git tutorial`_ * The `git community book`_ * `git ready`_ |emdash| a nice series of tutorials * `git casts`_ |emdash| video snippets giving git how-tos. * `git magic`_ |emdash| extended introduction with intermediate detail * The `git parable`_ is an easy read explaining the concepts behind git. * `git foundation`_ expands on the `git parable`_. * Fernando Perez' git page |emdash| `Fernando's git page`_ |emdash| many links and tips * Fernando Perez's `ipython notebook on using git in science`_ * A good but technical page on `git concepts`_ * `git svn crash course`_: git for those of us used to subversion_ Manual pages online =================== You can get these on your own machine with (e.g) ``git help push`` or (same thing) ``git push --help``, but, for convenience, here are the online manual pages for some common commands: * `git add`_ * `git branch`_ * `git checkout`_ * `git clone`_ * `git commit`_ * `git config`_ * `git diff`_ * `git log`_ * `git pull`_ * `git push`_ * `git remote`_ * `git status`_ .. include:: links.inc
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/development/workflow/git_resources.rst
git_resources.rst
0.864611
0.856332
Moved extract_label_indices function from correlation.py to roi.py ------------------------------------------------------------------ This function is a better fit for the roi module. ===================== =========================== ============== Function Name Original Location New Location --------------------- --------------------------- -------------- extract_label_indices /core/correlation /core/roi ===================== =========================== ==============
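To make the relocation concrete, here is a minimal sketch of how a call site would be updated; the new import path follows the table above, while the call itself (passing a label image and unpacking the nonzero labels together with their flat indices) is an assumption about the function's interface rather than something stated in this note: ::

    import numpy as np

    # old location (before this change):
    #     from skbeam.core.correlation import extract_label_indices
    # new location:
    from skbeam.core.roi import extract_label_indices

    # a tiny label image: 0 is background, 1 and 2 mark two regions of interest
    labels = np.array([[0, 1, 1],
                       [0, 2, 2],
                       [0, 0, 2]])

    # assumed interface: the label value and flat index of every nonzero pixel
    label_values, indices = extract_label_indices(labels)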
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/api_changes/2015-09-16-roi.rst
2015-09-16-roi.rst
0.743447
0.271131
Moved extract_label_indices function from correlation.py to roi.py ------------------------------------------------------------------ This function is a better fit for the roi module. ===================== =========================== ============== Function Name Original Location New Location --------------------- --------------------------- -------------- extract_label_indices /core/correlation /core/roi ===================== =========================== ==============
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/api_changes/2015-09-16-correlation.rst
2015-09-16-correlation.rst
0.742141
0.27008
Significant changes to the fitting package ------------------------------------------ ================ =========================== ============== Module Name Original Location New Location ---------------- --------------------------- -------------- physics_model.py /fitting/model/ /fitting/ physics_peak.py /fitting/model/ /fitting/ background.py /fitting/model/background/ /fitting/ api.py N/A /fitting/ ================ =========================== ============== physics_model.py and physics_peak.py were moved to the root of the fitting package, and an api.py module was created that imports all of the relevant names from those two modules.
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/api_changes/2014-11-12-fitting.rst
2014-11-12-fitting.rst
0.835148
0.208763
========= Changes ========= - Changed function argument names. - Each function now has only one output. - from :: elastic_peak(x, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, area, epsilon=2.96) to :: elastic_peak(x, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, coherent_sct_amplitude, epsilon=2.96) - from :: def compton_peak(x, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, compton_angle, compton_fwhm_corr, compton_amplitude, compton_f_step, compton_f_tail, compton_gamma, compton_hi_f_tail, compton_hi_gamma, area, epsilon=2.96, matrix=False) to :: def compton_peak(x, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, compton_angle, compton_fwhm_corr, compton_amplitude, compton_f_step, compton_f_tail, compton_gamma, compton_hi_f_tail, compton_hi_gamma, epsilon=2.96, matrix=False)
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/api_changes/2014-09-12-physics_peak.rst
2014-09-12-physics_peak.rst
0.866444
0.29557
========= Changes ========= - Changed function arguments: added center positions and made the independent variable x the first argument of each function. from :: def gauss_peak(area, sigma, dx) to :: def gauss_peak(x, area, center, sigma) from :: gauss_step(area, sigma, dx, peak_e) to :: gauss_step(x, area, center, sigma, peak_e) from :: def gauss_tail(area, sigma, dx, gamma) to :: gauss_tail(x, area, center, sigma, gamma) from :: elastic_peak(coherent_sct_energy, fwhm_offset, fwhm_fanoprime, area, ev, epsilon=2.96) to :: elastic_peak(x, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, area, epsilon=2.96) from :: def compton_peak(coherent_sct_energy, fwhm_offset, fwhm_fanoprime, compton_angle, compton_fwhm_corr, compton_amplitude, compton_f_step, compton_f_tail, compton_gamma, compton_hi_f_tail, compton_hi_gamma, area, ev, epsilon=2.96, matrix=False) to :: def compton_peak(x, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, compton_angle, compton_fwhm_corr, compton_amplitude, compton_f_step, compton_f_tail, compton_gamma, compton_hi_f_tail, compton_hi_gamma, area, epsilon=2.96, matrix=False)
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/api_changes/2014-09-09-physics_peak.rst
2014-09-09-physics_peak.rst
0.816333
0.569494
.. _logging: Logging ======= Getting feedback from running programs is invaluable for assessing the health and performance of the code. However, copious ``print`` statements are not practical on projects larger than short scripts. This is particularly true for libraries which are imported into user code; it is rude to spam the user with debugging output. This is such a common need that the tools to solve it are built into the core python library in the :mod:`logging` module. - `Demonstration <https://github.com/tacaswell/logger_demo>`_ - `Basic tutorial <https://docs.python.org/2/howto/logging.html>`_ - `Detailed reference <https://docs.python.org/2/library/logging.html>`_ - `Cookbook <https://docs.python.org/2/howto/logging-cookbook.html>`_ Rough Overview -------------- The logging module provides a framework for generating and propagating messages. Each process has a hierarchy of :class:`Logger` objects. Using the methods on these objects you can generate log messages with a severity level attached. The log messages are then formatted (using a :class:`Formatter` object) and distributed by :class:`Handler` objects attached to the :class:`Logger`. The messages are also passed up to any parent :class:`Logger` objects. Each :class:`Handler` and :class:`Logger` object has a severity threshold; messages below that threshold are ignored. This enables easy run-time selection of the verbosity of the logging. There are five default levels of logging, listed in decreasing order of severity: - Critical: the program may crash in the near future; things have gone very sideways. - Error/Exception: something has gone badly wrong; an operation failed. - Warning: something has gone slightly wrong or might go wrong in the future. - Info: status messages indicating everything is working correctly. - Debug: messages that are useful for debugging, but are too detailed to be generally useful. Nuts and Bolts -------------- The loggers are hierarchical (by dotted name). If a logger does not have a level explicitly set, it will use the level of its parent. Unless prohibited, loggers will forward all of their accepted messages to their parents. Create a message ```````````````` A :code:`logger` is defined in each module of our libraries by :code:`logger = logging.getLogger(__name__)` where :code:`__name__` is the module name. Creating messages with the various severity levels is done by :: logger.debug("this is a debug message") logger.info("this is a info message") logger.warning("this is a warning message") logger.error("this is a error message") logger.critical("this is a critical message") which will yield a log message with the body "this is a [level] message". The log messages also understand basic string formatting, so :: logger.debug("this is a %s debug message no. %d", "great", 42) will yield a message with the body "this is a great debug message no. 42".
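The dotted-name hierarchy and the level thresholds described above can be demonstrated with a few lines of standard-library-only code; this is a minimal sketch, and the ``skbeam``/``skbeam.core`` names are used purely as illustrative dotted names: ::

    import logging

    parent = logging.getLogger("skbeam")
    child = logging.getLogger("skbeam.core")  # a child of "skbeam" by dotted name

    parent.setLevel(logging.WARNING)  # set a threshold on the parent only

    # the child has no explicit level, so it falls back to its parent's WARNING
    assert child.getEffectiveLevel() == logging.WARNING

    child.info("dropped: below the effective threshold")
    child.warning("kept: at or above the effective threshold")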
Attaching a Handler ``````````````````` By default the library does not attach a non-null :class:`Handler` to any of the :class:`Logger` objects (`see <https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library>`_). In order to get the messages out, a :class:`Handler` (with its own :class:`Formatter`) needs to be attached to the logger :: h = logging.StreamHandler() form = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') h.setFormatter(form) logger.addHandler(h) The above code demonstrates the mechanism by which a `StreamHandler` is attached to the logger. `StreamHandler` writes to stderr by default. `Detailed explanations of the available handlers <https://docs.python.org/2/howto/logging.html#useful-handlers>`_. Defining a Formatter ```````````````````` A :class:`Formatter` is essentially a string format specification. For a full list of the data available and the corresponding variable names, see `this list <https://docs.python.org/2/library/logging.html#logrecord-attributes>`_. For example, to show the time, the severity, and the message :: form = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') Or to see the time, the level as a number, the function the logging call was in, and the message :: form = logging.Formatter('%(asctime)s - %(levelno)s - %(funcName)s - %(message)s') Or to completely disregard everything :: form = logging.Formatter('HI MOM')
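Putting the pieces together, the following is a minimal end-to-end sketch that wires a module-level logger to a ``StreamHandler`` with a custom ``Formatter``; only the standard library is used, and the logger name is illustrative: ::

    import logging

    logger = logging.getLogger("skbeam.example")  # illustrative dotted name
    logger.setLevel(logging.DEBUG)                # the logger passes everything through

    h = logging.StreamHandler()                   # writes to stderr by default
    h.setLevel(logging.INFO)                      # the handler emits INFO and above
    form = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    h.setFormatter(form)
    logger.addHandler(h)

    logger.debug("not emitted: below the handler's threshold")
    logger.info("emitted using the timestamp - level - message format above")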
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/resource/dev_guide/logging.rst
logging.rst
0.880438
0.863392
.. _adding_files: New Sub-packages and Modules ============================ When adding new packages and modules (which map to folders and files) to the library, you are required to update the build/install system (:file:`setup.py`) and add files to the documentation folder. All python source code must be under the main :file:`skbeam` directory. Non-python sources go in the :file:`src` directory. Build ----- For a folder in the source-tree to be a package it must have an :file:`__init__.py` file (even if it is empty). All of the python (:file:`*.py`) files in that package are then recognized as modules in that package which can be imported in other files (see the Relative Imports section below). In order for :mod:`distutils` to work, it must be explicitly told what packages from the source tree to byte-compile and install. This is done via the :code:`packages` keyword argument (kwarg) to :func:`setup` in :file:`setup.py`. If you add a package, then its dotted name must be added to this list. For example, if you add a new package called :code:`utils` to the :code:`skbeam` folder, the following setup.py file: :: setup( name='scikit-beam', version='0', author='Brookhaven National Lab', packages=["skbeam"], ) would need to be modified to:: setup( name='scikit-beam', version='0', author='Brookhaven National Lab', packages=["skbeam", "skbeam.utils"], <------- modification happened here ) Documentation ------------- See :ref:`doc_doc` for documentation about writing and building the documentation. Continuing the example from above where a 'utils' source code package was added, a folder called :file:`/doc/resource/api/utils` should be added. Let's also presume that you've got :file:`fitting.py` in :file:`/skbeam/utils/`. In the documentation :file:`/doc/resource/api/utils` folder, create a file named :file:`index.rst` with the contents: :: UTILS API ========= Contents: .. toctree:: :maxdepth: 2 fitting Also, add the :file:`/doc/resource/api/utils/index.rst` to :file:`/doc/resource/api/index.rst`. This will tell ``sphinx`` to include the new package in the API documentation. Now, let's create a module called :file:`fitting.py` in the :file:`utils` package. When you add :file:`fitting.py` you need to add a corresponding file in the documentation folder structure: :file:`/doc/resource/api/utils/fitting.rst`. In :file:`fitting.rst` use the following template: :: ====================== :mod:`fitting` Module ====================== Any prose you want to add about the module, such as examples, discussion, or saying hi to your mom can go here. .. automodule:: skbeam.utils.fitting :members: :show-inheritance: :undoc-members: This will automatically walk the module to extract and format the docstrings of all the classes and functions in the module. Testing ------- When you add a new module or package, please add the corresponding files and folders in the :file:`skbeam/tests` folder. Packages get :file:`test_packagename` and modules get :file:`test_module_name.py` in the proper directory. Using the example above, you would create the directory :file:`/skbeam/tests/test_utils/` and the file :file:`test_fitting.py` in the :file:`test_utils` folder (a minimal sketch of such a file is shown at the end of this page). Remember: Write a test for all new functionality!! Relative Imports ---------------- See the issue (#?) in the scikit-beam repo on github.
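As referenced in the Testing section above, here is a minimal, hypothetical sketch of what :file:`/skbeam/tests/test_utils/test_fitting.py` could look like; the ``line`` model function it exercises is invented purely for illustration (a real test would import the code under test from :mod:`skbeam.utils.fitting`): ::

    # skbeam/tests/test_utils/test_fitting.py  (hypothetical sketch)
    import numpy as np
    from numpy.testing import assert_allclose

    def line(x, slope, intercept):
        # stand-in for a function that would live in skbeam.utils.fitting
        return slope * x + intercept

    def test_line_recovers_known_values():
        x = np.linspace(0, 10, 11)
        y = line(x, slope=2.0, intercept=1.0)
        assert_allclose(y[0], 1.0)
        assert_allclose(y[-1], 21.0)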
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/resource/dev_guide/adding_files.rst
adding_files.rst
0.801431
0.421076
.. _doc_doc: Documentation ============= This page describes how to build and add to our documentation. Fully documenting the library is of the utmost importance. It is more valuable to have a fully documented library with fewer features than a feature-rich library no one can figure out how to use. Docstrings ---------- The docstrings must follow the `numpydoc <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`__ format. For the 'Returns' section of numpydoc, you must include the return variable name because this variable name is needed for automated VisTrails wrapping (the return variable names are the output ports). **THESE MUST BE LISTED IN THE ORDER THAT THE FUNCTION RETURNS THEM** Example: :: Returns ------- avg : float The average stdev : float The standard deviation A complete function-level sketch is shown at the end of this page. Sphinx ------ We are using `sphinx <http://sphinx-doc.org/>`_ to build the documentation. In addition to `sphinx`, you will also need `numpydoc <https://pypi.python.org/pypi/numpydoc>`__ installed and available. Both can be installed from pypi (:code:`pip install numpydoc` and :code:`pip install sphinx`). If you want to build a PDF version of the documentation, you will also need LaTeX. To build the documentation locally, navigate to the `doc` folder and run :: make html The output website will then be in `_build/html/index.html`, which you can open in any web browser.
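As referenced in the Docstrings section, here is a minimal sketch of a complete numpydoc-style docstring with named return values; the function itself is hypothetical and exists only to illustrate the format: ::

    import numpy as np

    def summarize(data):
        """Compute simple summary statistics of a 1-D array.

        Parameters
        ----------
        data : array-like
            The values to summarize.

        Returns
        -------
        avg : float
            The average
        stdev : float
            The standard deviation
        """
        data = np.asarray(data)
        avg = data.mean()
        stdev = data.std()
        return avg, stdev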
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/resource/dev_guide/doc_doc.rst
doc_doc.rst
0.733738
0.47524
====================== :mod:`core` subpackage ====================== .. currentmodule:: skbeam.core .. _user-docs: Scientific Algorithms --------------------- .. autosummary:: cdi.cdi_recon correlation.multi_tau_auto_corr dpc.recon dpc.dpc_runner recip.process_to_q Helper Classes -------------- Dictionary-like classes ~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: utils.MD_dict utils.verbosedict utils.RCParamDict Image warping functions ----------------------- .. autosummary:: utils.img_to_relative_xyi utils.radial_grid utils.angle_grid Peak ---- Peak fitting ~~~~~~~~~~~~ .. autosummary:: feature.peak_refinement feature.refine_quadratic feature.refine_log_quadratic feature.filter_n_largest feature.filter_peak_height Peak finding ~~~~~~~~~~~~ .. autosummary:: image.find_ring_center_acorr_1D spectroscopy.find_largest_peak Image pre-processing -------------------- .. autosummary:: utils.subtract_reference_images Histograms and Integration -------------------------- Binning ~~~~~~~ .. autosummary:: utils.bin_1D utils.wedge_integration utils.grid3d Helper functions ---------------- .. autosummary:: utils.pairwise utils.geometric_series utils.multi_tau_lags utils.bin_edges utils.bin_edges_to_centers Generating ROIs --------------- .. autosummary:: roi.kymograph roi.circular_average roi.mean_intensity roi.roi_pixel_values roi.roi_max_counts roi.segmented_rings roi.ring_edges roi.rings roi.rectangles Physical relations ------------------ .. autosummary:: utils.q_to_d utils.d_to_q utils.q_to_twotheta utils.twotheta_to_q utils.radius_to_twotheta recip.hkl_to_q recip.calibrated_pixels_to_q Boolean Logic ------------- .. autosummary:: arithmetic.logical_nand arithmetic.logical_nor arithmetic.logical_sub Calibration ----------- .. autosummary:: calibration.estimate_d_blind calibration.refine_center
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/docs/source/resource/api/core.rst
core.rst
0.833358
0.313748
import numpy as np from scipy.signal import fftconvolve def sgolay2d(image, window_size, order, derivative=None): """ Savitzky-Golay filter for 2D image arrays. See: http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html Parameters ---------- image : ndarray, shape (N,M) image to be smoothed. window_size : int the length of the window. Must be an odd integer number. order : int the order of the polynomial used in the filtering. Must be less then `window_size` - 1. deriv: int the order of the derivative to compute (default = 0 means only smoothing) Returns ------- smooth_image : ndarray, shape (N,M) the smoothed image . """ # number of terms in the polynomial expression n_terms = (order + 1) * (order + 2) / 2.0 if window_size % 2 == 0: raise ValueError("window_size must be odd") if window_size**2 < n_terms: raise ValueError("order is too high for the window size") half_size = window_size // 2 # exponents of the polynomial. # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ... # this line gives a list of two item tuple. Each tuple contains # the exponents of the k-th term. First element of tuple is for x # second element for y. # Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...] exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)] # coordinates of points ind = np.arange(-half_size, half_size + 1, dtype=np.float64) dx = np.repeat(ind, window_size) dy = np.tile(ind, [window_size, 1]).reshape( window_size**2, ) # build matrix of system of equation A = np.empty((window_size**2, len(exps))) for i, exp in enumerate(exps): A[:, i] = (dx ** exp[0]) * (dy ** exp[1]) # pad input array with appropriate values at the four borders new_shape = image.shape[0] + 2 * half_size, image.shape[1] + 2 * half_size smooth_image = np.zeros((new_shape)) # top band band = image[0, :] smooth_image[:half_size, half_size:-half_size] = band - np.abs(np.flipud(image[1 : half_size + 1, :]) - band) # bottom band band = image[-1, :] smooth_image[-half_size:, half_size:-half_size] = band + np.abs( np.flipud(image[-half_size - 1 : -1, :]) - band ) # left band band = np.tile(image[:, 0].reshape(-1, 1), [1, half_size]) smooth_image[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(image[:, 1 : half_size + 1]) - band) # right band band = np.tile(image[:, -1].reshape(-1, 1), [1, half_size]) smooth_image[half_size:-half_size, -half_size:] = band + np.abs( np.fliplr(image[:, -half_size - 1 : -1]) - band ) # central band smooth_image[half_size:-half_size, half_size:-half_size] = image # top left corner band = image[0, 0] smooth_image[:half_size, :half_size] = band - np.abs( np.flipud(np.fliplr(image[1 : half_size + 1, 1 : half_size + 1])) - band ) # bottom right corner band = image[-1, -1] smooth_image[-half_size:, -half_size:] = band + np.abs( np.flipud(np.fliplr(image[-half_size - 1 : -1, -half_size - 1 : -1])) - band ) # top right corner band = smooth_image[half_size, -half_size:] smooth_image[:half_size, -half_size:] = band - np.abs( np.flipud(smooth_image[half_size + 1 : 2 * half_size + 1, -half_size:]) - band ) # bottom left corner band = smooth_image[-half_size:, half_size].reshape(-1, 1) smooth_image[-half_size:, :half_size] = band - np.abs( np.fliplr(smooth_image[-half_size:, half_size + 1 : 2 * half_size + 1]) - band ) # solve system and convolve if derivative is None: m = np.linalg.pinv(A)[0].reshape((window_size, -1)) return fftconvolve(smooth_image, m, mode="valid") elif derivative == "col": c = np.linalg.pinv(A)[1].reshape((window_size, -1)) return fftconvolve(smooth_image, 
-c, mode="valid") elif derivative == "row": r = np.linalg.pinv(A)[2].reshape((window_size, -1)) return fftconvolve(smooth_image, -r, mode="valid") elif derivative == "both": c = np.linalg.pinv(A)[1].reshape((window_size, -1)) r = np.linalg.pinv(A)[2].reshape((window_size, -1)) return fftconvolve(smooth_image, -r, mode="valid"), fftconvolve(smooth_image, -c, mode="valid")
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/smoothing.py
smoothing.py
0.896574
0.764012
from __future__ import absolute_import, division, print_function from numpy import add, divide, logical_and, logical_not, logical_or, logical_xor, multiply, subtract __all__ = [ "add", "subtract", "multiply", "divide", "logical_and", "logical_or", "logical_nor", "logical_xor", "logical_not", "logical_sub", "logical_nand", ] def logical_nand(x1, x2, out=None): """Computes the truth value of NOT (x1 AND x2) element wise. This function enables the computation of the LOGICAL_NAND of two image or volume data sets. This function enables easy isolation of all data points NOT INCLUDED IN BOTH SOURCE DATA SETS. This function can be used for data comparison, material isolation, noise removal, or mask application/generation. Parameters ---------- x1, x2 : array-like Input arrays. `x1` and `x2` must be of the same shape. output : array-like Boolean result with the same shape as `x1` and `x2` of the logical operation on corresponding elements of `x1` and `x2`. Returns ------- output : {ndarray, bool} Boolean result with the same shape as `x1` and `x2` of the logical NAND operation on corresponding elements of `x1` and `x2`. Examples -------- >>> x1 = [[0,0,1,0,0], [2,1,1,1,2], [2,0,1,0,2]] >>> x2 = [[0,0,0,0,0], [2,1,1,1,2], [0,0,0,0,0]] >>> logical_nand(x1, x2) array([[ True, True, True, True, True], [False, False, False, False, False], [ True, True, True, True, True]], dtype=bool) """ return logical_not(logical_and(x1, x2, out), out) def logical_nor(x1, x2, out=None): """Compute truth value of NOT (x1 OR x2)) element wise. This function enables the computation of the LOGICAL_NOR of two image or volume data sets. This function enables easy isolation of all data points NOT INCLUDED IN EITHER OF THE SOURCE DATA SETS. This function can be used for data comparison, material isolation, noise removal, or mask application/generation. Parameters ---------- x1, x2 : array-like Input arrays. `x1` and `x2` must be of the same shape. output : array-like Boolean result with the same shape as `x1` and `x2` of the logical operation on corresponding elements of `x1` and `x2`. Returns ------- output : {ndarray, bool} Boolean result with the same shape as `x1` and `x2` of the logical NOR operation on corresponding elements of `x1` and `x2`. Examples -------- >>> x1 = [[0,0,1,0,0], [2,1,1,1,2], [2,0,1,0,2]] >>> x2 = [[0,0,0,0,0], [2,1,1,1,2], [0,0,0,0,0]] >>> logical_nor(x1, x2) array([[ True, True, False, True, True], [False, False, False, False, False], [False, True, False, True, False]], dtype=bool) """ return logical_not(logical_or(x1, x2, out), out) def logical_sub(x1, x2, out=None): """Compute truth value of x1 AND (NOT (x1 AND x2)) element wise. This function enables LOGICAL SUBTRACTION of one binary image or volume data set from another. This function can be used to remove phase information, interface boundaries, or noise, present in two data sets, without having to worry about mislabeling of pixels which would result from arithmetic subtraction. This function will evaluate as true for all "true" voxels present ONLY in Source Dataset 1. This function can be used for data cleanup, or boundary/interface analysis. Parameters ---------- x1, x2 : array-like Input arrays. `x1` and `x2` must be of the same shape. output : array-like Boolean result with the same shape as `x1` and `x2` of the logical operation on corresponding elements of `x1` and `x2`. Returns ------- output : {ndarray, bool} Boolean result with the same shape as `x1` and `x2` of the logical SUBTRACT operation on corresponding elements of `x1` and `x2`. 
Examples -------- >>> x1 = [[0,0,1,0,0], [2,1,1,1,2], [2,0,1,0,2]] >>> x2 = [[0,0,0,0,0], [2,1,1,1,2], [0,0,0,0,0]] >>> logical_sub(x1, x2) array([[False, False, True, False, False], [False, False, False, False, False], [ True, False, True, False, True]], dtype=bool) """ return logical_and(x1, logical_not(logical_and(x1, x2, out), out), out)
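# --- Usage sketch (not part of the original module) --------------------------
# A small illustration of combining these helpers for mask generation; the
# arrays are the same ones used in the docstring examples above.
if __name__ == "__main__":
    import numpy as np

    x1 = np.array([[0, 0, 1, 0, 0], [2, 1, 1, 1, 2], [2, 0, 1, 0, 2]])
    x2 = np.array([[0, 0, 0, 0, 0], [2, 1, 1, 1, 2], [0, 0, 0, 0, 0]])

    only_in_x1 = logical_sub(x1, x2)    # True where x1 is nonzero and x2 is not
    in_neither = logical_nor(x1, x2)    # True where both inputs are zero
    not_in_both = logical_nand(x1, x2)  # True everywhere except where both are nonzero

    print(only_in_x1.sum(), in_neither.sum(), not_in_both.sum())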
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/arithmetic.py
arithmetic.py
0.927569
0.703397
from __future__ import absolute_import, division, print_function from collections import deque from string import Template import numpy as np import scipy.signal from .constants import calibration_standards from .feature import filter_peak_height, peak_refinement, refine_log_quadratic from .utils import angle_grid, bin_1D, bin_edges_to_centers, pairwise, radial_grid def estimate_d_blind(name, wavelength, bin_centers, ring_average, window_size, max_peak_count, thresh): """ Estimate the sample-detector distance Given a radially integrated calibration image return an estimate for the sample-detector distance. This function does not require a rough estimate of what d should be. For the peaks found the detector-sample distance is estimated via .. math :: D = \\frac{r}{\\tan 2\\theta} where :math:`r` is the distance in mm from the calibrated center to the ring on the detector and :math:`D` is the distance from the sample to the detector. Parameters ---------- name : str The name of the calibration standard. Used to look up the expected peak location Valid options: $name_ops wavelength : float The wavelength of scattered x-ray in nm bin_centers : array The distance from the calibrated center to the center of the ring's annulus in mm ring_average : array The average intensity in the given ring of a azimuthally integrated powder pattern. In counts [arb] window_size : int The number of elements on either side of a local maximum to use for locating and refining peaks. Candidates are identified as a relative maximum in a window sized (2*window_size + 1) and the same window is used for fitting the peaks to refine the location. max_peak_count : int Use at most this many peaks thresh : float Fraction of maximum peak height Returns ------- dist_sample : float The detector-sample distance in mm. This is the mean of the estimate from all of the peaks used. std_dist_sample : float The standard deviation of d computed from the peaks used. """ # get the calibration standard cal = calibration_standards[name] # find the local maximums cands = scipy.signal.argrelmax(ring_average, order=window_size)[0] # filter local maximums by size cands = filter_peak_height(ring_average, cands, thresh * np.max(ring_average), window=window_size) # TODO insert peak identification validation. This might be better than # improving the threshold value. # refine the locations of the peaks peaks_x, peaks_y = peak_refinement(bin_centers, ring_average, cands, window_size, refine_log_quadratic) # compute tan(2theta) for the expected peaks tan2theta = np.tan(cal.convert_2theta(wavelength)) # figure out how many peaks we can look at slc = slice(0, np.min([len(tan2theta), len(peaks_x), max_peak_count])) # estimate the sample-detector distance for each of the peaks d_array = peaks_x[slc] / tan2theta[slc] return np.mean(d_array), np.std(d_array) # Set an attribute for the calibration names that are valid options. This # attribute also aids in autowrapping into VisTrails estimate_d_blind.name = list(calibration_standards) if estimate_d_blind.__doc__ is not None: estimate_d_blind.__doc__ = Template(estimate_d_blind.__doc__).substitute( name_ops=repr(sorted(estimate_d_blind.name)) ) def refine_center( image, calibrated_center, pixel_size, phi_steps, max_peaks, thresh, window_size, nx=None, min_x=None, max_x=None, ): """ Refines the location of the center of the beam. This relies on being able to see the whole powder pattern. 
Parameters ---------- image : ndarray The image calibrated_center : tuple (row, column) the estimated center pixel_size : tuple (pixel_height, pixel_width) phi_steps : int How many regions to split the ring into, should be >10 max_peaks : int Number of rings to look it thresh : float Fraction of maximum peak height window_size : int, optional The window size to use (in bins) to use when refining peaks nx : int, optional Number of bins to use for radial binning min_x : float, optional The minimum radius to use for radial binning max_x : float, optional The maximum radius to use for radial binning Returns ------- calibrated_center : tuple The refined calibrated center. """ if nx is None: nx = int(np.mean(image.shape) * 2) phi = angle_grid(calibrated_center, image.shape, pixel_size).ravel() r = radial_grid(calibrated_center, image.shape, pixel_size).ravel() II = image.ravel() phi_steps = np.linspace(-np.pi, np.pi, phi_steps, endpoint=True) out = deque() for phi_start, phi_end in pairwise(phi_steps): mask = (phi <= phi_end) * (phi > phi_start) out.append(bin_1D(r[mask], II[mask], nx=nx, min_x=min_x, max_x=max_x)) out = list(out) ring_trace = [] for bins, b_sum, b_count in out: mask = b_sum > 10 avg = b_sum[mask] / b_count[mask] bin_centers = bin_edges_to_centers(bins)[mask] cands = scipy.signal.argrelmax(avg, order=window_size)[0] # filter local maximums by size cands = filter_peak_height(avg, cands, thresh * np.max(avg), window=window_size) ring_trace.append(bin_centers[cands[:max_peaks]]) tr_len = [len(rt) for rt in ring_trace] mm = np.min(tr_len) ring_trace = np.vstack([rt[:mm] for rt in ring_trace]).T mean_dr = np.mean(ring_trace - np.mean(ring_trace, axis=1, keepdims=True), axis=0) phi_centers = bin_edges_to_centers(phi_steps) delta = np.mean(np.diff(phi_centers)) # this is doing just one term of a Fourier series # note that we have to convert _back_ to pixels from real units # TODO do this with better integration/handle repeat better col_shift = np.sum(np.sin(phi_centers) * mean_dr) * delta / (np.pi * pixel_size[1]) row_shift = np.sum(np.cos(phi_centers) * mean_dr) * delta / (np.pi * pixel_size[0]) return tuple(np.array(calibrated_center) + np.array([row_shift, col_shift]))
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/calibration.py
calibration.py
0.918877
0.648439
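A minimal sketch of the distance estimate that `estimate_d_blind` in the calibration module above performs: for each refined peak radius r (in mm) and the standard's expected scattering angle 2θ, the sample-detector distance is D = r / tan(2θ), and the function reports the mean and standard deviation over the peaks it keeps. The radii and angles below are made-up illustration values, not a real calibrant.

```python
import numpy as np

# Hypothetical refined peak radii on the detector (mm) and the matching
# expected 2-theta values (radians) for some calibration standard.
peak_radii_mm = np.array([30.1, 49.8, 66.2])
two_theta = np.array([0.105, 0.172, 0.226])

# D = r / tan(2*theta), one estimate per peak (the same relation used in
# estimate_d_blind above).
d_per_peak = peak_radii_mm / np.tan(two_theta)

dist_sample = d_per_peak.mean()
std_dist_sample = d_per_peak.std()
print(dist_sample, std_dist_sample)
```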
from __future__ import absolute_import, division, print_function import time from collections import namedtuple import numpy as np from .utils import verbosedict try: from pyFAI import geometry as geo except ImportError: geo = None import logging logger = logging.getLogger(__name__) def process_to_q( setting_angles, detector_size, pixel_size, calibrated_center, dist_sample, wavelength, ub, frame_mode=None ): """ This will compute the hkl values for all pixels in a shape specified by detector_size. Parameters ---------- setting_angles : ndarray six angles of all the images - Required shape is [num_images][6] and required type is something that can be cast to a 2D numpy array Angle order: delta, theta, chi, phi, mu, gamma (degrees) detector_size : tuple 2 element tuple defining the number of pixels in the detector. Order is (num_columns, num_rows) pixel_size : tuple 2 element tuple defining the size of each pixel in mm. Order is (column_pixel_size, row_pixel_size). If not in mm, must be in the same units as `dist_sample` calibrated_center : tuple 2 element tuple defining the center of the detector in pixels. Order is (column_center, row_center)(x y) dist_sample : float distance from the sample to the detector (mm). If not in mm, must be in the same units as `pixel_size` wavelength : float wavelength of incident radiation (Angstroms) ub : ndarray UB matrix (orientation matrix) 3x3 matrix frame_mode : str, optional Frame mode defines the data collection mode and thus the desired output from this function. Defaults to hkl mode (frame_mode=4) 'theta' : Theta axis frame. 'phi' : Phi axis frame. 'cart' : Crystal cartesian frame. 'hkl' : Reciprocal lattice units frame. See the `process_to_q.frame_mode` attribute for an exact list of valid options. Returns ------- hkl : ndarray (Qx, Qy, Qz) - HKL values shape is [num_images * num_rows * num_columns][3] Notes ----- Six angles of an image: (delta, theta, chi, phi, mu, gamma ) These axes are defined according to the following references. References: text [1]_, text [2]_ .. [1] M. Lohmeier and E.Vlieg, "Angle calculations for a six-circle surface x-ray diffractometer," J. Appl. Cryst., vol 26, pp 706-716, 1993. .. [2] E. Vlieg, "A (2+3)-Type surface diffractometer: Mergence of the z-axis and (2+2)-Type geometries," J. Appl. Cryst., vol 31, pp 198-203, 1998. """ try: from ..ext import ctrans except ImportError: raise NotImplementedError( "ctrans is not available on your platform. See" "https://github.com/scikit-beam/scikit-beam/issues/418" "to follow updates to this problem." ) # Set default threads # set default frame_mode if frame_mode is None: frame_mode = 4 else: str_to_int = verbosedict((k, j + 1) for j, k in enumerate(process_to_q.frame_mode)) frame_mode = str_to_int[frame_mode] # ensure the ub matrix is an array ub = np.asarray(ub) # ensure setting angles is a 2-D setting_angles = np.atleast_2d(setting_angles) if setting_angles.ndim != 2: raise ValueError( "setting_angles is expected to be a 2-D array with" " dimensions [num_images][num_angles]. You provided " "an array with dimensions {0}" "".format(setting_angles.shape) ) if setting_angles.shape[1] != 6: raise ValueError( "It is expected that there should be six angles in " "the setting_angles parameter. 
You provided {0}" " angles.".format(setting_angles.shape[1]) ) # *********** Converting to Q ************** # starting time for the process t1 = time.time() # ctrans - c routines for fast data analysis hkl = ctrans.ccdToQ( angles=setting_angles * np.pi / 180.0, mode=frame_mode, ccd_size=(detector_size), ccd_pixsize=(pixel_size), ccd_cen=(calibrated_center), dist=dist_sample, wavelength=wavelength, UBinv=np.linalg.inv(ub), ) # ending time for the process t2 = time.time() logger.info( "Processing time for {0} {1} x {2} images took {3} seconds." "".format(setting_angles.shape[0], detector_size[0], detector_size[1], (t2 - t1)) ) return hkl # Assign frame_mode as an attribute to the process_to_q function so that the # autowrapping knows what the valid options are process_to_q.frame_mode = ["theta", "phi", "cart", "hkl"] def hkl_to_q(hkl_arr): """ This module compute the reciprocal space (q) values from known HKL array for each pixel of the detector for all the images Parameters ---------- hkl_arr : ndarray (Qx, Qy, Qz) - HKL array shape is [num_images * num_rows * num_columns][3] Returns ------- q_val : ndarray Reciprocal values for each pixel for all images shape is [num_images * num_rows * num_columns] """ return np.linalg.norm(hkl_arr, axis=1) def calibrated_pixels_to_q(detector_size, pyfai_kwargs): """ For a given detector and pyfai calibrated geometry give back the q value for each pixel in the detector. Parameters ---------- detector_size : tuple 2 element tuple defining the number of pixels in the detector. Order is (num_columns, num_rows) pyfai_kwargs: dict The dictionary of pyfai geometry kwargs, given by pyFAI's calibration Ex: dist, poni1, poni2, rot1, rot2, rot3, splineFile, wavelength, detector, pixel1, pixel2 Returns ------- q_val : ndarray Reciprocal values for each pixel shape is [num_rows * num_columns] """ if geo is None: raise RuntimeError("You must have pyFAI installed to use this " "function.") a = geo.Geometry(**pyfai_kwargs) return a.qArray(detector_size) gisaxs_output = namedtuple( "gisaxs_output", ["alpha_i", "theta_f", "alpha_f", "tilt_angle", "qx", "qy", "qz", "qr"] ) def gisaxs(incident_beam, reflected_beam, pixel_size, detector_size, dist_sample, wavelength, theta_i=0.0): """ This function will provide scattering wave vector(q) components(x, y, z), q parallel and incident and reflected angles for grazing-incidence small angle X-ray scattering (GISAXS) geometry. Parameters ---------- incident_beam : tuple x and y co-ordinates of the incident beam in pixels reflected_beam : tuple x and y co-ordinates of the reflected beam in pixels pixel_size : tuple pixel_size in um detector_size: tuple 2 element tuple defining no. 
of pixels(size) in the detector X and Y direction dist_sample : float sample to detector distance, in meters wavelength : float wavelength of the x-ray beam in Angstroms theta_i : float, optional out of plane angle, default 0.0 Returns ------- namedtuple `gisaxs_output` object is returned This `gisaxs_output` object contains, in this order: - alpha_i : float incident angle - theta_f : array out of plane angle shape (detector_size[0], detector_size[1]) - alpha_f : array exit angle shape (detector_size[0], detector_size[1]) - tilt_angle : float tilt angle - qx : array x component of the scattering wave vector shape (detector_size[0], detector_size[1]) - qy : array y component of the scattering wave vector shape (detector_size[0], detector_size[1]) - qz : array z component of the scattering wave vector shape (detector_size[0], detector_size[1]) - qr : array q parallel component shape (detector_size[0], detector_size[1]) Notes ----- This implementation is based on published work. [1]_ References ---------- .. [1] R. Lazzari, "IsGISAXS: a program for grazing-incidence small- angle X-ray scattering analysis of supported islands," J. Appl. Cryst., vol 35, p 406-421, 2002. """ inc_x, inc_y = incident_beam refl_x, refl_y = reflected_beam # convert pixel_size to meters pixel_size = np.asarray(pixel_size) * 10 ** (-6) # tilt angle tilt_angle = np.arctan2((refl_x - inc_x) * pixel_size[0], (refl_y - inc_y) * pixel_size[1]) # incident angle alpha_i = np.arctan2((refl_y - inc_y) * pixel_size[1], dist_sample) / 2.0 y, x = np.indices(detector_size) # exit angle alpha_f = np.arctan2((y - inc_y) * pixel_size[1], dist_sample) - alpha_i # out of plane angle two_theta = np.arctan2((x - inc_x) * pixel_size[0], dist_sample) theta_f = two_theta / 2 - theta_i # wave number wave_number = 2 * np.pi / wavelength # x component qx = (np.cos(alpha_f) * np.cos(2 * theta_f) - np.cos(alpha_i) * np.cos(2 * theta_i)) * wave_number # y component # the variables post-fixed with an underscore are intermediate steps qy_ = np.cos(alpha_f) * np.sin(2 * theta_f) - np.cos(alpha_i) * np.sin(2 * theta_i) qz_ = np.sin(alpha_f) + np.sin(alpha_i) qy = (qz_ * np.sin(tilt_angle) + qy_ * np.cos(tilt_angle)) * wave_number # z component qz = (qz_ * np.cos(tilt_angle) - qy_ * np.sin(tilt_angle)) * wave_number # q parallel qr = np.sqrt(qx**2 + qy**2) return gisaxs_output(alpha_i, theta_f, alpha_f, tilt_angle, qx, qy, qz, qr)
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/recip.py
recip.py
0.950926
0.685581
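A usage sketch for the `gisaxs` helper from the `recip` module above. The beam positions, pixel size, detector shape, distance, and wavelength are made-up illustration values; the signature and returned namedtuple fields follow the source shown above.

```python
from skbeam.core.recip import gisaxs

out = gisaxs(
    incident_beam=(60.0, 35.0),   # (x, y) direct-beam position in pixels (hypothetical)
    reflected_beam=(60.0, 80.0),  # (x, y) reflected-beam position in pixels (hypothetical)
    pixel_size=(75.0, 75.0),      # pixel size in um, per the docstring
    detector_size=(128, 128),     # detector shape in pixels
    dist_sample=2.0,              # sample-detector distance in meters
    wavelength=1.54,              # wavelength in Angstroms
)

print(out.alpha_i)       # scalar incident angle (radians)
print(out.qz.shape)      # (128, 128) map of the z component of q
print(out.qr.shape)      # (128, 128) map of q parallel
```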
from __future__ import absolute_import, division, print_function import collections import logging import numpy as np from scipy import ndimage from skimage import color, draw, feature, img_as_float from skimage.draw import line from skimage.measure import CircleModel, ransac from . import utils logger = logging.getLogger(__name__) def rectangles(coords, shape): """ This function wil provide the indices array for rectangle region of interests. Parameters ---------- coords : iterable coordinates of the upper-left corner and width and height of each rectangle: e.g., [(x, y, w, h), (x, y, w, h)] shape : tuple Image shape which is used to determine the maximum extent of output pixel coordinates. Order is (rr, cc). Returns ------- label_array : array Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding to the order they are specified in coords. Order is (rr, cc). """ labels_grid = np.zeros(shape, dtype=np.int64) for i, (col_coor, row_coor, col_val, row_val) in enumerate(coords): left, right = np.max([col_coor, 0]), np.min([col_coor + col_val, shape[0]]) top, bottom = np.max([row_coor, 0]), np.min([row_coor + row_val, shape[1]]) slc1 = slice(left, right) slc2 = slice(top, bottom) if np.any(labels_grid[slc1, slc2]): raise ValueError("overlapping ROIs") # assign a different scalar for each roi labels_grid[slc1, slc2] = i + 1 return labels_grid def rings(edges, center, shape): """ Draw annual (ring-shaped) shaped regions of interest. Each ring will be labeled with an integer. Regions outside any ring will be filled with zeros. Parameters ---------- edges: list giving the inner and outer radius of each ring e.g., [(1, 2), (11, 12), (21, 22)] center: tuple point in image where r=0; may be a float giving subpixel precision. Order is (rr, cc). shape: tuple Image shape which is used to determine the maximum extent of output pixel coordinates. Order is (rr, cc). Returns ------- label_array : array Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding to the order they are specified in edges. """ edges = np.atleast_2d(np.asarray(edges)).ravel() if not 0 == len(edges) % 2: raise ValueError( "edges should have an even number of elements, " "giving inner, outer radii for each ring" ) if not np.all(np.diff(edges) >= 0): raise ValueError( "edges are expected to be monotonically increasing, " "giving inner and outer radii of each ring from " "r=0 outward" ) r_coord = utils.radial_grid(center, shape).ravel() return _make_roi(r_coord, edges, shape) def ring_edges(inner_radius, width, spacing=0, num_rings=None): """Calculate the inner and outer radius of a set of rings. The number of rings, their widths, and any spacing between rings can be specified. They can be uniform or varied. Parameters ---------- inner_radius : float inner radius of the inner-most ring width : float or list of floats ring thickness If a float, all rings will have the same thickness. spacing : float or list of floats, optional margin between rings, 0 by default If a float, all rings will have the same spacing. If a list, the length of the list must be one less than the number of rings. num_rings : int, optional number of rings Required if width and spacing are not lists and number cannot thereby be inferred. If it is given and can also be inferred, input is checked for consistency. 
Returns ------- edges : array inner and outer radius for each ring Examples -------- # Make two rings starting at r=1px, each 5px wide >>> ring_edges(inner_radius=1, width=5, num_rings=2) [(1, 6), (6, 11)] # Make three rings of different widths and spacings. # Since the width and spacings are given individually, the number of # rings here is simply inferred. >>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2)) [(1, 6), (7, 11), (13, 16)] """ # All of this input validation merely checks that width, spacing, and # num_rings are self-consistent and complete. width_is_list = isinstance(width, collections.abc.Iterable) spacing_is_list = isinstance(spacing, collections.abc.Iterable) if width_is_list and spacing_is_list: if len(width) != len(spacing) - 1: raise ValueError("List of spacings must be one less than list " "of widths.") if num_rings is None: try: num_rings = len(width) except TypeError: try: num_rings = len(spacing) + 1 except TypeError: raise ValueError( "Since width and spacing are constant, " "num_rings cannot be inferred and must be " "specified." ) else: if width_is_list: if num_rings != len(width): raise ValueError("num_rings does not match width list") if spacing_is_list: if num_rings - 1 != len(spacing): raise ValueError("num_rings does not match spacing list") # Now regularlize the input. if not width_is_list: width = np.ones(num_rings) * width if not spacing_is_list: spacing = np.ones(num_rings - 1) * spacing # The inner radius is the first "spacing." all_spacings = np.insert(spacing, 0, inner_radius) steps = np.array([all_spacings, width]).T.ravel() edges = np.cumsum(steps).reshape(-1, 2) return edges def segmented_rings(edges, segments, center, shape, offset_angle=0): """ Parameters ---------- edges : array inner and outer radius for each ring segments : int or list number of pie slices or list of angles in radians That is, 8 produces eight equal-sized angular segments, whereas a list can be used to produce segments of unequal size. center : tuple point in image where r=0; may be a float giving subpixel precision. Order is (rr, cc). shape: tuple Image shape which is used to determine the maximum extent of output pixel coordinates. Order is (rr, cc). angle_offset : float or array, optional offset in radians from offset_angle=0 along the positive X axis Returns ------- label_array : array Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding to the order they are specified in edges and segments See Also -------- ring_edges : Calculate the inner and outer radius of a set of rings. """ edges = np.asarray(edges).ravel() if not 0 == len(edges) % 2: raise ValueError( "edges should have an even number of elements, " "giving inner, outer radii for each ring" ) if not np.all(np.diff(edges) >= 0): raise ValueError( "edges are expected to be monotonically increasing, " "giving inner and outer radii of each ring from " "r=0 outward" ) agrid = utils.angle_grid(center, shape) agrid[agrid < 0] = 2 * np.pi + agrid[agrid < 0] segments_is_list = isinstance(segments, collections.abc.Iterable) if segments_is_list: segments = np.asarray(segments) + offset_angle else: # N equal segments requires N+1 bin edges spanning 0 to 2pi. segments = np.linspace(0, 2 * np.pi, num=1 + segments, endpoint=True) segments += offset_angle # the indices of the bins(angles) to which each value in input # array(angle_grid) belongs. 
ind_grid = (np.digitize(np.ravel(agrid), segments, right=False)).reshape(shape) label_array = np.zeros(shape, dtype=np.int64) # radius grid for the image_shape rgrid = utils.radial_grid(center, shape) # assign indices value according to angles then rings len_segments = len(segments) for i in range(len(edges) // 2): indices = (edges[2 * i] <= rgrid) & (rgrid < edges[2 * i + 1]) # Combine "segment #" and "ring #" to get unique label for each. label_array[indices] = ind_grid[indices] + (len_segments - 1) * i return label_array def roi_max_counts(images_sets, label_array): """ Return the brightest pixel in any ROI in any image in the image set. Parameters ---------- images_sets : array iterable of 4D arrays shapes is: (len(images_sets), ) label_array : array labeled array; 0 is background. Each ROI is represented by a distinct label (i.e., integer). Returns ------- max_counts : int maximum pixel counts """ max_cts = 0 for img_set in images_sets: for img in img_set: max_cts = max(max_cts, ndimage.maximum(img, label_array)) return max_cts def roi_pixel_values(image, labels, index=None): """ This will provide intensities of the ROI's of the labeled array according to the pixel list eg: intensities of the rings of the labeled array Parameters ---------- image : array image data dimensions are: (rr, cc) labels : array labeled array; 0 is background. Each ROI is represented by a distinct label (i.e., integer). index_list : list, optional labels list eg: 5 ROI's index = [1, 2, 3, 4, 5] Returns ------- roi_pix : list intensities of the ROI's of the labeled array according to the pixel list """ if labels.shape != image.shape: raise ValueError("Shape of the image data should be equal to" " shape of the labeled array") if index is None: index = np.arange(1, np.max(labels) + 1) roi_pix = [] for n in index: roi_pix.append(image[labels == n]) return roi_pix, index def mean_intensity(images, labeled_array, index=None): """Compute the mean intensity for each ROI in the image list Parameters ---------- images : list List of images labeled_array : array labeled array; 0 is background. Each ROI is represented by a nonzero integer. It is not required that the ROI labels are contiguous index : int, list, optional The ROI's to use. 
If None, this function will extract averages for all ROIs Returns ------- mean_intensity : array The mean intensity of each ROI for all `images` Dimensions: - len(mean_intensity) == len(index) - len(mean_intensity[0]) == len(images) index : list The labels for each element of the `mean_intensity` list """ if labeled_array.shape != images[0].shape[0:]: raise ValueError( "`images` shape (%s) needs to be equal to the labeled_array shape" "(%s)" % (images[0].shape, labeled_array.shape) ) # handle various input for `index` if index is None: index = list(np.unique(labeled_array)) index.remove(0) try: len(index) except TypeError: index = [index] # pre-allocate an array for performance # might be able to use list comprehension to make this faster mean_intensity = np.zeros((images.shape[0], len(index))) for n, img in enumerate(images): # use a mean that is mask-aware mean_intensity[n] = ndimage.mean(img, labeled_array, index=index) return mean_intensity, index def circular_average( image, calibrated_center, threshold=0, nx=100, pixel_size=(1, 1), min_x=None, max_x=None, mask=None ): """Circular average of the the image data The circular average is also known as the radial integration Parameters ---------- image : array Image to compute the average as a function of radius calibrated_center : tuple The center of the image in pixel units argument order should be (row, col) threshold : int, optional Ignore counts below `threshold` default is zero nx : int, optional number of bins in x defaults is 100 bins pixel_size : tuple, optional The size of a pixel (in a real unit, like mm). argument order should be (pixel_height, pixel_width) default is (1, 1) min_x : float, optional number of pixels Left edge of first bin defaults to minimum value of x max_x : float, optional number of pixels Right edge of last bin defaults to maximum value of x mask : mask for 2D data. Assumes 1 is non masked and 0 masked. None defaults to no mask. Returns ------- bin_centers : array The center of each bin in R. shape is (nx, ) ring_averages : array Radial average of the image. shape is (nx, ). See Also -------- bad_to_nan_gen : Create a mask with np.nan entries bin_grid : Bin and integrate an image, given the radial array of pixels Useful for nonlinear spacing (Ewald curvature) """ radial_val = utils.radial_grid(calibrated_center, image.shape, pixel_size) if mask is not None: w = np.where(mask == 1) radial_val = radial_val[w] image = image[w] bin_edges, sums, counts = utils.bin_1D(np.ravel(radial_val), np.ravel(image), nx, min_x=min_x, max_x=max_x) th_mask = counts > threshold ring_averages = sums[th_mask] / counts[th_mask] bin_centers = utils.bin_edges_to_centers(bin_edges)[th_mask] return bin_centers, ring_averages def kymograph(images, labels, num): """ This function will provide data for graphical representation of pixels variation over time for required ROI. Parameters ---------- images : array Image stack. dimensions are: (num_img, num_rows, num_cols) labels : array labeled array; 0 is background. Each ROI is represented by an integer num : int The ROI to turn into a kymograph Returns ------- kymograph : array data for graphical representation of pixels variation over time for required ROI """ kymo = [] for n, img in enumerate(images): kymo.append((roi_pixel_values(img, labels == num)[0])) return np.vstack(kymo) def extract_label_indices(labels): """ This will find the label's required region of interests (roi's), number of roi's count the number of pixels in each roi's and pixels list for the required roi's. 
Parameters ---------- labels : array labeled array; 0 is background. Each ROI is represented by a distinct label (i.e., integer). Returns ------- label_mask : array 1D array labeling each foreground pixel e.g., [1, 1, 1, 1, 2, 2, 1, 1] indices : array 1D array of indices into the raveled image for all foreground pixels (labeled nonzero) e.g., [5, 6, 7, 8, 14, 15, 21, 22] """ img_dim = labels.shape # TODO Make this tighter. w = np.where(np.ravel(labels) > 0) grid = np.indices((img_dim[0], img_dim[1])) pixel_list = np.ravel((grid[0] * img_dim[1] + grid[1]))[w] # discard the zeros label_mask = labels[labels > 0] return label_mask, pixel_list def _make_roi(coords, edges, shape): """Helper function to create ring rois and bar rois Parameters ---------- coords : array shape is image shape edges : list List of tuples of inner (left or top) and outer (right or bottom) edges of each roi. e.g., edges=[(1, 2), (11, 12), (21, 22)] shape : tuple Shape of the image in which to create the ROIs e.g., shape=(512, 512) Returns ------- label_array : array Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding to the order they are specified in `edges`. Has shape=`image shape` """ label_array = np.digitize(coords, edges, right=False) # Even elements of label_array are in the space between rings. label_array = (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2 return label_array.reshape(shape) def bar(edges, shape, horizontal=True, values=None): """Draw bars defined by `edges` from one edge to the other of `image_shape` Bars will be horizontal or vertical depending on the value of `horizontal` Parameters ---------- edges : list List of tuples of inner (left or top) and outer (right or bottom) edges of each bar. e.g., edges=[(1, 2), (11, 12), (21, 22)] shape : tuple Shape of the image in which to create the ROIs e.g., shape=(512, 512) horizontal : bool, optional True: Make horizontal bars False: Make vertical bars Defaults to True values : array, optional image pixels co-ordinates Returns ------- label_array : array Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding to the order they are specified in `edges`. Has shape=`image shape` Notes ----- The primary use case is in GISAXS. """ edges = np.atleast_2d(np.asarray(edges)).ravel() if not 0 == len(edges) % 2: raise ValueError( "edges should have an even number of elements, " "giving inner, outer edge value for each bar" ) if not np.all(np.diff(edges) >= 0): raise ValueError( "edges are expected to be monotonically increasing, " "giving inner and outer radii of each bar from " "r=0 outward" ) if values is None: values = np.repeat(range(shape[0]), shape[1]) if not horizontal: values = np.tile(range(shape[1]), shape[0]) return _make_roi(values, edges, shape) def box(shape, v_edges, h_edges=None, h_values=None, v_values=None): """Draw box shaped rois when the horizontal and vertical edges are provided. 
Parameters ---------- shape : tuple Shape of the image in which to create the ROIs e.g., shape=(512, 512) v_edges : list giving the inner and outer edges of each vertical bar e.g., [(1, 2), (11, 12), (21, 22)] h_edges : list, optional giving the inner and outer edges of each horizontal bar e.g., [(1, 2), (11, 12), (21, 22)] h_values : array, optional image pixels co-ordinates in horizontal direction shape has to be image shape v_values : array, optional image pixels co-ordinates in vertical direction shape has to be image shape Returns ------- label_array : array Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding to the order they are specified in edges. Notes ----- To draw boxes according to the image pixels co-ordinates has to provide both h_values and v_values. The primary use case is in GISAXS. e.g., v_values=gisaxs_qy, h_values=gisaxs_qx """ if h_edges is None: h_edges = v_edges if h_values is None and v_values is None: v_values, h_values = np.mgrid[: shape[0], : shape[1]] elif h_values.shape != v_values.shape: raise ValueError("Shape of the h_values array should be equal to" " shape of the v_values array") for edges in (h_edges, v_edges): edges = np.atleast_2d(np.asarray(edges)).ravel() if not 0 == len(edges) % 2: raise ValueError( "edges should have an even number of elements, " "giving inner, outer edges for each roi" ) coords = [] for h in h_edges: for v in v_edges: coords.append((h[0], v[0], h[1] - h[0], v[1] - v[0])) return rectangles(coords, v_values.shape) def lines(end_points, shape): """ Parameters ---------- end_points : iterable coordinates of the starting point and the ending point of each line: e.g., [(start_x, start_y, end_x, end_y), (x1, y1, x2, y2)] shape : tuple Image shape which is used to determine the maximum extent of output pixel coordinates. Order is (rr, cc). Returns ------- label_array : array Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding to the order they are specified in coords. Order is (rr, cc). """ label_array = np.zeros(shape, dtype=np.int64) label = 0 for points in end_points: if len(points) != 4: raise ValueError( "end points should have four number of" " elements, giving starting co-ordinates," " ending co-ordinates for each line" ) rr, cc = line( np.max([points[0], 0]), np.max([points[1], 0]), np.min([points[2], shape[0] - 1]), np.min([points[3], shape[1] - 1]), ) label += 1 label_array[rr, cc] = label return label_array def auto_find_center_rings(avg_img, sigma=1, no_rings=4, min_samples=3, residual_threshold=1, max_trials=1000): """This will find the center of the speckle pattern and the radii of the most intense rings. Parameters ---------- avg_img : 2D array 2D (grayscale) or 3D (RGB) array. The last dimension of RGB image must be 3. sigma : float, optional Standard deviation of the Gaussian filter. no_rings : int, optional number of rings min_sample : int, optional The minimum number of data points to fit a model to. residual_threshold : float, optional Maximum distance for a data point to be classified as an inlier. max_trials : int, optional Maximum number of iterations for random sample selection. 
Returns ------- center : tuple center co-ordinates of the speckle pattern image : 2D array Indices of pixels that belong to the rings, directly index into an array radii : list values of the radii of the rings Notes ----- scikit-image ransac method(http://www.imagexd.org/tutorial/lessons/1_ransac.html) is used to automatically find the center and the most intense rings. """ if avg_img.ndim == 3: image_tmp = color.rgb2gray(avg_img) else: image_tmp = avg_img image = img_as_float(image_tmp, force_copy=True) edges = feature.canny(image, sigma) coords = np.column_stack(np.nonzero(edges)) edge_pts_xy = coords[:, ::-1] radii = [] for i in range(no_rings): model_robust, inliers = ransac( edge_pts_xy, CircleModel, min_samples, residual_threshold, max_trials=max_trials ) if i == 0: center = int(model_robust.params[0]), int(model_robust.params[1]) radii.append(model_robust.params[2]) rr, cc = draw.circle_perimeter(center[1], center[0], int(model_robust.params[2]), shape=image.shape) image[rr, cc] = i + 1 edge_pts_xy = edge_pts_xy[~inliers] return center, image, radii
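The labeling trick shared by `rings` and `bar` via `_make_roi` is worth unpacking: `np.digitize` against the flattened edge list returns odd bin indices inside an ROI and even indices in the gaps (or outside), so zeroing the even entries and mapping odd index 2k-1 to label k yields the final label array. A small self-contained sketch of that step on a 1-D radius array; the radii and edges here are arbitrary illustration values.

```python
import numpy as np

# 1-D stand-in for the radial grid: radii of a handful of pixels.
r = np.array([0.5, 1.5, 3.0, 5.0, 11.5, 20.0, 21.5])
# Flattened (inner, outer) edges of three rings, as _make_roi sees them.
edges = np.array([1, 2, 11, 12, 21, 22])

bins = np.digitize(r, edges, right=False)
# Odd bin index 2k-1 means "inside ring k"; even means "in a gap / outside".
labels = (np.where(bins % 2 != 0, bins, 0) + 1) // 2

print(bins)    # [0 1 2 2 3 4 5]
print(labels)  # [0 1 0 0 2 0 3]
```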
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/roi.py
roi.py
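Putting the ROI helpers together: `ring_edges` turns an inner radius, widths, and spacings into (inner, outer) pairs, and `rings` rasterizes them into a label image. A usage sketch based on the signatures in the source above; the geometry values are arbitrary illustration choices.

```python
import numpy as np
from skbeam.core import roi

# Three rings, each 4 px wide, separated by 2 px, starting at r = 5 px.
edges = roi.ring_edges(inner_radius=5, width=4, spacing=2, num_rings=3)
print(edges)
# [[ 5.  9.]
#  [11. 15.]
#  [17. 21.]]

# Rasterize into a labeled image: 0 = background, 1..3 = the three rings.
labels = roi.rings(edges, center=(32, 32), shape=(64, 64))
print(np.unique(labels))   # [0 1 2 3]
```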
Returns ------- center : tuple center co-ordinates of the speckle pattern image : 2D array Indices of pixels that belong to the rings, directly index into an array radii : list values of the radii of the rings Notes ----- scikit-image ransac method(http://www.imagexd.org/tutorial/lessons/1_ransac.html) is used to automatically find the center and the most intense rings. """ if avg_img.ndim == 3: image_tmp = color.rgb2gray(avg_img) else: image_tmp = avg_img image = img_as_float(image_tmp, force_copy=True) edges = feature.canny(image, sigma) coords = np.column_stack(np.nonzero(edges)) edge_pts_xy = coords[:, ::-1] radii = [] for i in range(no_rings): model_robust, inliers = ransac( edge_pts_xy, CircleModel, min_samples, residual_threshold, max_trials=max_trials ) if i == 0: center = int(model_robust.params[0]), int(model_robust.params[1]) radii.append(model_robust.params[2]) rr, cc = draw.circle_perimeter(center[1], center[0], int(model_robust.params[2]), shape=image.shape) image[rr, cc] = i + 1 edge_pts_xy = edge_pts_xy[~inliers] return center, image, radii
0.952153
0.512693
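A brief usage sketch for the ROI helpers defined above (bar, mean_intensity, circular_average). It assumes scikit-beam is installed and that this module is importable as skbeam.core.roi, in line with the skbeam/core/ paths of the neighbouring files; the image data is synthetic and purely illustrative.

import numpy as np
from skbeam.core import roi  # assumed import path (skbeam/core/roi.py)

shape = (64, 64)
# Three horizontal bar ROIs spanning rows 10-14, 30-34 and 50-54.
edges = [(10, 15), (30, 35), (50, 55)]
labels = roi.bar(edges, shape, horizontal=True)
print(np.unique(labels))          # -> [0 1 2 3]; background is 0, bars are labeled 1..3

# Per-ROI mean intensity over a small synthetic image stack.
images = np.random.random((5,) + shape)
means, index = roi.mean_intensity(images, labels)
print(means.shape)                # -> (5, 3): one row per image, one column per ROI

# Radial (circular) average of one image about its center pixel.
bin_centers, ring_avg = roi.circular_average(images[0], (32, 32), nx=20)
print(bin_centers.shape)          # one center per non-empty radial bin (at most nx)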
from __future__ import absolute_import, division, print_function import logging import warnings from collections import namedtuple import numpy as np from scipy.optimize import minimize logger = logging.getLogger(__name__) def image_reduction(im, roi=None, bad_pixels=None): """ Sum the image data over rows and columns. Parameters ---------- im : ndarray Input image. roi : ndarray, optional [r, c, row, col], selects ROI im[r : r + row, c : c + col]. Default is None, which uses the whole image. bad_pixels : list, optional List of (row, column) tuples marking bad pixels. [(1, 5), (2, 6)] --> 2 bad pixels --> (1, 5) and (2, 6). Default is None. Returns ------- xline : ndarray The row vector of the sums of each column. yline : ndarray The column vector of the sums of each row. """ if bad_pixels: im = im.copy() for row, column in bad_pixels: im[row, column] = 0 if roi: r, c, row, col = roi im = im[r : (r + row), c : (c + col)] xline = np.sum(im, axis=0) yline = np.sum(im, axis=1) return xline, yline def _rss_factory(length): """ A factory function for returning a residue function for use in dpc fitting. The main reason to do this is to generate a closure over beta so that linspace is only called once. Parameters ---------- length : int The length of the data vector that the returned function can deal with. Returns ------- function A function with signature f(v, xdata, ydata) which is suitable for use as a cost function for use with scipy.optimize. """ beta = 1j * (np.linspace(-(length - 1) // 2, (length - 1) // 2, length)) def _rss(v, ref_reduction, diff_reduction): """ Internal function used by fit() Cost function to be minimized in nonlinear fitting Parameters ---------- v : list Fit parameters. v[0], amplitude of the sample transmission function at one scanning point; v[1], the phase gradient (along x or y direction) of the sample transmission function. ref_reduction : ndarray Extra argument passed to the objective function. In DPC, it's the sum of the reference image data along x or y direction. diff_refuction : ndarray Extra argument passed to the objective function. In DPC, it's the sum of one captured diffraction pattern along x or y direction. Returns -------- float Residue value. """ diff = diff_reduction - ref_reduction * v[0] * np.exp(v[1] * beta) return np.sum((diff * np.conj(diff)).real) return _rss def dpc_fit(rss, ref_reduction, diff_reduction, start_point, solver="Nelder-Mead", tol=1e-6, max_iters=2000): """ Nonlinear fitting for 2 points. Parameters ---------- rss : callable Objective function to be minimized in DPC fitting. ref_reduction : ndarray Extra argument passed to the objective function. In DPC, it's the sum of the reference image data along x or y direction. diff_reduction : ndarray Extra argument passed to the objective function. In DPC, it's the sum of one captured diffraction pattern along x or y direction. start_point : list start_point[0], start-searching value for the amplitude of the sample transmission function at one scanning point. start_point[1], start-searching value for the phase gradient (along x or y direction) of the sample transmission function at one scanning point. solver : str, optional Type of solver, one of the following (default 'Nelder-Mead'): * 'Nelder-Mead' * 'Powell' * 'CG' * 'BFGS' * 'Anneal' * 'L-BFGS-B' * 'TNC' * 'COBYLA' * 'SLSQP' tol : float, optional Termination criteria of nonlinear fitting. Default is 1e-6. max_iters : int, optional Maximum iterations of nonlinear fitting. Default is 2000. 
Returns ------- tuple Fitting result: intensity attenuation and phase gradient. """ return minimize( rss, start_point, args=(ref_reduction, diff_reduction), method=solver, tol=tol, options=dict(maxiter=max_iters), ).x # attributes dpc_fit.solver = ["Nelder-Mead", "Powell", "CG", "BFGS", "Anneal", "L-BFGS-B", "TNC", "COBYLA", "SLSQP"] def recon(gx, gy, scan_xstep, scan_ystep, padding=0, weighting=0.5): """Reconstruct the final phase image. Parameters ---------- gx : ndarray Phase gradient along x direction. gy : ndarray Phase gradient along y direction. scan_xstep : float Scanning step size in x direction (in micro-meter). scan_ystep : float Scanning step size in y direction (in micro-meter). padding : int, optional Pad a N-by-M array to be a ``(N*(2*padding+1))``-by-``(M*(2*padding+1))`` array with the image in the middle with a (N*padding, M*padding) thick edge of zeros. Default is 0. padding = 0 --> v (the original image, size = (N, M)) 0 0 0 padding = 1 --> 0 v 0 (the padded image, size = (3 * N, 3 * M)) 0 0 0 weighting : float, optional Weighting parameter for the phase gradient along x and y direction when constructing the final phase image. Valid in [0, 1]. Default value = 0.5, which means that gx and gy equally contribute to the final phase image. Returns ------- phase : ndarray Final phase image. """ if weighting < 0 or weighting > 1: raise ValueError("weighting should be within the range of [0, 1]!") pad = 2 * padding + 1 gx = np.asarray(gx) rows, cols = gx.shape pad_row = rows * pad pad_col = cols * pad gx_padding = np.zeros((pad_row, pad_col), dtype="d") gy_padding = np.zeros((pad_row, pad_col), dtype="d") roi_slice = (slice(padding * rows, (padding + 1) * rows), slice(padding * cols, (padding + 1) * cols)) gx_padding[roi_slice] = gx gy_padding[roi_slice] = gy tx = np.fft.fftshift(np.fft.fft2(gx_padding)) ty = np.fft.fftshift(np.fft.fft2(gy_padding)) mid_col = pad_col // 2 + 1 mid_row = pad_row // 2 + 1 ax = 2 * np.pi * np.arange(1 - mid_col, pad_col - mid_col + 1) / (pad_col * scan_xstep) ay = 2 * np.pi * np.arange(1 - mid_row, pad_row - mid_row + 1) / (pad_row * scan_ystep) kappax, kappay = np.meshgrid(ax, ay) div_v = kappax**2 * (1 - weighting) + kappay**2 * weighting with warnings.catch_warnings(): # It appears that having nans in data arrays is normal mode of # operation for this function. So let's disable warnings. warnings.filterwarnings("ignore", category=RuntimeWarning) c = -1j * (kappax * tx * (1 - weighting) + kappay * ty * weighting) / div_v c = np.fft.ifftshift(np.where(div_v == 0, 0, c)) phase = np.fft.ifft2(c)[roi_slice].real return phase # holy hacks, Batman! 'index' here is a single element list so # that I can keep track of how many images have been computed dpc_internal_state = namedtuple("dpc_internal_state", ["ax", "ay", "gx", "gy", "ref_fx", "ref_fy", "index"]) def dpc_runner( ref, image_sequence, start_point, pixel_size, focus_to_det, scan_rows, scan_cols, scan_xstep, scan_ystep, energy, padding=0, weighting=0.5, solver="Nelder-Mead", roi=None, bad_pixels=None, negate=True, scale=True, ): """Wraps `lazy_dpc` See docstring for `lazy_dpc` and `reconstruct_phase_from_partial_info` for the input parameters for this function and what it returns """ if len(pixel_size) == 2: # make sure the pixels are the same size if pixel_size[0] != pixel_size[1]: raise ValueError("In DPC, pixels must be square. 
You provided" "pixel values of {}".format(pixel_size)) dpc_gen = lazy_dpc(ref, image_sequence, start_point, scan_rows, scan_cols, solver, roi, bad_pixels) # exhaust the generator, keeping only the last result for dpc_state in dpc_gen: pass # compute the final results phase, amplitude = reconstruct_phase_from_partial_info( dpc_state, energy, scan_xstep, scan_ystep, pixel_size[0], focus_to_det, negate, scale, padding, weighting ) return phase, amplitude def lazy_dpc( ref, image_sequence, start_point, scan_rows, scan_cols, solver="Nelder-Mead", roi=None, bad_pixels=None, dpc_state=None, ): """ Controller function to run the whole Differential Phase Contrast (DPC) imaging calculation. Parameters ---------- ref : ndarray The reference image for a DPC calculation. image_sequence : iterable of 2D arrays Return diffraction patterns (2D Numpy arrays) when iterated over. start_point : list start_point[0], start-searching value for the amplitude of the sample transmission function at one scanning point. start_point[1], start-searching value for the phase gradient (along x or y direction) of the sample transmission function at one scanning point. scan_rows : int Number of scanned rows. scan_cols : int Number of scanned columns. solver : str, optional Type of solver, one of the following (default 'Nelder-Mead'): * 'Nelder-Mead' * 'Powell' * 'CG' * 'BFGS' * 'Anneal' * 'L-BFGS-B' * 'TNC' * 'COBYLA' * 'SLSQP' roi : ndarray, optional [r, c, row, col], selects ROI im[r : r + row, c : c + col]. Default is None. bad_pixels : list, optional List of (row, column) tuples marking bad pixels. [(1, 5), (2, 6)] --> 2 bad pixels --> (1, 5) and (2, 6). Default is None. Yields ------ dpc_state : namedtuple The internal state that `dpc_runner` requires for each iteration. Can be passed to reconstruct_phase_from_partial_info which, along with some additional info, will produce the final phase image References: text [1]_ .. [1] Yan, H. et al. Quantitative x-ray phase imaging at the nanoscale by multilayer Laue lenses. Sci. Rep. 3, 1307; DOI:10.1038/srep01307 (2013). 
""" def initialize_state(scan_rows, scan_cols, ref, roi, bad_pixels): # Initialize ax, ay, gx, and gy ax = np.zeros((scan_rows, scan_cols), dtype="d") ay = np.zeros((scan_rows, scan_cols), dtype="d") gx = np.zeros((scan_rows, scan_cols), dtype="d") gy = np.zeros((scan_rows, scan_cols), dtype="d") # Dimension reduction along x and y direction refx, refy = image_reduction(ref, roi, bad_pixels) ref_fx = np.fft.fftshift(np.fft.ifft(refx)) ref_fy = np.fft.fftshift(np.fft.ifft(refy)) return dpc_internal_state(ax, ay, gx, gy, ref_fx, ref_fy, [0]) if dpc_state is None: dpc_state = initialize_state(scan_rows, scan_cols, ref, roi, bad_pixels) # 1-D IFFT ffx = _rss_factory(len(dpc_state.ref_fx)) ffy = _rss_factory(len(dpc_state.ref_fy)) # Same calculation on each diffraction pattern for im in image_sequence: i, j = np.unravel_index(dpc_state.index[0], (scan_rows, scan_cols)) # Dimension reduction along x and y direction imx, imy = image_reduction(im, roi, bad_pixels) # 1-D IFFT fx = np.fft.fftshift(np.fft.ifft(imx)) fy = np.fft.fftshift(np.fft.ifft(imy)) # Nonlinear fitting _ax, _gx = dpc_fit(ffx, dpc_state.ref_fx, fx, start_point, solver) _ay, _gy = dpc_fit(ffy, dpc_state.ref_fy, fy, start_point, solver) # Store one-point intermediate results dpc_state.gx[i, j] = _gx dpc_state.gy[i, j] = _gy dpc_state.ax[i, j] = _ax dpc_state.ay[i, j] = _ay dpc_state.index[0] += 1 yield dpc_state def reconstruct_phase_from_partial_info( dpc_state, energy, scan_xstep, scan_ystep, pixel_size=None, focus_to_det=None, negate=True, scale=True, padding=0, weighting=0.5, ): """Using the partial results from dpc_runner, reconstruct the phase image Parameters ---------- dpc_state : namedtuple The thing yielded from `dpc_runner` energy : float Energy of the scanning x-ray in keV. focus_to_det : float Focus to detector distance in um. scan_xstep : float Scanning step size in x direction (in micro-meter). scan_ystep : float Scanning step size in y direction (in micro-meter). pixel_size : Number, optional The size of the detector pixels. Pixels must be square. If `pixel_size and `focus_to_det` are provided, it is assumed that you want to scale the image. focus_to_det : Number, optional The distance from the focal point of the beam to the detector. Must be provided as a pair with `pixel_size`. negate : bool, optional If True (default), negate the phase gradient along x direction before reconstructing the final phase image. Default is True. scale : bool, optional If True, scale gx and gy according to the experiment set up. If False, ignore pixel_size, focus_to_det, energy. Default is True. padding : int, optional Pad a N-by-M array to be a ``(N*(2*padding+1))``-by-``(M*(2*padding+1))`` array with the image in the middle with a (N*padding, M*padding) thick edge of zeros. Default is 0. padding = 0 --> v (the original image, size = (N, M)) 0 0 0 padding = 1 --> 0 v 0 (the padded image, size = (3 * N, 3 * M)) 0 0 0 weighting : float, optional Weighting parameter for the phase gradient along x and y direction when constructing the final phase image. Valid in [0, 1]. Default value = 0.5, which means that gx and gy equally contribute to the final phase image. Returns ------- phase : ndarray The final reconstructed phase image. amplitude : ndarray Amplitude of the sample transmission function. 
""" if weighting < 0 or weighting > 1: raise ValueError("weighting should be within the range of [0, 1]!") gx = None gy = dpc_state.gy if pixel_size and focus_to_det: # Convert to wavelength lambda_ = 12.4e-4 / energy # pre-compute the scaling factor scale = pixel_size / (lambda_ * focus_to_det) gx = dpc_state.gx * len(dpc_state.ref_fx) * scale gy = dpc_state.gy * len(dpc_state.ref_fy) * scale if negate: if gx is not None: gx *= -1 else: gx = dpc_state.gx * -1 # Reconstruct the final phase image phase = recon(gx, gy, scan_xstep, scan_ystep, padding, weighting) return phase, (dpc_state.ax + dpc_state.ay) / 2 # attributes dpc_runner.solver = ["Nelder-Mead", "Powell", "CG", "BFGS", "Anneal", "L-BFGS-B", "TNC", "COBYLA", "SLSQP"]
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/dpc.py
dpc.py
0.950428
0.633821
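A minimal sketch of driving the DPC machinery above with synthetic data, using only functions defined in this file and the skbeam.core.dpc module path shown. The reference image and frames are random placeholders, so the recovered phase is not physically meaningful; the point is the calling pattern of lazy_dpc followed by reconstruct_phase_from_partial_info.

import numpy as np
from skbeam.core import dpc  # module path shown above: skbeam/core/dpc.py

rng = np.random.RandomState(0)
ref = rng.random_sample((32, 32)) + 1.0        # synthetic reference image
frames = [0.95 * ref for _ in range(4)]        # one frame per point of a 2 x 2 scan

# Exhaust the lazy generator, keeping only the final accumulated state.
state = None
for state in dpc.lazy_dpc(ref, frames, start_point=[1.0, 0.0],
                          scan_rows=2, scan_cols=2):
    pass

# Turn the accumulated gradients into a phase image and an amplitude map.
phase, amplitude = dpc.reconstruct_phase_from_partial_info(
    state, energy=10.0, scan_xstep=0.1, scan_ystep=0.1)
print(phase.shape, amplitude.shape)            # -> (2, 2) (2, 2)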
from __future__ import absolute_import, division, print_function import logging import time import numpy as np from scipy.ndimage import gaussian_filter logger = logging.getLogger(__name__) def _dist(dims): """ Create array with pixel value equals to the distance from array center. Parameters ---------- dims : list or tuple shape of array to create Returns ------- arr : np.ndarray ND array whose pixels are equal to the distance from the center of the array of shape `dims` """ dist_sum = [] dist_sum = np.zeros(dims, dtype=np.float64) for idx, d in enumerate(dims): shape = np.ones(len(dims), dtype=np.int64) shape[idx] = d vec = (np.arange(d) - d // 2) ** 2 vec = np.broadcast_to(np.reshape(vec, newshape=shape), dims) dist_sum += vec return np.sqrt(dist_sum) def gauss(dims, sigma): """ Generate Gaussian function in 2D or 3D. Parameters ---------- dims : list or tuple shape of the data sigma : float standard deviation of gaussian function Returns ------- arr : array ND gaussian """ x = _dist(dims) y = np.exp(-((x / sigma) ** 2) / 2) return y / np.sum(y) def pi_modulus(recon_pattern, diffracted_pattern, offset_v=1e-12): """ Transfer sample from real space to q space. Use constraint based on diffraction pattern from experiments. Parameters ---------- recon_pattern : array reconstructed pattern in real space diffracted_pattern : array diffraction pattern from experiments offset_v : float, optional add small value to avoid the case of dividing something by zero Returns ------- array : updated pattern in real space """ diff_tmp = np.fft.fftn(recon_pattern) / np.sqrt(np.size(recon_pattern)) index = diffracted_pattern > 0 diff_tmp[index] = diffracted_pattern[index] * diff_tmp[index] / (np.abs(diff_tmp[index]) + offset_v) return np.fft.ifftn(diff_tmp) * np.sqrt(np.size(diffracted_pattern)) def find_support(sample_obj, sw_sigma, sw_threshold): """ Update sample area based on thresholds. Parameters ---------- sample_obj : array sample for reconstruction sw_sigma : float sigma for gaussian in shrinkwrap method sw_threshold : float threshold used in shrinkwrap method Returns ------- array : index of sample support """ sample_obj = np.abs(sample_obj) conv_fun = gaussian_filter(sample_obj, sw_sigma) conv_max = np.max(conv_fun) return conv_fun >= (sw_threshold * conv_max) def cal_diff_error(sample_obj, diffracted_pattern): """ Calculate the error in q space. Parameters ---------- sample_obj : array sample data diffracted_pattern : array diffraction pattern from experiments Returns ------- float : relative error in q space """ new_diff = np.abs(np.fft.fftn(sample_obj)) / np.sqrt(np.size(sample_obj)) return np.linalg.norm(new_diff - diffracted_pattern) / np.linalg.norm(diffracted_pattern) def generate_random_phase_field(diffracted_pattern): """ Initiate random phase. Parameters ---------- diffracted_pattern : array diffraction pattern from experiments Returns ------- sample_obj : array sample information with phase """ pha_tmp = np.random.uniform(0, 2 * np.pi, diffracted_pattern.shape) sample_obj = np.fft.ifftn(diffracted_pattern * np.exp(1j * pha_tmp)) * np.sqrt(np.size(diffracted_pattern)) return sample_obj def generate_box_support(sup_radius, shape_v): """ Generate support area as a box for either 2D or 3D cases. Parameters ---------- sup_radius : float radius of support shape_v : list shape of diffraction pattern, which can be either 2D or 3D case. 
Returns ------- sup : array support with a box area """ slc_list = [slice(s // 2 - sup_radius, s // 2 + sup_radius) for s in shape_v] sup = np.zeros(shape_v) sup[tuple(slc_list)] = 1 return sup def generate_disk_support(sup_radius, shape_v): """ Generate support area as a disk for either 2D or 3D cases. Parameters ---------- sup_radius : float radius of support shape_v : list shape of diffraction pattern, which can be either 2D or 3D case. Returns ------- sup : array support with a disk area """ sup = np.zeros(shape_v) dummy = _dist(shape_v) sup[dummy < sup_radius] = 1 return sup def cdi_recon( diffracted_pattern, sample_obj, sup, beta=1.15, start_avg=0.8, pi_modulus_flag="Complex", sw_flag=True, sw_sigma=0.5, sw_threshold=0.1, sw_start=0.2, sw_end=0.8, sw_step=10, n_iterations=1000, cb_function=None, cb_step=10, ): """ Run reconstruction with difference map algorithm. Parameters ---------- diffracted_pattern : array diffraction pattern from experiments sample_obj : array initial sample with phase, complex number sup : array initial support beta : float, optional feedback parameter for difference map algorithm. default is 1.15. start_avg : float, optional define the point to start doing average. default is 0.8. pi_modulus_flag : {'complex', 'real'}, optional 'complex' or 'real', defining the way to perform pi_modulus calculation. default is 'Complex'. sw_flag : Bool, optional flag to use shrinkwrap algorithm or not. default is True. sw_sigma : float, optional gaussian width used in sw algorithm. default is 0.5. sw_threshold : float, optional shreshold cut in sw algorithm. default is 0.1. sw_start : float, optional at which point to start to do shrinkwrap. defualt is 0.2 sw_end : float, optional at which point to stop shrinkwrap. defualt is 0.8 sw_step : float, optional the frequency to perform sw algorithm. defualt is 10 n_iterations : int, optional number of iterations to run. default is 1000. cb_function : function, optional This is a callback function that expects to receive these four objects: sample_obj, obj_error, diff_error, sup_error. Sample_obj is a 2D array. And obj_error, diff_error, and sup_error are 1D array. cb_step : int, optional define plotting frequency, i.e., if plot_step = 10, plot results after every 10 iterations. Returns ------- obj_avg : array reconstructed sample object error_dict : dict Error information for all iterations. The dict keys include obj_error, diff_error and sup_error. Obj_error is a list of the relative error of sample object. Diff_error is calculated as the difference between new diffraction pattern and the original diffraction pattern. And sup_error stores the size of the sample support. References ---------- .. [1] V. Elser, "Phase retrieval by iterated projections", J. Opt. Soc. Am. A, vol. 20, No. 
1, 2003 """ diffracted_pattern = np.array(diffracted_pattern) # diffraction data diffracted_pattern = np.fft.fftshift(diffracted_pattern) pi_modulus_flag = pi_modulus_flag.lower() real_operation = False if pi_modulus_flag == "real": real_operation = True elif pi_modulus_flag == "complex": real_operation = False else: raise ValueError('py_modulus_flag must be one of ("complex","real") not ' "{!r}".format(pi_modulus_flag)) gamma_1 = -1 / beta gamma_2 = 1 / beta # get support index outside_sup_index = sup != 1 error_dict = {} obj_error = np.zeros(n_iterations) diff_error = np.zeros(n_iterations) sup_error = np.zeros(n_iterations) sup_old = np.zeros_like(diffracted_pattern) obj_avg = np.zeros_like(diffracted_pattern).astype(complex) avg_i = 0 time_start = time.time() for n in range(n_iterations): obj_old = np.array(sample_obj) obj_a = pi_modulus(sample_obj, diffracted_pattern) if real_operation: obj_a = np.abs(obj_a) obj_a = (1 + gamma_2) * obj_a - gamma_2 * sample_obj obj_a[outside_sup_index] = 0 # define support obj_b = np.array(sample_obj) obj_b[outside_sup_index] = 0 # define support obj_b = (1 + gamma_1) * obj_b - gamma_1 * sample_obj obj_b = pi_modulus(obj_b, diffracted_pattern) if real_operation: obj_b = np.abs(obj_b) sample_obj += beta * (obj_a - obj_b) # calculate errors obj_error[n] = np.linalg.norm(sample_obj - obj_old) / np.linalg.norm(obj_old) diff_error[n] = cal_diff_error(sample_obj, diffracted_pattern) if sw_flag: if (n >= (sw_start * n_iterations)) and (n <= (sw_end * n_iterations)): if np.mod(n, sw_step) == 0: logger.info("Refine support with shrinkwrap") sup_index = find_support(sample_obj, sw_sigma, sw_threshold) sup = np.zeros_like(diffracted_pattern) sup[sup_index] = 1 outside_sup_index = sup != 1 sup_error[n] = np.sum(sup_old) sup_old = np.array(sup) if cb_function and n_iterations % cb_step == 0: cb_function(sample_obj, obj_error, diff_error, sup_error) if n > start_avg * n_iterations: obj_avg += sample_obj avg_i += 1 logger.info("%d object_chi= %f, diff_chi=%f" % (n, obj_error[n], diff_error[n])) obj_avg = obj_avg / avg_i time_end = time.time() logger.info("%d iterations takes %f sec" % (n_iterations, time_end - time_start)) error_dict["obj_error"] = obj_error error_dict["diff_error"] = diff_error error_dict["sup_error"] = sup_error return obj_avg, error_dict
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/cdi.py
cdi.py
0.931673
0.669326
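A hedged sketch of a difference-map reconstruction built only from the helpers in cdi.py above, imported from skbeam.core.cdi per the path shown. The object, support, and iteration count are deliberately tiny so the call finishes quickly; real reconstructions use measured diffraction amplitudes and far more iterations.

import numpy as np
from skbeam.core import cdi  # module path shown above: skbeam/core/cdi.py

# Tiny synthetic sample: an 8 x 8 box inside a 32 x 32 field.
truth = np.zeros((32, 32))
truth[12:20, 12:20] = 1.0

# Diffraction amplitude, normalized by sqrt(N) as in pi_modulus and
# pre-shifted because cdi_recon applies np.fft.fftshift to its input.
diff = np.fft.fftshift(np.abs(np.fft.fftn(truth))) / np.sqrt(truth.size)

sup = cdi.generate_box_support(8, diff.shape)      # generous box support
start = cdi.generate_random_phase_field(diff)      # random-phase starting guess

obj, errors = cdi.cdi_recon(diff, start, sup, n_iterations=50, sw_flag=False)
print(obj.shape)                                   # -> (32, 32), complex averaged object
print(sorted(errors))                              # -> ['diff_error', 'obj_error', 'sup_error']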
from __future__ import absolute_import, division, print_function import logging import numpy as np import scipy.stats as sts logger = logging.getLogger(__name__) def bad_to_nan_gen(images, bad): """ Convert the images marked as "bad" in `bad` by their index in images into a np.nan array Parameters ---------- images : iterable Iterable of 2-D arrays bad : list List of integer indices into the `images` parameter that mark those images as "bad". Yields ------ img : array if image is bad it will convert to np.nan array otherwise no change to the array """ ret_val = None for n, im in enumerate(images): if n in bad: if ret_val is None: ret_val = np.empty(im.shape) ret_val[:] = np.nan yield ret_val else: yield im def threshold(images, threshold, mask=None): """ This generator sets all pixels whose value is greater than `threshold` to 0 and yields the thresholded images out Parameters ---------- images : iterable Iterable of 2-D arrays threshold : float threshold value to remove the hot spots in the image mask : array array with values above the threshold marked as 0 and values below marked as 1. shape is (num_columns, num_rows) of the image, optional None Yields ------ mask : array array with values above the threshold marked as 0 and values below marked as 1. shape is (num_columns, num_rows) of the image """ if mask is None: mask = np.ones_like(images[0]) for im in images: bad_pixels = np.where(im >= threshold) if len(bad_pixels[0]) != 0: mask[bad_pixels] = 0 yield mask def margin(img_shape, edge_size): """ Mask the edge of an image Parameters ---------- img_shape: tuple The shape of the image edge_size: int Number of pixels to mask from the edge Returns ------- 2darray: The mask array, bad pixels are 0 """ mask = np.ones(img_shape, dtype=bool) mask[edge_size:-edge_size, edge_size:-edge_size] = 0.0 return ~mask def binned_outlier(img, r, alpha, bins, mask=None): """ Generates a mask by identifying outlier pixels in bins and masks any pixels which have a value greater or less than alpha * std away from the mean Parameters ---------- img: 2darray The image r: 2darray The array which maps pixels to bins alpha: float or tuple or, 1darray Then number of acceptable standard deviations, if tuple then we use a linear distribution of alphas from alpha[0] to alpha[1], if array then we just use that as the distribution of alphas bins: list The bin edges mask: 1darray, bool A starting flattened mask Returns ------- 2darray: The mask """ if mask is None: working_mask = np.ones(img.shape).astype(bool) else: working_mask = np.copy(mask).astype(bool) if working_mask.shape != img.shape: working_mask = working_mask.reshape(img.shape) msk_img = img[working_mask] msk_r = r[working_mask] int_r = np.digitize(r, bins[:-1], True) - 1 # integration mean = sts.binned_statistic(msk_r, msk_img, bins=bins, statistic="mean")[0] std = sts.binned_statistic(msk_r, msk_img, bins=bins, statistic=np.std)[0] if type(alpha) is tuple: alpha = np.linspace(alpha[0], alpha[1], len(std)) threshold = alpha * std lower = mean - threshold upper = mean + threshold # single out the too low and too high pixels working_mask *= img > lower[int_r] working_mask *= img < upper[int_r] return working_mask.astype(bool)
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/mask.py
mask.py
0.943958
0.734857
from __future__ import absolute_import, division, print_function import logging from collections import deque import numpy as np from six.moves import zip from .fitting import fit_quad_to_peak logger = logging.getLogger(__name__) class PeakRejection(Exception): """Custom exception class to indicate that the refine function rejected the candidate peak. This uses the exception handling framework in a method akin to `StopIteration` to indicate that there will be no return value. """ pass def peak_refinement(x, y, cands, window, refine_function, refine_args=None): """Refine candidate locations Parameters ---------- x : array The independent variable, does not need to be evenly spaced. y : array The dependent variable. Must correspond 1:1 with the values in `x` cands : array Array of the indices in `x` (and `y`) for the candidate peaks. refine_function : function A function which takes a section of data with a peak in it and returns the location and height of the peak to sub-sample accuracy. Additional parameters can be passed through via the refine_args kwarg. The function signature must be:: center, height = refine_func(x, y, **kwargs) This function may raise `PeakRejection` to indicate no suitable peak was found window : int How many samples to extract on either side of the candidate locations are passed to the refine function. The window will be truncated near the boundaries. The length of the data passed to the refine function will be (2 * window + 1). refine_args : dict, optional The passed to the refine_function Returns ------- peak_locations : array The locations of the peaks peak_heights : array The heights of the peaks Examples -------- >>> x = np.arange(512) >>> tt = np.zeros(512) >>> tt += np.exp(-((x - 150.55)/10)**2) >>> tt += np.exp(-((x - 450.75)/10)**2) >>> cands = scipy.signal.argrelmax(tt)[0] >>> print(peak_refinement(x, tt, cands, 10, refine_quadratic)) (array([ 150.62286432, 450.7909412 ]), array([ 0.96435832, 0.96491501])) >>> print(peak_refinement(x, tt, cands, 10, refine_log_quadratic)) (array([ 150.55, 450.75]), array([ 1., 1.])) """ # clean up input x = np.asarray(x) y = np.asarray(y) cands = np.asarray(cands, dtype=int) window = int(window) if refine_args is None: refine_args = dict() # local working variables out_tmp = deque() max_ind = len(x) for ind in cands: slc = slice(np.max([0, ind - window]), np.min([max_ind, ind + window + 1])) try: ret = refine_function(x[slc], y[slc], **refine_args) except PeakRejection: # We are catching the PeakRejections raised here as # an indication that no suitable peak was found continue else: out_tmp.append(ret) return tuple([np.array(_) for _ in zip(*out_tmp)]) def refine_quadratic(x, y, Rval_thresh=None): """ Attempts to refine the peaks by fitting to a quadratic function. Parameters ---------- x : array Independent variable y : array Dependent variable Rval_thresh : float, optional Threshold for R^2 value of fit, If the computed R^2 is worse than this threshold PeakRejection will be raised Returns ------- center : float Refined estimate for center height : float Refined estimate for height Raises ------ PeakRejection Raised to indicate that no suitable peak was found in the interval """ beta, R2 = fit_quad_to_peak(x, y) if Rval_thresh is not None and R2 < Rval_thresh: raise PeakRejection() return beta[1], beta[2] def refine_log_quadratic(x, y, Rval_thresh=None): """ Attempts to refine the peaks by fitting a quadratic to the log of the y-data. This is a linear approximation of fitting a Gaussian. 
Parameters ---------- x : array Independent variable y : array Dependent variable Rval_thresh : float, optional Threshold for R^2 value of fit, If the computed R^2 is worse than this threshold PeakRejection will be raised Returns ------- center : float Refined estimate for center height : float Refined estimate for height Raises ------ PeakRejection Raised to indicate that no suitable peak was found in the interval """ beta, R2 = fit_quad_to_peak(x, np.log(y)) if Rval_thresh is not None and R2 < Rval_thresh: raise PeakRejection() return beta[1], np.exp(beta[2]) def filter_n_largest(y, cands, N): """Filters the N largest candidate peaks Return a maximum of N largest candidates. If N > len(cands) then all of the cands will be returned sorted, else the indices of the N largest peaks will be returned in descending order. Parameters ---------- y : array Independent variable cands : array An array containing the indices of candidate peaks N : int The maximum number of peaks to return, sorted by size. Must be positive Returns ------- cands : array An array of the indices of up to the N largest candidates """ cands = np.asarray(cands) N = int(N) if N <= 0: raise ValueError("The maximum number of peaks to return must " "be positive not {}".format(N)) sorted_args = np.argsort(y[cands]) # cut out if asking for more peaks than exist if len(cands) < N: return cands[sorted_args][::-1] return cands[sorted_args[-N:]][::-1] def filter_peak_height(y, cands, thresh, window=5): """ Filter to remove candidate that are too small. This is implemented by looking at the relative height (max - min) of the peak in a window around the candidate peak. Parameters ---------- y : array Independent variable cands : array An array containing the indices of candidate peaks thresh : int The minimum peak-to-peak size of the candidate peak to be accepted window : int, optional The size of the window around the peak to consider Returns ------- cands : array An array of the indices which pass the filter """ y = np.asarray(y) out_tmp = deque() max_ind = len(y) for ind in cands: slc = slice(np.max([0, ind - window]), np.min([max_ind, ind + window + 1])) pk_hght = np.ptp(y[slc]) if pk_hght > thresh: out_tmp.append(ind) return np.array(out_tmp) # add our refinement functions as an attribute on peak_refinement # ta make auto-wrapping for vistrials easier. peak_refinement.refine_function = [refine_log_quadratic, refine_quadratic]
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/feature.py
feature.py
from __future__ import absolute_import, division, print_function import logging from collections import deque import numpy as np from six.moves import zip from .fitting import fit_quad_to_peak logger = logging.getLogger(__name__) class PeakRejection(Exception): """Custom exception class to indicate that the refine function rejected the candidate peak. This uses the exception handling framework in a method akin to `StopIteration` to indicate that there will be no return value. """ pass def peak_refinement(x, y, cands, window, refine_function, refine_args=None): """Refine candidate locations Parameters ---------- x : array The independent variable, does not need to be evenly spaced. y : array The dependent variable. Must correspond 1:1 with the values in `x` cands : array Array of the indices in `x` (and `y`) for the candidate peaks. refine_function : function A function which takes a section of data with a peak in it and returns the location and height of the peak to sub-sample accuracy. Additional parameters can be passed through via the refine_args kwarg. The function signature must be:: center, height = refine_func(x, y, **kwargs) This function may raise `PeakRejection` to indicate no suitable peak was found window : int How many samples to extract on either side of the candidate locations are passed to the refine function. The window will be truncated near the boundaries. The length of the data passed to the refine function will be (2 * window + 1). refine_args : dict, optional The passed to the refine_function Returns ------- peak_locations : array The locations of the peaks peak_heights : array The heights of the peaks Examples -------- >>> x = np.arange(512) >>> tt = np.zeros(512) >>> tt += np.exp(-((x - 150.55)/10)**2) >>> tt += np.exp(-((x - 450.75)/10)**2) >>> cands = scipy.signal.argrelmax(tt)[0] >>> print(peak_refinement(x, tt, cands, 10, refine_quadratic)) (array([ 150.62286432, 450.7909412 ]), array([ 0.96435832, 0.96491501])) >>> print(peak_refinement(x, tt, cands, 10, refine_log_quadratic)) (array([ 150.55, 450.75]), array([ 1., 1.])) """ # clean up input x = np.asarray(x) y = np.asarray(y) cands = np.asarray(cands, dtype=int) window = int(window) if refine_args is None: refine_args = dict() # local working variables out_tmp = deque() max_ind = len(x) for ind in cands: slc = slice(np.max([0, ind - window]), np.min([max_ind, ind + window + 1])) try: ret = refine_function(x[slc], y[slc], **refine_args) except PeakRejection: # We are catching the PeakRejections raised here as # an indication that no suitable peak was found continue else: out_tmp.append(ret) return tuple([np.array(_) for _ in zip(*out_tmp)]) def refine_quadratic(x, y, Rval_thresh=None): """ Attempts to refine the peaks by fitting to a quadratic function. Parameters ---------- x : array Independent variable y : array Dependent variable Rval_thresh : float, optional Threshold for R^2 value of fit, If the computed R^2 is worse than this threshold PeakRejection will be raised Returns ------- center : float Refined estimate for center height : float Refined estimate for height Raises ------ PeakRejection Raised to indicate that no suitable peak was found in the interval """ beta, R2 = fit_quad_to_peak(x, y) if Rval_thresh is not None and R2 < Rval_thresh: raise PeakRejection() return beta[1], beta[2] def refine_log_quadratic(x, y, Rval_thresh=None): """ Attempts to refine the peaks by fitting a quadratic to the log of the y-data. This is a linear approximation of fitting a Gaussian. 
Parameters ---------- x : array Independent variable y : array Dependent variable Rval_thresh : float, optional Threshold for R^2 value of fit, If the computed R^2 is worse than this threshold PeakRejection will be raised Returns ------- center : float Refined estimate for center height : float Refined estimate for height Raises ------ PeakRejection Raised to indicate that no suitable peak was found in the interval """ beta, R2 = fit_quad_to_peak(x, np.log(y)) if Rval_thresh is not None and R2 < Rval_thresh: raise PeakRejection() return beta[1], np.exp(beta[2]) def filter_n_largest(y, cands, N): """Filters the N largest candidate peaks Return a maximum of N largest candidates. If N > len(cands) then all of the cands will be returned sorted, else the indices of the N largest peaks will be returned in descending order. Parameters ---------- y : array Independent variable cands : array An array containing the indices of candidate peaks N : int The maximum number of peaks to return, sorted by size. Must be positive Returns ------- cands : array An array of the indices of up to the N largest candidates """ cands = np.asarray(cands) N = int(N) if N <= 0: raise ValueError("The maximum number of peaks to return must " "be positive not {}".format(N)) sorted_args = np.argsort(y[cands]) # cut out if asking for more peaks than exist if len(cands) < N: return cands[sorted_args][::-1] return cands[sorted_args[-N:]][::-1] def filter_peak_height(y, cands, thresh, window=5): """ Filter to remove candidate that are too small. This is implemented by looking at the relative height (max - min) of the peak in a window around the candidate peak. Parameters ---------- y : array Independent variable cands : array An array containing the indices of candidate peaks thresh : int The minimum peak-to-peak size of the candidate peak to be accepted window : int, optional The size of the window around the peak to consider Returns ------- cands : array An array of the indices which pass the filter """ y = np.asarray(y) out_tmp = deque() max_ind = len(y) for ind in cands: slc = slice(np.max([0, ind - window]), np.min([max_ind, ind + window + 1])) pk_hght = np.ptp(y[slc]) if pk_hght > thresh: out_tmp.append(ind) return np.array(out_tmp) # add our refinement functions as an attribute on peak_refinement # ta make auto-wrapping for vistrials easier. peak_refinement.refine_function = [refine_log_quadratic, refine_quadratic]
0.924539
0.674885
from __future__ import absolute_import, division, print_function import numpy as np import scipy.signal _defaults = {"con_val_no_bin": 3, "con_val_bin": 5, "iter_num_no_bin": 3, "iter_num_bin": 5} def snip_method( spectrum, e_off, e_lin, e_quad, xmin=0, xmax=4096, epsilon=2.96, width=0.5, decrease_factor=np.sqrt(2), spectral_binning=None, con_val=None, iter_num=None, width_threshold=0.5, ): """ use snip algorithm to obtain background Parameters ---------- spectrum : array intensity spectrum e_off : float energy calibration, such as e_off + e_lin * energy + e_quad * energy^2 e_lin : float energy calibration, such as e_off + e_lin * energy + e_quad * energy^2 e_quad : float energy calibration, such as e_off + e_lin * energy + e_quad * energy^2 xmin : float, optional smallest index to define the range xmax : float, optional largest index to define the range epsilon : float, optional energy to create a hole-electron pair for Ge 2.96, for Si 3.61 at 300K needs to double check this value width : int, optional window size to adjust how much to shift background decrease_factor : float, optional gradually decrease of window size, default as sqrt(2) spectral_binning : float, optional bin the data into different size con_val : int, optional size of scipy.signal.windows.boxcar to convolve the spectrum. Default value is controlled by the keys `con_val_no_bin` and `con_val_bin` in the defaults dictionary, depending on if spectral_binning is used or not iter_num : int, optional Number of iterations. Default value is controlled by the keys `iter_num_no_bin` and `iter_num_bin` in the defaults dictionary, depending on if spectral_binning is used or not width_threshold : float, optional stop point of the algorithm Returns ------- background : array output results with peak removed References ---------- .. [1] C.G. Ryan etc, "SNIP, a statistics-sensitive background treatment for the quantitative analysis of PIXE spectra in geoscience applications", Nuclear Instruments and Methods in Physics Research Section B, vol. 34, 1998. """ # clean input a bit if con_val is None: if spectral_binning is None: con_val = _defaults["con_val_no_bin"] else: con_val = _defaults["con_val_bin"] if iter_num is None: if spectral_binning is None: iter_num = _defaults["iter_num_no_bin"] else: iter_num = _defaults["iter_num_bin"] background = np.array(spectrum) n_background = background.size energy = np.arange(n_background, dtype=np.float64) if spectral_binning is not None: energy = energy * spectral_binning energy = e_off + energy * e_lin + energy**2 * e_quad # transfer from std to fwhm std_fwhm = 2 * np.sqrt(2 * np.log(2)) tmp = (e_off / std_fwhm) ** 2 + energy * epsilon * e_lin tmp[tmp < 0] = 0 fwhm = std_fwhm * np.sqrt(tmp) # smooth the background s = scipy.signal.windows.boxcar(con_val) # For background remove, we only care about the central parts # where there are peaks. On the boundary part, we don't care # the accuracy so much. But we need to pay attention to edge # effects in general convolution. 
A = s.sum() background = scipy.signal.convolve(background, s, mode="same") / A window_p = width * fwhm / e_lin if spectral_binning is not None and spectral_binning > 0: window_p = window_p / 2.0 background = np.log(np.log(background + 1) + 1) index = np.arange(n_background) # FIRST SNIPPING for j in range(iter_num): lo_index = np.clip(index - window_p, np.max([xmin, 0]), np.min([xmax, n_background - 1])) hi_index = np.clip(index + window_p, np.max([xmin, 0]), np.min([xmax, n_background - 1])) temp = (background[lo_index.astype(np.int64)] + background[hi_index.astype(np.int64)]) / 2.0 bg_index = background > temp background[bg_index] = temp[bg_index] current_width = window_p max_current_width = np.amax(current_width) while max_current_width >= width_threshold: lo_index = np.clip(index - current_width, np.max([xmin, 0]), np.min([xmax, n_background - 1])) hi_index = np.clip(index + current_width, np.max([xmin, 0]), np.min([xmax, n_background - 1])) temp = (background[lo_index.astype(np.int64)] + background[hi_index.astype(np.int64)]) / 2.0 bg_index = background > temp background[bg_index] = temp[bg_index] # decrease the width and repeat current_width = current_width / decrease_factor max_current_width = np.amax(current_width) background = np.exp(np.exp(background) - 1) - 1 inf_ind = np.where(~np.isfinite(background)) background[inf_ind] = 0.0 return background
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/fitting/background.py
background.py
from __future__ import absolute_import, division, print_function import numpy as np import scipy.signal _defaults = {"con_val_no_bin": 3, "con_val_bin": 5, "iter_num_no_bin": 3, "iter_num_bin": 5} def snip_method( spectrum, e_off, e_lin, e_quad, xmin=0, xmax=4096, epsilon=2.96, width=0.5, decrease_factor=np.sqrt(2), spectral_binning=None, con_val=None, iter_num=None, width_threshold=0.5, ): """ use snip algorithm to obtain background Parameters ---------- spectrum : array intensity spectrum e_off : float energy calibration, such as e_off + e_lin * energy + e_quad * energy^2 e_lin : float energy calibration, such as e_off + e_lin * energy + e_quad * energy^2 e_quad : float energy calibration, such as e_off + e_lin * energy + e_quad * energy^2 xmin : float, optional smallest index to define the range xmax : float, optional largest index to define the range epsilon : float, optional energy to create a hole-electron pair for Ge 2.96, for Si 3.61 at 300K needs to double check this value width : int, optional window size to adjust how much to shift background decrease_factor : float, optional gradually decrease of window size, default as sqrt(2) spectral_binning : float, optional bin the data into different size con_val : int, optional size of scipy.signal.windows.boxcar to convolve the spectrum. Default value is controlled by the keys `con_val_no_bin` and `con_val_bin` in the defaults dictionary, depending on if spectral_binning is used or not iter_num : int, optional Number of iterations. Default value is controlled by the keys `iter_num_no_bin` and `iter_num_bin` in the defaults dictionary, depending on if spectral_binning is used or not width_threshold : float, optional stop point of the algorithm Returns ------- background : array output results with peak removed References ---------- .. [1] C.G. Ryan etc, "SNIP, a statistics-sensitive background treatment for the quantitative analysis of PIXE spectra in geoscience applications", Nuclear Instruments and Methods in Physics Research Section B, vol. 34, 1998. """ # clean input a bit if con_val is None: if spectral_binning is None: con_val = _defaults["con_val_no_bin"] else: con_val = _defaults["con_val_bin"] if iter_num is None: if spectral_binning is None: iter_num = _defaults["iter_num_no_bin"] else: iter_num = _defaults["iter_num_bin"] background = np.array(spectrum) n_background = background.size energy = np.arange(n_background, dtype=np.float64) if spectral_binning is not None: energy = energy * spectral_binning energy = e_off + energy * e_lin + energy**2 * e_quad # transfer from std to fwhm std_fwhm = 2 * np.sqrt(2 * np.log(2)) tmp = (e_off / std_fwhm) ** 2 + energy * epsilon * e_lin tmp[tmp < 0] = 0 fwhm = std_fwhm * np.sqrt(tmp) # smooth the background s = scipy.signal.windows.boxcar(con_val) # For background remove, we only care about the central parts # where there are peaks. On the boundary part, we don't care # the accuracy so much. But we need to pay attention to edge # effects in general convolution. 
A = s.sum() background = scipy.signal.convolve(background, s, mode="same") / A window_p = width * fwhm / e_lin if spectral_binning is not None and spectral_binning > 0: window_p = window_p / 2.0 background = np.log(np.log(background + 1) + 1) index = np.arange(n_background) # FIRST SNIPPING for j in range(iter_num): lo_index = np.clip(index - window_p, np.max([xmin, 0]), np.min([xmax, n_background - 1])) hi_index = np.clip(index + window_p, np.max([xmin, 0]), np.min([xmax, n_background - 1])) temp = (background[lo_index.astype(np.int64)] + background[hi_index.astype(np.int64)]) / 2.0 bg_index = background > temp background[bg_index] = temp[bg_index] current_width = window_p max_current_width = np.amax(current_width) while max_current_width >= width_threshold: lo_index = np.clip(index - current_width, np.max([xmin, 0]), np.min([xmax, n_background - 1])) hi_index = np.clip(index + current_width, np.max([xmin, 0]), np.min([xmax, n_background - 1])) temp = (background[lo_index.astype(np.int64)] + background[hi_index.astype(np.int64)]) / 2.0 bg_index = background > temp background[bg_index] = temp[bg_index] # decrease the width and repeat current_width = current_width / decrease_factor max_current_width = np.amax(current_width) background = np.exp(np.exp(background) - 1) - 1 inf_ind = np.where(~np.isfinite(background)) background[inf_ind] = 0.0 return background
0.911731
0.589835
from __future__ import absolute_import, division, print_function import logging import numpy as np import scipy.special from scipy import stats from scipy.special import gamma, gammaln logger = logging.getLogger(__name__) log2 = np.log(2) s2pi = np.sqrt(2 * np.pi) spi = np.sqrt(np.pi) s2 = np.sqrt(2.0) def gaussian(x, area, center, sigma): """1 dimensional gaussian Parameters ---------- x : array independent variable area : float Area of the normally distributed peak center : float center position sigma : float standard deviation """ return (area / (s2pi * sigma)) * np.exp(-1 * (1.0 * x - center) ** 2 / (2 * sigma**2)) def lorentzian(x, area, center, sigma): """1 dimensional lorentzian Parameters ---------- x : array independent variable area : float area of lorentzian peak, If area is set as 1, the integral is unity. center : float center position sigma : float standard deviation """ return (area / (1 + ((1.0 * x - center) / sigma) ** 2)) / (np.pi * sigma) def lorentzian2(x, area, center, sigma): """1-d lorentzian squared profile Parameters ---------- x : array independent variable area : float area of lorentzian squared peak, If area is set as 1, the integral is unity. center : float center position sigma : float standard deviation """ return (area / (1 + ((x - center) / sigma) ** 2) ** 2) / (np.pi * sigma) def voigt(x, area, center, sigma, gamma=None): """Convolution of gaussian and lorentzian curve. see http://en.wikipedia.org/wiki/Voigt_profile Parameters ---------- x : array independent variable area : float area of voigt peak center : float center position sigma : float standard deviation gamma : float, optional half width at half maximum of lorentzian. If optional, `gamma` gets set to `sigma` """ if gamma is None: gamma = sigma z = (x - center + 1j * gamma) / (sigma * s2) return area * scipy.special.wofz(z).real / (sigma * s2pi) def pvoigt(x, area, center, sigma, fraction): """Linear combination of gaussian and lorentzian Parameters ---------- x : array independent variable area : float area of pvoigt peak center : float center position sigma : float standard deviation fraction : float weight for lorentzian peak in the linear combination, and (1-fraction) is the weight for gaussian peak. """ return (1 - fraction) * gaussian(x, area, center, sigma) + fraction * lorentzian(x, area, center, sigma) def gausssian_step(x, area, center, sigma, peak_e): """ Gauss step function is an important component in modeling compton peak. Use scipy erfc function. Please note erfc = 1-erf. Parameters ---------- x : array data in x coordinate area : float area of gauss step function center : float center position sigma : float standard deviation peak_e : float emission energy Returns ------- counts : array gaussian step peak References ---------- .. [1] Rene Van Grieken, "Handbook of X-Ray Spectrometry, Second Edition, (Practical Spectroscopy)", CRC Press, 2 edition, pp. 182, 2007. """ return area * scipy.special.erfc((x - center) / (np.sqrt(2) * sigma)) / (2.0 * peak_e) def gaussian_tail(x, area, center, sigma, gamma): """ Use a gaussian tail function to simulate compton peak Parameters ---------- x : array data in x coordinate area : float area of gauss tail function If area is set as 1, the integral is unity. center : float center position sigma : float control peak width gamma : float normalization factor Returns ------- counts : array gaussian tail peak References ---------- .. 
[1] Rene Van Grieken, "Handbook of X-Ray Spectrometry, Second Edition, (Practical Spectroscopy)", CRC Press, 2 edition, pp. 182, 2007. """ dx_neg = np.array(x) - center dx_neg[dx_neg > 0] = 0 temp_a = np.exp(dx_neg / (gamma * sigma)) v1 = scipy.special.erfc((x - center) / (np.sqrt(2) * sigma) + (1 / (gamma * np.sqrt(2)))) v2 = 2 * gamma * sigma * np.exp(-0.5 / (gamma**2)) counts = area * temp_a * (v1 / v2) return counts def elastic( x, coherent_sct_amplitude, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, e_offset, e_linear, e_quadratic, epsilon=2.96, ): """Model of elastic peak in X-Ray fluorescence Parameters ---------- x : array energy value coherent_sct_amplitude : float area of elastic peak coherent_sct_energy : float incident energy fwhm_offset : float global fitting parameter for peak width fwhm_fanoprime : float global fitting parameter for peak width e_offset : float offset of energy calibration e_linear : float linear coefficient in energy calibration e_quadratic : float quadratic coefficient in energy calibration epsilon : float energy to create a hole-electron pair for Ge 2.96, for Si 3.61 at 300K needs to double check this value Returns ------- value : array elastic peak """ x = e_offset + x * e_linear + x**2 * e_quadratic temp_val = 2 * np.sqrt(2 * np.log(2)) sigma = np.sqrt((fwhm_offset / temp_val) ** 2 + coherent_sct_energy * epsilon * fwhm_fanoprime) return gaussian(x, coherent_sct_amplitude, coherent_sct_energy, sigma) def compton( x, compton_amplitude, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, e_offset, e_linear, e_quadratic, compton_angle, compton_fwhm_corr, compton_f_step, compton_f_tail, compton_gamma, compton_hi_f_tail, compton_hi_gamma, epsilon=2.96, ): """ Model compton peak, which is generated as an inelastic peak and always stays to the left of elastic peak on the spectrum. Parameters ---------- x : array energy value compton_amplitude : float area for gaussian peak, gaussian step and gaussian tail functions coherent_sct_energy : float incident energy fwhm_offset : float global fitting parameter for peak width fwhm_fanoprime : float global fitting parameter for peak width e_offset : float offset of energy calibration e_linear : float linear coefficient in energy calibration e_quadratic : float quadratic coefficient in energy calibration compton_angle : float compton angle in degree compton_fwhm_corr : float correction factor on peak width compton_f_step : float weight factor of the gaussian step function compton_f_tail : float weight factor of gaussian tail on lower side compton_gamma : float normalization factor of gaussian tail on lower side compton_hi_f_tail : float weight factor of gaussian tail on higher side compton_hi_gamma : float normalization factor of gaussian tail on higher side epsilon : float energy to create a hole-electron pair for Ge 2.96, for Si 3.61 at 300K needs to double check this value Returns ------- counts : array compton peak References ---------- .. [1] M. Van Gysel etc, "Description of Compton peaks in energy-dispersive x-ray fluorescence spectra", X-Ray Spectrometry, vol. 32, pp. 139-147, 2003. 
""" x = e_offset + x * e_linear + x**2 * e_quadratic # the rest-mass energy of an electron (511 keV) mc2 = 511 comp_denom = 1 + coherent_sct_energy / mc2 * (1 - np.cos(np.deg2rad(compton_angle))) compton_e = coherent_sct_energy / comp_denom temp_val = 2 * np.sqrt(2 * np.log(2)) sigma = np.sqrt((fwhm_offset / temp_val) ** 2 + compton_e * epsilon * fwhm_fanoprime) counts = np.zeros_like(x) factor = 1 / (1 + compton_f_step + compton_f_tail + compton_hi_f_tail) value = factor * gaussian(x, compton_amplitude, compton_e, sigma * compton_fwhm_corr) counts += value # compton peak, step if compton_f_step > 0.0: value = factor * compton_f_step value *= gausssian_step(x, compton_amplitude, compton_e, sigma, compton_e) counts += value # compton peak, tail on the low side value = factor * compton_f_tail value *= gaussian_tail(x, compton_amplitude, compton_e, sigma, compton_gamma) counts += value # compton peak, tail on the high side value = factor * compton_hi_f_tail value *= gaussian_tail(-1 * x, compton_amplitude, -1 * compton_e, sigma, compton_hi_gamma) counts += value return counts def gamma_dist(bin_values, K, M): """Gamma distribution function Parameters ---------- bin_values : array bin values for detecting photons eg : max photon counts is 8 bin_values = np.arange(8+2) K : int mean count of photons M : int number of coherent modes Returns ------- gamma_dist : array Gamma distribution Notes ----- These implementations are based on the references under the ``Notes`` section of the ``nbinom_dist()`` docstring .. math:: P(K) = \\frac{\\Gamma(K + M)} {\\Gamma(K + 1)\\Gamma(M)} (\\frac {M} {M + <K>})^M (\\frac {<K>}{M + <K>})^K """ gamma_dist = (stats.gamma(M, 0.0, K / M)).pdf(bin_values) return gamma_dist def nbinom_dist(bin_values, K, M): """ Negative Binomial (Poisson-Gamma) distribution function Parameters ---------- bin_values : array bin values for detecting photons eg : max photon counts is 8 bin_values = np.arange(8+2) K : int mean count of photons M : int number of coherent modes Returns ------- nbinom : array Negative Binomial (Poisson-Gamma) distribution function Notes ----- The negative-binomial distribution function .. math:: P(K) =(\\frac{M}{<K>})^M \\frac{K^{M-1}} {\\Gamma(M)}\\exp(-M\\frac{K}{<K>}) Implementation reference [1]_ References ---------- .. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini, C. Carona and A. Fluerasu , "Photon statistics and speckle visibility spectroscopy with partially coherent x-rays" J. Synchrotron Rad., vol 21, p 1288-1295, 2014. """ co_eff = np.exp(gammaln(bin_values + M) - gammaln(bin_values + 1) - gammaln(M)) nbinom = co_eff * np.power(M / (K + M), M) * np.power(K / (M + K), bin_values) return nbinom def poisson_dist(bin_values, K): """ Poisson Distribution Parameters ---------- K : int mean count of photons bin_values : array bin values for detecting photons eg : max photon counts is 8 bin_values = np.arange(8+2) Returns ------- poisson_dist : array Poisson Distribution Notes ----- These implementations are based on the references under the ``Notes`` section of the ``nbinom_dist()`` docstring .. math:: P(K) = \\frac{<K>^K}{K!}\\exp(-<K>) """ poisson_dist = np.exp(-K) * np.power(K, bin_values) / gamma(bin_values + 1) return poisson_dist
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/fitting/lineshapes.py
lineshapes.py
from __future__ import absolute_import, division, print_function import logging import numpy as np import scipy.special from scipy import stats from scipy.special import gamma, gammaln logger = logging.getLogger(__name__) log2 = np.log(2) s2pi = np.sqrt(2 * np.pi) spi = np.sqrt(np.pi) s2 = np.sqrt(2.0) def gaussian(x, area, center, sigma): """1 dimensional gaussian Parameters ---------- x : array independent variable area : float Area of the normally distributed peak center : float center position sigma : float standard deviation """ return (area / (s2pi * sigma)) * np.exp(-1 * (1.0 * x - center) ** 2 / (2 * sigma**2)) def lorentzian(x, area, center, sigma): """1 dimensional lorentzian Parameters ---------- x : array independent variable area : float area of lorentzian peak, If area is set as 1, the integral is unity. center : float center position sigma : float standard deviation """ return (area / (1 + ((1.0 * x - center) / sigma) ** 2)) / (np.pi * sigma) def lorentzian2(x, area, center, sigma): """1-d lorentzian squared profile Parameters ---------- x : array independent variable area : float area of lorentzian squared peak, If area is set as 1, the integral is unity. center : float center position sigma : float standard deviation """ return (area / (1 + ((x - center) / sigma) ** 2) ** 2) / (np.pi * sigma) def voigt(x, area, center, sigma, gamma=None): """Convolution of gaussian and lorentzian curve. see http://en.wikipedia.org/wiki/Voigt_profile Parameters ---------- x : array independent variable area : float area of voigt peak center : float center position sigma : float standard deviation gamma : float, optional half width at half maximum of lorentzian. If optional, `gamma` gets set to `sigma` """ if gamma is None: gamma = sigma z = (x - center + 1j * gamma) / (sigma * s2) return area * scipy.special.wofz(z).real / (sigma * s2pi) def pvoigt(x, area, center, sigma, fraction): """Linear combination of gaussian and lorentzian Parameters ---------- x : array independent variable area : float area of pvoigt peak center : float center position sigma : float standard deviation fraction : float weight for lorentzian peak in the linear combination, and (1-fraction) is the weight for gaussian peak. """ return (1 - fraction) * gaussian(x, area, center, sigma) + fraction * lorentzian(x, area, center, sigma) def gausssian_step(x, area, center, sigma, peak_e): """ Gauss step function is an important component in modeling compton peak. Use scipy erfc function. Please note erfc = 1-erf. Parameters ---------- x : array data in x coordinate area : float area of gauss step function center : float center position sigma : float standard deviation peak_e : float emission energy Returns ------- counts : array gaussian step peak References ---------- .. [1] Rene Van Grieken, "Handbook of X-Ray Spectrometry, Second Edition, (Practical Spectroscopy)", CRC Press, 2 edition, pp. 182, 2007. """ return area * scipy.special.erfc((x - center) / (np.sqrt(2) * sigma)) / (2.0 * peak_e) def gaussian_tail(x, area, center, sigma, gamma): """ Use a gaussian tail function to simulate compton peak Parameters ---------- x : array data in x coordinate area : float area of gauss tail function If area is set as 1, the integral is unity. center : float center position sigma : float control peak width gamma : float normalization factor Returns ------- counts : array gaussian tail peak References ---------- .. 
[1] Rene Van Grieken, "Handbook of X-Ray Spectrometry, Second Edition, (Practical Spectroscopy)", CRC Press, 2 edition, pp. 182, 2007. """ dx_neg = np.array(x) - center dx_neg[dx_neg > 0] = 0 temp_a = np.exp(dx_neg / (gamma * sigma)) v1 = scipy.special.erfc((x - center) / (np.sqrt(2) * sigma) + (1 / (gamma * np.sqrt(2)))) v2 = 2 * gamma * sigma * np.exp(-0.5 / (gamma**2)) counts = area * temp_a * (v1 / v2) return counts def elastic( x, coherent_sct_amplitude, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, e_offset, e_linear, e_quadratic, epsilon=2.96, ): """Model of elastic peak in X-Ray fluorescence Parameters ---------- x : array energy value coherent_sct_amplitude : float area of elastic peak coherent_sct_energy : float incident energy fwhm_offset : float global fitting parameter for peak width fwhm_fanoprime : float global fitting parameter for peak width e_offset : float offset of energy calibration e_linear : float linear coefficient in energy calibration e_quadratic : float quadratic coefficient in energy calibration epsilon : float energy to create a hole-electron pair for Ge 2.96, for Si 3.61 at 300K needs to double check this value Returns ------- value : array elastic peak """ x = e_offset + x * e_linear + x**2 * e_quadratic temp_val = 2 * np.sqrt(2 * np.log(2)) sigma = np.sqrt((fwhm_offset / temp_val) ** 2 + coherent_sct_energy * epsilon * fwhm_fanoprime) return gaussian(x, coherent_sct_amplitude, coherent_sct_energy, sigma) def compton( x, compton_amplitude, coherent_sct_energy, fwhm_offset, fwhm_fanoprime, e_offset, e_linear, e_quadratic, compton_angle, compton_fwhm_corr, compton_f_step, compton_f_tail, compton_gamma, compton_hi_f_tail, compton_hi_gamma, epsilon=2.96, ): """ Model compton peak, which is generated as an inelastic peak and always stays to the left of elastic peak on the spectrum. Parameters ---------- x : array energy value compton_amplitude : float area for gaussian peak, gaussian step and gaussian tail functions coherent_sct_energy : float incident energy fwhm_offset : float global fitting parameter for peak width fwhm_fanoprime : float global fitting parameter for peak width e_offset : float offset of energy calibration e_linear : float linear coefficient in energy calibration e_quadratic : float quadratic coefficient in energy calibration compton_angle : float compton angle in degree compton_fwhm_corr : float correction factor on peak width compton_f_step : float weight factor of the gaussian step function compton_f_tail : float weight factor of gaussian tail on lower side compton_gamma : float normalization factor of gaussian tail on lower side compton_hi_f_tail : float weight factor of gaussian tail on higher side compton_hi_gamma : float normalization factor of gaussian tail on higher side epsilon : float energy to create a hole-electron pair for Ge 2.96, for Si 3.61 at 300K needs to double check this value Returns ------- counts : array compton peak References ---------- .. [1] M. Van Gysel etc, "Description of Compton peaks in energy-dispersive x-ray fluorescence spectra", X-Ray Spectrometry, vol. 32, pp. 139-147, 2003. 
""" x = e_offset + x * e_linear + x**2 * e_quadratic # the rest-mass energy of an electron (511 keV) mc2 = 511 comp_denom = 1 + coherent_sct_energy / mc2 * (1 - np.cos(np.deg2rad(compton_angle))) compton_e = coherent_sct_energy / comp_denom temp_val = 2 * np.sqrt(2 * np.log(2)) sigma = np.sqrt((fwhm_offset / temp_val) ** 2 + compton_e * epsilon * fwhm_fanoprime) counts = np.zeros_like(x) factor = 1 / (1 + compton_f_step + compton_f_tail + compton_hi_f_tail) value = factor * gaussian(x, compton_amplitude, compton_e, sigma * compton_fwhm_corr) counts += value # compton peak, step if compton_f_step > 0.0: value = factor * compton_f_step value *= gausssian_step(x, compton_amplitude, compton_e, sigma, compton_e) counts += value # compton peak, tail on the low side value = factor * compton_f_tail value *= gaussian_tail(x, compton_amplitude, compton_e, sigma, compton_gamma) counts += value # compton peak, tail on the high side value = factor * compton_hi_f_tail value *= gaussian_tail(-1 * x, compton_amplitude, -1 * compton_e, sigma, compton_hi_gamma) counts += value return counts def gamma_dist(bin_values, K, M): """Gamma distribution function Parameters ---------- bin_values : array bin values for detecting photons eg : max photon counts is 8 bin_values = np.arange(8+2) K : int mean count of photons M : int number of coherent modes Returns ------- gamma_dist : array Gamma distribution Notes ----- These implementations are based on the references under the ``Notes`` section of the ``nbinom_dist()`` docstring .. math:: P(K) = \\frac{\\Gamma(K + M)} {\\Gamma(K + 1)\\Gamma(M)} (\\frac {M} {M + <K>})^M (\\frac {<K>}{M + <K>})^K """ gamma_dist = (stats.gamma(M, 0.0, K / M)).pdf(bin_values) return gamma_dist def nbinom_dist(bin_values, K, M): """ Negative Binomial (Poisson-Gamma) distribution function Parameters ---------- bin_values : array bin values for detecting photons eg : max photon counts is 8 bin_values = np.arange(8+2) K : int mean count of photons M : int number of coherent modes Returns ------- nbinom : array Negative Binomial (Poisson-Gamma) distribution function Notes ----- The negative-binomial distribution function .. math:: P(K) =(\\frac{M}{<K>})^M \\frac{K^{M-1}} {\\Gamma(M)}\\exp(-M\\frac{K}{<K>}) Implementation reference [1]_ References ---------- .. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini, C. Carona and A. Fluerasu , "Photon statistics and speckle visibility spectroscopy with partially coherent x-rays" J. Synchrotron Rad., vol 21, p 1288-1295, 2014. """ co_eff = np.exp(gammaln(bin_values + M) - gammaln(bin_values + 1) - gammaln(M)) nbinom = co_eff * np.power(M / (K + M), M) * np.power(K / (M + K), bin_values) return nbinom def poisson_dist(bin_values, K): """ Poisson Distribution Parameters ---------- K : int mean count of photons bin_values : array bin values for detecting photons eg : max photon counts is 8 bin_values = np.arange(8+2) Returns ------- poisson_dist : array Poisson Distribution Notes ----- These implementations are based on the references under the ``Notes`` section of the ``nbinom_dist()`` docstring .. math:: P(K) = \\frac{<K>^K}{K!}\\exp(-<K>) """ poisson_dist = np.exp(-K) * np.power(K, bin_values) / gamma(bin_values + 1) return poisson_dist
0.973582
0.637271
from __future__ import absolute_import, division, print_function import inspect import logging from lmfit import Model from .base.parameter_data import get_para from .lineshapes import compton, elastic, lorentzian2 logger = logging.getLogger(__name__) def set_default(model_name, func_name): """ Set values and bounds to Model parameters in lmfit. Parameters ---------- model_name : class object Model class object from lmfit func_name : function function name of physics peak """ paras = inspect.getargspec(func_name) # the first argument is independent variable, also ignored # default values are not considered for fitting in this function my_args = paras.args[1:] para_dict = get_para() for name in my_args: if name not in para_dict.keys(): continue my_dict = para_dict[name] if my_dict["bound_type"] == "none": model_name.set_param_hint(name, vary=True) elif my_dict["bound_type"] == "fixed": model_name.set_param_hint(name, vary=False, value=my_dict["value"]) elif my_dict["bound_type"] == "lo": model_name.set_param_hint(name, value=my_dict["value"], vary=True, min=my_dict["min"]) elif my_dict["bound_type"] == "hi": model_name.set_param_hint(name, value=my_dict["value"], vary=True, max=my_dict["max"]) elif my_dict["bound_type"] == "lohi": model_name.set_param_hint( name, value=my_dict["value"], vary=True, min=my_dict["min"], max=my_dict["max"] ) else: raise TypeError("Boundary type {0} can't be " "used".format(my_dict["bound_type"])) def _gen_class_docs(func): """ Parameters ---------- func : function function of peak profile Returns ------- str : documentation of the function """ return ( " Wrap the {} function for fitting within lmfit " "framework\n ".format(func.__name__) + func.__doc__ ) # DEFINE NEW MODELS class ElasticModel(Model): __doc__ = _gen_class_docs(elastic) def __init__(self, *args, **kwargs): super(ElasticModel, self).__init__(elastic, *args, **kwargs) self.set_param_hint("epsilon", value=2.96, vary=False) class ComptonModel(Model): __doc__ = _gen_class_docs(compton) def __init__(self, *args, **kwargs): super(ComptonModel, self).__init__(compton, *args, **kwargs) self.set_param_hint("epsilon", value=2.96, vary=False) class Lorentzian2Model(Model): __doc__ = _gen_class_docs(lorentzian2) def __init__(self, *args, **kwargs): super(Lorentzian2Model, self).__init__(lorentzian2, *args, **kwargs)
scikit-beam
/scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/core/fitting/models.py
models.py
from __future__ import absolute_import, division, print_function import inspect import logging from lmfit import Model from .base.parameter_data import get_para from .lineshapes import compton, elastic, lorentzian2 logger = logging.getLogger(__name__) def set_default(model_name, func_name): """ Set values and bounds to Model parameters in lmfit. Parameters ---------- model_name : class object Model class object from lmfit func_name : function function name of physics peak """ paras = inspect.getargspec(func_name) # the first argument is independent variable, also ignored # default values are not considered for fitting in this function my_args = paras.args[1:] para_dict = get_para() for name in my_args: if name not in para_dict.keys(): continue my_dict = para_dict[name] if my_dict["bound_type"] == "none": model_name.set_param_hint(name, vary=True) elif my_dict["bound_type"] == "fixed": model_name.set_param_hint(name, vary=False, value=my_dict["value"]) elif my_dict["bound_type"] == "lo": model_name.set_param_hint(name, value=my_dict["value"], vary=True, min=my_dict["min"]) elif my_dict["bound_type"] == "hi": model_name.set_param_hint(name, value=my_dict["value"], vary=True, max=my_dict["max"]) elif my_dict["bound_type"] == "lohi": model_name.set_param_hint( name, value=my_dict["value"], vary=True, min=my_dict["min"], max=my_dict["max"] ) else: raise TypeError("Boundary type {0} can't be " "used".format(my_dict["bound_type"])) def _gen_class_docs(func): """ Parameters ---------- func : function function of peak profile Returns ------- str : documentation of the function """ return ( " Wrap the {} function for fitting within lmfit " "framework\n ".format(func.__name__) + func.__doc__ ) # DEFINE NEW MODELS class ElasticModel(Model): __doc__ = _gen_class_docs(elastic) def __init__(self, *args, **kwargs): super(ElasticModel, self).__init__(elastic, *args, **kwargs) self.set_param_hint("epsilon", value=2.96, vary=False) class ComptonModel(Model): __doc__ = _gen_class_docs(compton) def __init__(self, *args, **kwargs): super(ComptonModel, self).__init__(compton, *args, **kwargs) self.set_param_hint("epsilon", value=2.96, vary=False) class Lorentzian2Model(Model): __doc__ = _gen_class_docs(lorentzian2) def __init__(self, *args, **kwargs): super(Lorentzian2Model, self).__init__(lorentzian2, *args, **kwargs)
0.821975
0.173183