def predict(self, X):
    """Predict the class labels for the provided data.

    Parameters
    ----------
    X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
        Test samples.

    Returns
    -------
    y : array of shape [n_samples] or [n_samples, n_outputs]
        Class labels for each data sample.
    """
    X = check_array(X, accept_sparse='csr')

    neigh_dist, neigh_ind = self.kneighbors(X)

    classes_ = self.classes_
    _y = self._y
    if not self.outputs_2d_:
        _y = self._y.reshape((-1, 1))
        classes_ = [self.classes_]

    n_outputs = len(classes_)
    n_samples = X.shape[0]
    weights = _get_weights(neigh_dist, self.weights)

    y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
    for k, classes_k in enumerate(classes_):
        if weights is None:
            mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
        else:
            mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)

        mode = np.asarray(mode.ravel(), dtype=np.intp)
        y_pred[:, k] = classes_k.take(mode)

    if not self.outputs_2d_:
        y_pred = y_pred.ravel()

    return y_pred
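A hedged usage sketch (synthetic data, not from the extracted source) showing how this predict method is typically reached through the public KNeighborsClassifier API:

# Hypothetical usage sketch for KNeighborsClassifier.predict; assumes a
# standard scikit-learn installation.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

X_train = np.array([[0.0], [0.5], [3.0], [3.5]])
y_train = np.array([0, 0, 1, 1])

clf = KNeighborsClassifier(n_neighbors=3, weights='distance')
clf.fit(X_train, y_train)
# Distance weighting lets the two close class-0 points outvote the one
# class-1 neighbor for the first query.
print(clf.predict(np.array([[0.2], [3.2]])))  # expected: [0 1]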
def predict_proba(self, X):
    """Return probability estimates for the test data X.

    Parameters
    ----------
    X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
        Test samples.

    Returns
    -------
    p : array of shape = [n_samples, n_classes], or a list of n_outputs of such arrays if n_outputs > 1.
        The class probabilities of the input samples. Classes are ordered
        by lexicographic order.
    """
    X = check_array(X, accept_sparse='csr')

    neigh_dist, neigh_ind = self.kneighbors(X)

    classes_ = self.classes_
    _y = self._y
    if not self.outputs_2d_:
        _y = self._y.reshape((-1, 1))
        classes_ = [self.classes_]

    n_samples = X.shape[0]

    weights = _get_weights(neigh_dist, self.weights)
    if weights is None:
        weights = np.ones_like(neigh_ind)

    all_rows = np.arange(X.shape[0])
    probabilities = []
    for k, classes_k in enumerate(classes_):
        pred_labels = _y[:, k][neigh_ind]
        proba_k = np.zeros((n_samples, classes_k.size))

        # Accumulate one (possibly distance-weighted) vote per neighbor.
        for i, idx in enumerate(pred_labels.T):
            proba_k[all_rows, idx] += weights[:, i]

        # Normalize the accumulated votes into probabilities.
        normalizer = proba_k.sum(axis=1)[:, np.newaxis]
        normalizer[normalizer == 0.0] = 1.0
        proba_k /= normalizer

        probabilities.append(proba_k)

    if not self.outputs_2d_:
        probabilities = probabilities[0]

    return probabilities
def predict(self, X):
    """Predict the class labels for the provided data.

    Parameters
    ----------
    X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
        Test samples.

    Returns
    -------
    y : array of shape [n_samples] or [n_samples, n_outputs]
        Class labels for each data sample.
    """
    X = check_array(X, accept_sparse='csr')
    n_samples = X.shape[0]

    neigh_dist, neigh_ind = self.radius_neighbors(X)
    inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
    outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]

    classes_ = self.classes_
    _y = self._y
    if not self.outputs_2d_:
        _y = self._y.reshape((-1, 1))
        classes_ = [self.classes_]
    n_outputs = len(classes_)

    if self.outlier_label is not None:
        neigh_dist[outliers] = 1e-6
    elif outliers:
        raise ValueError('No neighbors found for test samples %r, '
                         'you can try using larger radius, '
                         'give a label for outliers, '
                         'or consider removing them from your dataset.'
                         % outliers)

    weights = _get_weights(neigh_dist, self.weights)

    y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
    for k, classes_k in enumerate(classes_):
        pred_labels = np.zeros(len(neigh_ind), dtype=object)
        pred_labels[:] = [_y[ind, k] for ind in neigh_ind]

        if weights is None:
            # np.int is a removed numpy alias; plain int is equivalent here.
            mode = np.array([stats.mode(pl)[0]
                             for pl in pred_labels[inliers]], dtype=int)
        else:
            mode = np.array([weighted_mode(pl, w)[0]
                             for (pl, w) in zip(pred_labels[inliers],
                                                weights[inliers])],
                            dtype=int)

        mode = mode.ravel()
        y_pred[inliers, k] = classes_k.take(mode)

    if outliers:
        y_pred[outliers, :] = self.outlier_label

    if not self.outputs_2d_:
        y_pred = y_pred.ravel()

    return y_pred
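The outlier_label branch above is exercised whenever a query point has an empty neighborhood; a hedged sketch on synthetic data, assuming the standard RadiusNeighborsClassifier constructor:

# Hypothetical usage sketch for RadiusNeighborsClassifier with an
# outlier_label; assumes a standard scikit-learn installation.
import numpy as np
from sklearn.neighbors import RadiusNeighborsClassifier

X_train = np.array([[0.0], [0.4], [5.0], [5.3]])
y_train = np.array([0, 0, 1, 1])

clf = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
clf.fit(X_train, y_train)
# [10.0] has no neighbors within radius 1.0, so it receives the outlier label.
print(clf.predict(np.array([[0.1], [5.1], [10.0]])))  # expected: [ 0  1 -1]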
'"Fits the model to the training set X and returns the labels (1 inlier, -1 outlier) on the training set according to the LOF score and the contamination parameter. Parameters X : array-like, shape (n_samples, n_features), default=None The query sample or samples to compute the Local Outlier Factor w.r.t. to the training samples. Returns is_inlier : array, shape (n_samples,) Returns -1 for anomalies/outliers and 1 for inliers.'
def fit_predict(self, X, y=None):
return self.fit(X)._predict()
def fit(self, X, y=None):
    """Fit the model using X as training data.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training data. If array or matrix, shape [n_samples, n_features],
        or [n_samples, n_samples] if metric='precomputed'.

    Returns
    -------
    self : object
        Returns self.
    """
    if not (0. < self.contamination <= .5):
        raise ValueError("contamination must be in (0, 0.5]")

    super(LocalOutlierFactor, self).fit(X)

    n_samples = self._fit_X.shape[0]
    if self.n_neighbors > n_samples:
        warn("n_neighbors (%s) is greater than the total number of "
             "samples (%s). n_neighbors will be set to (n_samples - 1) "
             "for estimation." % (self.n_neighbors, n_samples))
    self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))

    self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
        None, n_neighbors=self.n_neighbors_)

    self._lrd = self._local_reachability_density(
        self._distances_fit_X_, _neighbors_indices_fit_X_)

    # Compute the LOF score over the training samples to define threshold_.
    lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_]
                        / self._lrd[:, np.newaxis])

    self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)

    self.threshold_ = -scoreatpercentile(
        -self.negative_outlier_factor_, 100. * (1. - self.contamination))

    return self
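The attributes computed here (negative_outlier_factor_, threshold_) are exactly what fit_predict consumes; a hedged sketch on synthetic data:

# Hypothetical usage sketch for LocalOutlierFactor; the private _predict /
# threshold_ details above are version-specific internals.
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(42)
X = np.vstack([rng.normal(0, 1, size=(100, 2)),   # inliers
               np.array([[8.0, 8.0]])])           # one obvious outlier

lof = LocalOutlierFactor(n_neighbors=20, contamination=0.01)
labels = lof.fit_predict(X)           # +1 inlier, -1 outlier
print(labels[-1])                     # expected: -1 for the injected outlier
print(lof.negative_outlier_factor_[-1])  # strongly negative for the outlier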
def _predict(self, X=None):
    """Predict the labels (1 inlier, -1 outlier) of X according to LOF.

    If X is None, returns the same as fit_predict(X_train).
    This method allows to generalize prediction to new observations (not
    in the training set). As LOF originally does not deal with new data,
    this method is kept private.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features), default=None
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. the training samples. If None, makes prediction on the
        training data without considering them as their own neighbors.

    Returns
    -------
    is_inlier : array, shape (n_samples,)
        Returns -1 for anomalies/outliers and +1 for inliers.
    """
    check_is_fitted(self, ['threshold_', 'negative_outlier_factor_',
                           'n_neighbors_', '_distances_fit_X_'])

    if X is not None:
        X = check_array(X, accept_sparse='csr')
        is_inlier = np.ones(X.shape[0], dtype=int)
        is_inlier[self._decision_function(X) <= self.threshold_] = -1
    else:
        is_inlier = np.ones(self._fit_X.shape[0], dtype=int)
        is_inlier[self.negative_outlier_factor_ <= self.threshold_] = -1

    return is_inlier
def _decision_function(self, X):
    """Opposite of the Local Outlier Factor of X (as bigger is better,
    i.e. large values correspond to inliers).

    The argument X is supposed to contain *new data*: if X contains a
    point from training, it considers the latter in its own neighborhood.
    Also, the samples in X are not considered in the neighborhood of any
    point. The decision function on training data is available by
    considering the opposite of the negative_outlier_factor_ attribute.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. the training samples.

    Returns
    -------
    opposite_lof_scores : array, shape (n_samples,)
        The opposite of the Local Outlier Factor of each input sample.
        The lower, the more abnormal.
    """
    check_is_fitted(self, ['threshold_', 'negative_outlier_factor_',
                           '_distances_fit_X_'])
    X = check_array(X, accept_sparse='csr')

    distances_X, neighbors_indices_X = self.kneighbors(
        X, n_neighbors=self.n_neighbors_)
    X_lrd = self._local_reachability_density(distances_X,
                                             neighbors_indices_X)

    lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]

    # As bigger is better:
    return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
    """The local reachability density (LRD).

    The LRD of a sample is the inverse of the average reachability
    distance of its k-nearest neighbors.

    Parameters
    ----------
    distances_X : array, shape (n_query, self.n_neighbors)
        Distances to the neighbors (in the training samples `self._fit_X`)
        of each query point to compute the LRD.

    neighbors_indices : array, shape (n_query, self.n_neighbors)
        Neighbors indices (of each query point) among training samples
        self._fit_X.

    Returns
    -------
    local_reachability_density : array, shape (n_samples,)
        The local reachability density of each sample.
    """
    dist_k = self._distances_fit_X_[neighbors_indices,
                                    self.n_neighbors_ - 1]
    reach_dist_array = np.maximum(distances_X, dist_k)

    # 1e-10 avoids a division by zero when all reachability distances are 0.
    return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
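The code above implements reach-dist_k(p, o) = max(dist(p, o), k-distance(o)) and lrd(p) = 1 / mean_o(reach-dist_k(p, o)); a minimal numpy sketch of that formula with made-up distances:

# Minimal sketch of the LRD formula (illustrative values only).
import numpy as np

distances_X = np.array([[0.5, 1.0, 2.0]])  # one query, distances to its 3 NN
dist_k = np.array([1.2, 0.8, 1.5])         # k-distance of each neighbor
reach_dist = np.maximum(distances_X, dist_k)  # -> [[1.2, 1.0, 2.0]]
lrd = 1.0 / (reach_dist.mean(axis=1) + 1e-10)
print(lrd)  # ~ [0.714] = 1 / 1.4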
def fit(self, X, y):
    """Fit the NearestCentroid model according to the given training data.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
        Note that centroid shrinking cannot be used with sparse matrices.

    y : array, shape = [n_samples]
        Target values (integers).
    """
    if self.metric == 'precomputed':
        raise ValueError("Precomputed is not supported.")
    # If X is sparse and the metric is "manhattan", store it in csc
    # format so the column-wise median is cheap to compute.
    if self.metric == 'manhattan':
        X, y = check_X_y(X, y, ['csc'])
    else:
        X, y = check_X_y(X, y, ['csr', 'csc'])
    is_X_sparse = sp.issparse(X)
    if is_X_sparse and self.shrink_threshold:
        raise ValueError("threshold shrinking not supported"
                         " for sparse input")
    check_classification_targets(y)

    n_samples, n_features = X.shape
    le = LabelEncoder()
    y_ind = le.fit_transform(y)
    self.classes_ = classes = le.classes_
    n_classes = classes.size
    if n_classes < 2:
        raise ValueError('y has less than 2 classes')

    self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
    # Number of samples in each class.
    nk = np.zeros(n_classes)

    for cur_class in range(n_classes):
        center_mask = y_ind == cur_class
        nk[cur_class] = np.sum(center_mask)
        if is_X_sparse:
            center_mask = np.where(center_mask)[0]

        if self.metric == "manhattan":
            # NumPy does not calculate the median of sparse matrices.
            if not is_X_sparse:
                self.centroids_[cur_class] = np.median(X[center_mask],
                                                       axis=0)
            else:
                self.centroids_[cur_class] = csc_median_axis_0(
                    X[center_mask])
        else:
            if self.metric != 'euclidean':
                warnings.warn("Averaging for metrics other than "
                              "euclidean and manhattan not supported. "
                              "The average is set to be the mean.")
            self.centroids_[cur_class] = X[center_mask].mean(axis=0)

    if self.shrink_threshold:
        dataset_centroid_ = np.mean(X, axis=0)

        # m parameter for determining deviation.
        m = np.sqrt((1. / nk) - (1. / n_samples))
        # Calculate deviation using the standard deviation of centroids.
        variance = (X - self.centroids_[y_ind]) ** 2
        variance = variance.sum(axis=0)
        s = np.sqrt(variance / (n_samples - n_classes))
        s += np.median(s)  # To deter outliers from affecting the results.
        mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
        ms = mm * s
        deviation = (self.centroids_ - dataset_centroid_) / ms
        # Soft thresholding: if the deviation crosses 0 during shrinking,
        # it becomes zero.
        signs = np.sign(deviation)
        deviation = np.abs(deviation) - self.shrink_threshold
        deviation[deviation < 0] = 0
        deviation *= signs
        # Now adjust the centroids using the deviation.
        msd = ms * deviation
        self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
    return self
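A hedged usage sketch of the fitted centroids and the shrink_threshold branch above (synthetic data, dense input only):

# Hypothetical usage sketch for NearestCentroid; shrink_threshold exercises
# the soft-thresholding branch in fit.
import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-2.0, 0.0], [-1.5, 0.1], [2.0, 0.0], [1.7, -0.1]])
y = np.array([0, 0, 1, 1])

clf = NearestCentroid(metric='euclidean', shrink_threshold=0.1)
clf.fit(X, y)
print(clf.centroids_)                        # shrunken per-class centroids
print(clf.predict(np.array([[-1.0, 0.0]])))  # expected: [0]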
def predict(self, X):
    """Perform classification on an array of test vectors X.

    The predicted class C for each sample in X is returned.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array, shape = [n_samples]

    Notes
    -----
    If the metric constructor parameter is "precomputed", X is assumed
    to be the distance matrix between the data to be predicted and
    ``self.centroids_``.
    """
    check_is_fitted(self, 'centroids_')

    X = check_array(X, accept_sparse='csr')
    return self.classes_[pairwise_distances(
        X, self.centroids_, metric=self.metric).argmin(axis=1)]
def predict(self, X):
    """Predict the target for the provided data.

    Parameters
    ----------
    X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
        Test samples.

    Returns
    -------
    y : array of int, shape = [n_samples] or [n_samples, n_outputs]
        Target values.
    """
    X = check_array(X, accept_sparse='csr')

    neigh_dist, neigh_ind = self.kneighbors(X)

    weights = _get_weights(neigh_dist, self.weights)

    _y = self._y
    if _y.ndim == 1:
        _y = _y.reshape((-1, 1))

    if weights is None:
        y_pred = np.mean(_y[neigh_ind], axis=1)
    else:
        y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
        denom = np.sum(weights, axis=1)

        for j in range(_y.shape[1]):
            num = np.sum(_y[neigh_ind, j] * weights, axis=1)
            y_pred[:, j] = num / denom

    if self._y.ndim == 1:
        y_pred = y_pred.ravel()

    return y_pred
def predict(self, X):
    """Predict the target for the provided data.

    Parameters
    ----------
    X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
        Test samples.

    Returns
    -------
    y : array of int, shape = [n_samples] or [n_samples, n_outputs]
        Target values.
    """
    X = check_array(X, accept_sparse='csr')

    neigh_dist, neigh_ind = self.radius_neighbors(X)

    weights = _get_weights(neigh_dist, self.weights)

    _y = self._y
    if _y.ndim == 1:
        _y = _y.reshape((-1, 1))

    if weights is None:
        y_pred = np.array([np.mean(_y[ind, :], axis=0)
                           for ind in neigh_ind])
    else:
        y_pred = np.array([np.average(_y[ind, :], axis=0,
                                      weights=weights[i])
                           for (i, ind) in enumerate(neigh_ind)])

    if self._y.ndim == 1:
        y_pred = y_pred.ravel()

    return y_pred
def predict(self, X):
    """Perform classification on an array of test vectors X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array, shape = [n_samples]
        Predicted target values for X.
    """
    jll = self._joint_log_likelihood(X)
    return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
    """Return log-probability estimates for the test vector X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array-like, shape = [n_samples, n_classes]
        Returns the log-probability of the samples for each class in
        the model. The columns correspond to the classes in sorted
        order, as they appear in the attribute `classes_`.
    """
    jll = self._joint_log_likelihood(X)
    # Normalize by P(x) = sum over classes of the joint likelihood.
    log_prob_x = logsumexp(jll, axis=1)
    return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
    """Return probability estimates for the test vector X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array-like, shape = [n_samples, n_classes]
        Returns the probability of the samples for each class in the
        model. The columns correspond to the classes in sorted order,
        as they appear in the attribute `classes_`.
    """
    return np.exp(self.predict_log_proba(X))
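The predict/predict_log_proba/predict_proba trio above is shared by the naive Bayes estimators through _joint_log_likelihood; a hedged sketch via GaussianNB on synthetic data:

# Hypothetical usage sketch: the shared predict/predict_proba pair, reached
# through GaussianNB's _joint_log_likelihood.
import numpy as np
from sklearn.naive_bayes import GaussianNB

X = np.array([[-2.0], [-1.0], [1.0], [2.0]])
y = np.array([0, 0, 1, 1])

nb = GaussianNB().fit(X, y)
print(nb.predict(np.array([[-1.5], [1.5]])))         # expected: [0 1]
print(nb.predict_proba(np.array([[0.0]])).round(2))  # ~ [[0.5 0.5]] by symmetry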
def fit(self, X, y, sample_weight=None):
    """Fit Gaussian Naive Bayes according to X, y.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.

    y : array-like, shape (n_samples,)
        Target values.

    sample_weight : array-like, shape (n_samples,), optional (default=None)
        Weights applied to individual samples (1. for unweighted).

        .. versionadded:: 0.17
           Gaussian Naive Bayes supports fitting with *sample_weight*.

    Returns
    -------
    self : object
        Returns self.
    """
    X, y = check_X_y(X, y)
    return self._partial_fit(X, y, np.unique(y), _refit=True,
                             sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
    """Compute online update of Gaussian mean and variance.

    Given starting sample count, mean, and variance, a new set of
    points X, and optionally sample weights, return the updated mean and
    variance. (NB - each dimension (column) in X is treated as independent
    -- you get variance, not covariance).

    Can take scalar mean and variance, or vector mean and variance to
    simultaneously update a number of independent Gaussians.

    See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
    http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

    Parameters
    ----------
    n_past : int
        Number of samples represented in old mean and variance. If sample
        weights were given, this should contain the sum of sample
        weights represented in old mean and variance.

    mu : array-like, shape (number of Gaussians,)
        Means for Gaussians in original set.

    var : array-like, shape (number of Gaussians,)
        Variances for Gaussians in original set.

    X : array-like, shape (n_samples, n_features)
        New data points to fold into the running statistics.

    sample_weight : array-like, shape (n_samples,), optional (default=None)
        Weights applied to individual samples (1. for unweighted).

    Returns
    -------
    total_mu : array-like, shape (number of Gaussians,)
        Updated mean for each Gaussian over the combined set.

    total_var : array-like, shape (number of Gaussians,)
        Updated variance for each Gaussian over the combined set.
    """
    if X.shape[0] == 0:
        return mu, var

    # Compute (potentially weighted) mean and variance of new datapoints.
    if sample_weight is not None:
        n_new = float(sample_weight.sum())
        new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
        new_var = np.average((X - new_mu) ** 2, axis=0,
                             weights=sample_weight / n_new)
    else:
        n_new = X.shape[0]
        new_var = np.var(X, axis=0)
        new_mu = np.mean(X, axis=0)

    if n_past == 0:
        return new_mu, new_var

    n_total = float(n_past + n_new)

    # Combine the means of old and new data, weighted by their counts.
    total_mu = (n_new * new_mu + n_past * mu) / n_total

    # Combine the variances via sums of squared differences (SSD), as in
    # Chan et al.'s parallel update formula.
    old_ssd = n_past * var
    new_ssd = n_new * new_var
    total_ssd = (old_ssd + new_ssd +
                 (n_past / float(n_new * n_total)) *
                 (n_new * mu - n_new * new_mu) ** 2)
    total_var = total_ssd / n_total

    return total_mu, total_var
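The update can be sanity-checked against batch statistics, since folding a new block into (n_past, mu, var) should reproduce the mean and variance of the concatenated data. A sketch calling the private staticmethod directly (signature assumed from the code above; private APIs can change between versions):

# Sanity-check sketch for the online mean/variance update.
import numpy as np
from sklearn.naive_bayes import GaussianNB

rng = np.random.RandomState(0)
A, B = rng.rand(50, 3), rng.rand(30, 3)

mu, var = A.mean(axis=0), A.var(axis=0)
mu2, var2 = GaussianNB._update_mean_variance(A.shape[0], mu, var, B)

both = np.vstack([A, B])
print(np.allclose(mu2, both.mean(axis=0)))   # expected: True
print(np.allclose(var2, both.var(axis=0)))   # expected: True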
def partial_fit(self, X, y, classes=None, sample_weight=None):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively on
    different chunks of a dataset so as to implement out-of-core or
    online learning.

    This is especially useful when the whole dataset is too big to fit
    in memory at once.

    This method has some performance and numerical stability overhead,
    hence it is better to call partial_fit on chunks of data that are as
    large as possible (as long as fitting in the memory budget) to hide
    the overhead.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples,)
        Target values.

    classes : array-like, shape (n_classes,), optional (default=None)
        List of all the classes that can possibly appear in the y vector.

        Must be provided at the first call to partial_fit, can be omitted
        in subsequent calls.

    sample_weight : array-like, shape (n_samples,), optional (default=None)
        Weights applied to individual samples (1. for unweighted).

        .. versionadded:: 0.17

    Returns
    -------
    self : object
        Returns self.
    """
    return self._partial_fit(X, y, classes, _refit=False,
                             sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
                 sample_weight=None):
    """Actual implementation of Gaussian NB fitting.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples,)
        Target values.

    classes : array-like, shape (n_classes,), optional (default=None)
        List of all the classes that can possibly appear in the y vector.

        Must be provided at the first call to partial_fit, can be omitted
        in subsequent calls.

    _refit : bool, optional (default=False)
        If true, act as though this were the first time we called
        _partial_fit (ie, throw away any past fitting and start over).

    sample_weight : array-like, shape (n_samples,), optional (default=None)
        Weights applied to individual samples (1. for unweighted).

    Returns
    -------
    self : object
        Returns self.
    """
    X, y = check_X_y(X, y)
    if sample_weight is not None:
        sample_weight = check_array(sample_weight, ensure_2d=False)
        check_consistent_length(y, sample_weight)

    # If the ratio of data variance between dimensions is too small, it
    # will cause numerical errors. To address this, we artificially boost
    # the variance by epsilon, a small fraction of the variance of the
    # largest dimension.
    epsilon = 1e-9 * np.var(X, axis=0).max()

    if _refit:
        self.classes_ = None

    if _check_partial_fit_first_call(self, classes):
        # This is the first call to partial_fit:
        # initialize the cumulative counters.
        n_features = X.shape[1]
        n_classes = len(self.classes_)
        self.theta_ = np.zeros((n_classes, n_features))
        self.sigma_ = np.zeros((n_classes, n_features))

        self.class_count_ = np.zeros(n_classes, dtype=np.float64)

        # Initialise the class prior, taking user priors into account.
        n_classes = len(self.classes_)
        if self.priors is not None:
            priors = np.asarray(self.priors)
            # Check that the provided priors match the number of classes.
            if len(priors) != n_classes:
                raise ValueError('Number of priors must match number of'
                                 ' classes.')
            # Check that the priors sum to 1.
            if priors.sum() != 1.0:
                raise ValueError('The sum of the priors should be 1.')
            # Check that the priors are non-negative.
            if (priors < 0).any():
                raise ValueError('Priors must be non-negative.')
            self.class_prior_ = priors
        else:
            # Initialize the priors to zeros for each class.
            self.class_prior_ = np.zeros(len(self.classes_),
                                         dtype=np.float64)
    else:
        if X.shape[1] != self.theta_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
        # Put epsilon back in each time.
        self.sigma_[:, :] -= epsilon

    classes = self.classes_

    unique_y = np.unique(y)
    unique_y_in_classes = np.in1d(unique_y, classes)

    if not np.all(unique_y_in_classes):
        raise ValueError("The target label(s) %s in y do not exist in the "
                         "initial classes %s" %
                         (unique_y[~unique_y_in_classes], classes))

    for y_i in unique_y:
        i = classes.searchsorted(y_i)
        X_i = X[y == y_i, :]

        if sample_weight is not None:
            sw_i = sample_weight[y == y_i]
            N_i = sw_i.sum()
        else:
            sw_i = None
            N_i = X_i.shape[0]

        new_theta, new_sigma = self._update_mean_variance(
            self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
            X_i, sw_i)

        self.theta_[i, :] = new_theta
        self.sigma_[i, :] = new_sigma
        self.class_count_[i] += N_i

    self.sigma_[:, :] += epsilon

    # Update the prior only if no user priors were provided.
    if self.priors is None:
        # Empirical prior, with sample_weight taken into account.
        self.class_prior_ = self.class_count_ / self.class_count_.sum()

    return self
def partial_fit(self, X, y, classes=None, sample_weight=None):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively on
    different chunks of a dataset so as to implement out-of-core or
    online learning.

    This is especially useful when the whole dataset is too big to fit
    in memory at once.

    This method has some performance overhead hence it is better to call
    partial_fit on chunks of data that are as large as possible (as long
    as fitting in the memory budget) to hide the overhead.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    classes : array-like, shape = [n_classes] (default=None)
        List of all the classes that can possibly appear in the y vector.

        Must be provided at the first call to partial_fit, can be omitted
        in subsequent calls.

    sample_weight : array-like, shape = [n_samples] (default=None)
        Weights applied to individual samples (1. for unweighted).

    Returns
    -------
    self : object
        Returns self.
    """
    X = check_array(X, accept_sparse='csr', dtype=np.float64)
    _, n_features = X.shape

    if _check_partial_fit_first_call(self, classes):
        # This is the first call to partial_fit:
        # initialize the cumulative counters.
        n_effective_classes = len(classes) if len(classes) > 1 else 2
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
    elif n_features != self.coef_.shape[1]:
        msg = "Number of features %d does not match previous data %d."
        raise ValueError(msg % (n_features, self.coef_.shape[-1]))

    Y = label_binarize(y, classes=self.classes_)
    if Y.shape[1] == 1:
        Y = np.concatenate((1 - Y, Y), axis=1)

    n_samples, n_classes = Y.shape

    if X.shape[0] != Y.shape[0]:
        msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
        raise ValueError(msg % (X.shape[0], y.shape[0]))

    # Convert the binarized labels to float64 so that sample_weight
    # multiplication works.
    Y = Y.astype(np.float64)
    if sample_weight is not None:
        sample_weight = np.atleast_2d(sample_weight)
        Y *= check_array(sample_weight).T

    class_prior = self.class_prior

    # Count raw events from data before updating the class log prior
    # and feature log probabilities.
    self._count(X, Y)
    alpha = self._check_alpha()
    self._update_feature_log_prob(alpha)
    self._update_class_log_prior(class_prior=class_prior)
    return self
def fit(self, X, y, sample_weight=None):
    """Fit Naive Bayes classifier according to X, y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    sample_weight : array-like, shape = [n_samples], (default=None)
        Weights applied to individual samples (1. for unweighted).

    Returns
    -------
    self : object
        Returns self.
    """
    X, y = check_X_y(X, y, 'csr')
    _, n_features = X.shape

    labelbin = LabelBinarizer()
    Y = labelbin.fit_transform(y)
    self.classes_ = labelbin.classes_
    if Y.shape[1] == 1:
        Y = np.concatenate((1 - Y, Y), axis=1)

    # Convert the binarized labels to float64 so that sample_weight
    # multiplication works.
    Y = Y.astype(np.float64)
    if sample_weight is not None:
        sample_weight = np.atleast_2d(sample_weight)
        Y *= check_array(sample_weight).T

    class_prior = self.class_prior

    # Count raw events from data before updating the class log prior
    # and feature log probabilities.
    n_effective_classes = Y.shape[1]
    self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
    self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                   dtype=np.float64)
    self._count(X, Y)
    alpha = self._check_alpha()
    self._update_feature_log_prob(alpha)
    self._update_class_log_prior(class_prior=class_prior)
    return self
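A hedged usage sketch of this discrete naive Bayes fit via MultinomialNB on toy term counts:

# Hypothetical usage sketch for the discrete naive Bayes fit above,
# using MultinomialNB on made-up word counts.
import numpy as np
from sklearn.naive_bayes import MultinomialNB

X = np.array([[3, 0, 1],    # counts of 3 "words" per document
              [2, 0, 0],
              [0, 4, 1],
              [0, 3, 2]])
y = np.array(['sport', 'sport', 'politics', 'politics'])

nb = MultinomialNB(alpha=1.0).fit(X, y)
print(nb.classes_)                        # ['politics' 'sport']
print(nb.predict(np.array([[1, 0, 0]])))  # expected: ['sport']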
def _count(self, X, Y):
    """Count and smooth feature occurrences."""
    if np.any((X.data if issparse(X) else X) < 0):
        raise ValueError("Input X must be non-negative")
    self.feature_count_ += safe_sparse_dot(Y.T, X)
    self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
    """Apply smoothing to raw counts and recompute log probabilities."""
    smoothed_fc = self.feature_count_ + alpha
    smoothed_cc = smoothed_fc.sum(axis=1)

    self.feature_log_prob_ = (np.log(smoothed_fc) -
                              np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
    """Calculate the posterior log probability of the samples X."""
    check_is_fitted(self, "classes_")

    X = check_array(X, accept_sparse='csr')
    return (safe_sparse_dot(X, self.feature_log_prob_.T) +
            self.class_log_prior_)
def _count(self, X, Y):
    """Count and smooth feature occurrences."""
    if self.binarize is not None:
        X = binarize(X, threshold=self.binarize)
    self.feature_count_ += safe_sparse_dot(Y.T, X)
    self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
    """Apply smoothing to raw counts and recompute log probabilities."""
    smoothed_fc = self.feature_count_ + alpha
    smoothed_cc = self.class_count_ + alpha * 2

    self.feature_log_prob_ = (np.log(smoothed_fc) -
                              np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
    """Calculate the posterior log probability of the samples X."""
    check_is_fitted(self, "classes_")

    X = check_array(X, accept_sparse='csr')

    if self.binarize is not None:
        X = binarize(X, threshold=self.binarize)

    n_classes, n_features = self.feature_log_prob_.shape
    n_samples, n_features_X = X.shape

    if n_features_X != n_features:
        raise ValueError("Expected input with %d features, got %d instead"
                         % (n_features, n_features_X))

    neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
    # Compute  neg_prob . (1 - X).T  as  sum(neg_prob) - X . neg_prob
    jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
    jll += self.class_log_prior_ + neg_prob.sum(axis=1)

    return jll
def _validate_X_predict(self, X, check_input):
    """Validate X whenever one tries to predict, apply, predict_proba."""
    if check_input:
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")
        if issparse(X) and (X.indices.dtype != np.intc or
                            X.indptr.dtype != np.intc):
            raise ValueError("No support for np.int64 index based "
                             "sparse matrices")

    n_features = X.shape[1]
    if self.n_features_ != n_features:
        raise ValueError("Number of features of the model must "
                         "match the input. Model n_features is %s and "
                         "input n_features is %s "
                         % (self.n_features_, n_features))

    return X
def predict(self, X, check_input=True):
    """Predict class or regression value for X.

    For a classification model, the predicted class for each sample in X
    is returned. For a regression model, the predicted value based on X
    is returned.

    Parameters
    ----------
    X : array-like or sparse matrix of shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided to a
        sparse ``csr_matrix``.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    Returns
    -------
    y : array of shape = [n_samples] or [n_samples, n_outputs]
        The predicted classes, or the predicted values.
    """
    check_is_fitted(self, 'tree_')
    X = self._validate_X_predict(X, check_input)
    proba = self.tree_.predict(X)
    n_samples = X.shape[0]

    # Classification
    if is_classifier(self):
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            predictions = np.zeros((n_samples, self.n_outputs_))
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(
                    np.argmax(proba[:, k], axis=1), axis=0)
            return predictions

    # Regression
    elif self.n_outputs_ == 1:
        return proba[:, 0]
    else:
        return proba[:, :, 0]
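A hedged usage sketch of tree prediction on synthetic data:

# Hypothetical usage sketch for the tree predict method above.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

tree = DecisionTreeClassifier(random_state=0).fit(X, y)
print(tree.predict(np.array([[0.5], [2.5]])))  # expected: [0 1]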
def apply(self, X, check_input=True):
    """Returns the index of the leaf that each sample is predicted as.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : array_like or sparse matrix, shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided to a
        sparse ``csr_matrix``.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    Returns
    -------
    X_leaves : array_like, shape = [n_samples,]
        For each datapoint x in X, return the index of the leaf x
        ends up in. Leaves are numbered within
        ``[0; self.tree_.node_count)``, possibly with gaps in the
        numbering.
    """
    check_is_fitted(self, 'tree_')
    X = self._validate_X_predict(X, check_input)
    return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
    """Return the decision path in the tree.

    .. versionadded:: 0.18

    Parameters
    ----------
    X : array_like or sparse matrix, shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided to a
        sparse ``csr_matrix``.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    Returns
    -------
    indicator : sparse csr array, shape = [n_samples, n_nodes]
        Return a node indicator matrix where non zero elements indicate
        that the samples go through the nodes.
    """
    X = self._validate_X_predict(X, check_input)
    return self.tree_.decision_path(X)
@property
def feature_importances_(self):
    """Return the feature importances.

    The importance of a feature is computed as the (normalized) total
    reduction of the criterion brought by that feature.
    It is also known as the Gini importance.

    Returns
    -------
    feature_importances_ : array, shape = [n_features]
    """
    check_is_fitted(self, 'tree_')

    return self.tree_.compute_feature_importances()
def fit(self, X, y, sample_weight=None, check_input=True,
        X_idx_sorted=None):
    """Build a decision tree classifier from the training set (X, y).

    Parameters
    ----------
    X : array-like or sparse matrix, shape = [n_samples, n_features]
        The training input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided to a
        sparse ``csc_matrix``.

    y : array-like, shape = [n_samples] or [n_samples, n_outputs]
        The target values (class labels) as integers or strings.

    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted.
        Splits that would create child nodes with net zero or negative
        weight are ignored while searching for a split in each node.
        Splits are also ignored if they would result in any single class
        carrying a negative weight in either child node.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    X_idx_sorted : array-like, shape = [n_samples, n_features], optional
        The indexes of the sorted training input samples. If many trees
        are grown on the same dataset, this allows the ordering to be
        cached between trees. If None, the data will be sorted here.
        Don't use this parameter unless you know what to do.

    Returns
    -------
    self : object
        Returns self.
    """
    super(DecisionTreeClassifier, self).fit(
        X, y,
        sample_weight=sample_weight,
        check_input=check_input,
        X_idx_sorted=X_idx_sorted)
    return self
def predict_proba(self, X, check_input=True):
    """Predict class probabilities of the input samples X.

    The predicted class probability is the fraction of samples of the
    same class in a leaf.

    Parameters
    ----------
    X : array-like or sparse matrix of shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided to a
        sparse ``csr_matrix``.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    Returns
    -------
    p : array of shape = [n_samples, n_classes], or a list of n_outputs
        such arrays if n_outputs > 1.
        The class probabilities of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
    """
    check_is_fitted(self, 'tree_')
    X = self._validate_X_predict(X, check_input)
    proba = self.tree_.predict(X)

    if self.n_outputs_ == 1:
        proba = proba[:, :self.n_classes_]
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer

        return proba

    else:
        all_proba = []

        for k in range(self.n_outputs_):
            proba_k = proba[:, k, :self.n_classes_[k]]
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer
            all_proba.append(proba_k)

        return all_proba
def predict_log_proba(self, X):
    """Predict class log-probabilities of the input samples X.

    Parameters
    ----------
    X : array-like or sparse matrix of shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided to a
        sparse ``csr_matrix``.

    Returns
    -------
    p : array of shape = [n_samples, n_classes], or a list of n_outputs
        such arrays if n_outputs > 1.
        The class log-probabilities of the input samples. The order of
        the classes corresponds to that in the attribute `classes_`.
    """
    proba = self.predict_proba(X)

    if self.n_outputs_ == 1:
        return np.log(proba)

    else:
        for k in range(self.n_outputs_):
            proba[k] = np.log(proba[k])

        return proba
def fit(self, X, y, sample_weight=None, check_input=True,
        X_idx_sorted=None):
    """Build a decision tree regressor from the training set (X, y).

    Parameters
    ----------
    X : array-like or sparse matrix, shape = [n_samples, n_features]
        The training input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided to a
        sparse ``csc_matrix``.

    y : array-like, shape = [n_samples] or [n_samples, n_outputs]
        The target values (real numbers). Use ``dtype=np.float64`` and
        ``order='C'`` for maximum efficiency.

    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted.
        Splits that would create child nodes with net zero or negative
        weight are ignored while searching for a split in each node.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    X_idx_sorted : array-like, shape = [n_samples, n_features], optional
        The indexes of the sorted training input samples. If many trees
        are grown on the same dataset, this allows the ordering to be
        cached between trees. If None, the data will be sorted here.
        Don't use this parameter unless you know what to do.

    Returns
    -------
    self : object
        Returns self.
    """
    super(DecisionTreeRegressor, self).fit(
        X, y,
        sample_weight=sample_weight,
        check_input=check_input,
        X_idx_sorted=X_idx_sorted)
    return self
def fit(self, X, y, sample_weight=None):
    """Fit the random classifier.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples] or [n_samples, n_outputs]
        Target values.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    self : object
        Returns self.
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                    force_all_finite=False)

    if self.strategy not in ("most_frequent", "stratified", "uniform",
                             "constant", "prior"):
        raise ValueError("Unknown strategy type.")

    if self.strategy == "uniform" and sp.issparse(y):
        y = y.toarray()
        warnings.warn('A local copy of the target data has been converted '
                      'to a numpy array. Predicting on sparse target data '
                      'with the uniform strategy would not save memory '
                      'and would be slower.',
                      UserWarning)

    self.sparse_output_ = sp.issparse(y)

    if not self.sparse_output_:
        y = np.atleast_1d(y)

    self.output_2d_ = y.ndim == 2

    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))

    self.n_outputs_ = y.shape[1]

    if self.strategy == "constant":
        if self.constant is None:
            raise ValueError("Constant target value has to be specified "
                             "when the constant strategy is used.")
        else:
            constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
            if constant.shape[0] != self.n_outputs_:
                raise ValueError("Constant target value should have "
                                 "shape (%d, 1)." % self.n_outputs_)

    (self.classes_,
     self.n_classes_,
     self.class_prior_) = class_distribution(y, sample_weight)

    if (self.strategy == "constant" and
            any(constant[k] not in self.classes_[k]
                for k in range(self.n_outputs_))):
        raise ValueError("The constant target value must be "
                         "present in training data")

    if self.n_outputs_ == 1 and not self.output_2d_:
        self.n_classes_ = self.n_classes_[0]
        self.classes_ = self.classes_[0]
        self.class_prior_ = self.class_prior_[0]

    return self
def predict(self, X):
    """Perform classification on test vectors X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Input vectors, where n_samples is the number of samples and
        n_features is the number of features.

    Returns
    -------
    y : array, shape = [n_samples] or [n_samples, n_outputs]
        Predicted target values for X.
    """
    check_is_fitted(self, 'classes_')

    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                    force_all_finite=False)
    n_samples = int(X.shape[0])
    rs = check_random_state(self.random_state)

    n_classes_ = self.n_classes_
    classes_ = self.classes_
    class_prior_ = self.class_prior_
    constant = self.constant
    if self.n_outputs_ == 1:
        # Get the same container type even for self.n_outputs_ == 1.
        n_classes_ = [n_classes_]
        classes_ = [classes_]
        class_prior_ = [class_prior_]
        constant = [constant]

    # Compute probability only once.
    if self.strategy == "stratified":
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            proba = [proba]

    if self.sparse_output_:
        class_prob = None
        if self.strategy in ("most_frequent", "prior"):
            classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
        elif self.strategy == "stratified":
            class_prob = class_prior_
        elif self.strategy == "uniform":
            raise ValueError("Sparse target prediction is not "
                             "supported with the uniform strategy")
        elif self.strategy == "constant":
            classes_ = [np.array([c]) for c in constant]

        y = random_choice_csc(n_samples, classes_, class_prob,
                              self.random_state)
    else:
        if self.strategy in ("most_frequent", "prior"):
            y = np.tile([classes_[k][class_prior_[k].argmax()]
                         for k in range(self.n_outputs_)], [n_samples, 1])
        elif self.strategy == "stratified":
            # Passing a generator to np.vstack is deprecated in recent
            # numpy; a list comprehension is equivalent.
            y = np.vstack([classes_[k][proba[k].argmax(axis=1)]
                           for k in range(self.n_outputs_)]).T
        elif self.strategy == "uniform":
            ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
                   for k in range(self.n_outputs_)]
            y = np.vstack(ret).T
        elif self.strategy == "constant":
            y = np.tile(self.constant, (n_samples, 1))

    if self.n_outputs_ == 1 and not self.output_2d_:
        y = np.ravel(y)

    return y
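A hedged sketch exercising the dense strategy branches above:

# Hypothetical usage sketch for the DummyClassifier strategies handled above.
import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 1))            # features are ignored by design
y = np.array([0, 0, 0, 0, 1, 1])

for strategy in ('most_frequent', 'prior', 'stratified', 'uniform'):
    clf = DummyClassifier(strategy=strategy, random_state=0).fit(X, y)
    print(strategy, clf.predict(X))
# 'most_frequent'/'prior' always predict 0; 'stratified' and 'uniform'
# draw labels at random.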
def predict_proba(self, X):
    """Return probability estimates for the test vectors X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Input vectors, where n_samples is the number of samples and
        n_features is the number of features.

    Returns
    -------
    P : array-like or list of array-like of shape = [n_samples, n_classes]
        Returns the probability of the sample for each class in the
        model, where classes are ordered arithmetically, for each output.
    """
    check_is_fitted(self, 'classes_')

    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                    force_all_finite=False)
    n_samples = int(X.shape[0])
    rs = check_random_state(self.random_state)

    n_classes_ = self.n_classes_
    classes_ = self.classes_
    class_prior_ = self.class_prior_
    constant = self.constant
    if self.n_outputs_ == 1 and not self.output_2d_:
        # Get the same container type even for self.n_outputs_ == 1.
        n_classes_ = [n_classes_]
        classes_ = [classes_]
        class_prior_ = [class_prior_]
        constant = [constant]

    P = []
    for k in range(self.n_outputs_):
        if self.strategy == "most_frequent":
            ind = class_prior_[k].argmax()
            out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
            out[:, ind] = 1.0
        elif self.strategy == "prior":
            out = np.ones((n_samples, 1)) * class_prior_[k]
        elif self.strategy == "stratified":
            out = rs.multinomial(1, class_prior_[k], size=n_samples)
        elif self.strategy == "uniform":
            out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
            out /= n_classes_[k]
        elif self.strategy == "constant":
            ind = np.where(classes_[k] == constant[k])
            out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
            out[:, ind] = 1.0

        P.append(out)

    if self.n_outputs_ == 1 and not self.output_2d_:
        P = P[0]

    return P
def predict_log_proba(self, X):
    """Return log probability estimates for the test vectors X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Input vectors, where n_samples is the number of samples and
        n_features is the number of features.

    Returns
    -------
    P : array-like or list of array-like of shape = [n_samples, n_classes]
        Returns the log probability of the sample for each class in the
        model, where classes are ordered arithmetically for each output.
    """
    proba = self.predict_proba(X)
    if self.n_outputs_ == 1:
        return np.log(proba)
    else:
        return [np.log(p) for p in proba]
def fit(self, X, y, sample_weight=None):
    """Fit the random regressor.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples] or [n_samples, n_outputs]
        Target values.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    self : object
        Returns self.
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                    force_all_finite=False)

    if self.strategy not in ("mean", "median", "quantile", "constant"):
        raise ValueError("Unknown strategy type: %s, expected "
                         "'mean', 'median', 'quantile' or 'constant'"
                         % self.strategy)

    y = check_array(y, ensure_2d=False)
    if len(y) == 0:
        raise ValueError("y must not be empty.")

    self.output_2d_ = y.ndim == 2
    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))
    self.n_outputs_ = y.shape[1]

    check_consistent_length(X, y, sample_weight)

    if self.strategy == "mean":
        self.constant_ = np.average(y, axis=0, weights=sample_weight)

    elif self.strategy == "median":
        if sample_weight is None:
            self.constant_ = np.median(y, axis=0)
        else:
            self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
                                                   percentile=50.)
                              for k in range(self.n_outputs_)]

    elif self.strategy == "quantile":
        if self.quantile is None or not np.isscalar(self.quantile):
            raise ValueError("Quantile must be a scalar in the range "
                             "[0.0, 1.0], but got %s." % self.quantile)

        percentile = self.quantile * 100.0
        if sample_weight is None:
            self.constant_ = np.percentile(y, axis=0, q=percentile)
        else:
            self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
                                                   percentile=percentile)
                              for k in range(self.n_outputs_)]

    elif self.strategy == "constant":
        if self.constant is None:
            raise TypeError("Constant target value has to be specified "
                            "when the constant strategy is used.")

        self.constant = check_array(self.constant,
                                    accept_sparse=['csr', 'csc', 'coo'],
                                    ensure_2d=False, ensure_min_samples=0)

        if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
            raise ValueError("Constant target value should have "
                             "shape (%d, 1)." % y.shape[1])

        self.constant_ = self.constant

    self.constant_ = np.reshape(self.constant_, (1, -1))
    return self
def predict(self, X):
    """Perform regression on test vectors X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Input vectors, where n_samples is the number of samples and
        n_features is the number of features.

    Returns
    -------
    y : array, shape = [n_samples] or [n_samples, n_outputs]
        Predicted target values for X.
    """
    check_is_fitted(self, 'constant_')
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                    force_all_finite=False)
    n_samples = X.shape[0]

    y = np.ones((n_samples, 1)) * self.constant_

    if self.n_outputs_ == 1 and not self.output_2d_:
        y = np.ravel(y)

    return y
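A hedged sketch of the constant_-based prediction for each strategy:

# Hypothetical usage sketch for the DummyRegressor strategies fitted above.
import numpy as np
from sklearn.dummy import DummyRegressor

X = np.zeros((4, 1))            # features are ignored by design
y = np.array([1.0, 2.0, 3.0, 10.0])

print(DummyRegressor(strategy='mean').fit(X, y).predict(X))    # all 4.0
print(DummyRegressor(strategy='median').fit(X, y).predict(X))  # all 2.5
print(DummyRegressor(strategy='quantile',
                     quantile=0.75).fit(X, y).predict(X))      # 75th percentile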
def fit(self, X, y):
    """The Gaussian Process model fitting method.

    Parameters
    ----------
    X : double array_like
        An array with shape (n_samples, n_features) with the input at
        which observations were made.

    y : double array_like
        An array with shape (n_samples, ) or shape (n_samples, n_targets)
        with the observations of the output to be predicted.

    Returns
    -------
    gp : self
        A fitted Gaussian Process model object awaiting data to perform
        predictions.
    """
    # Run input checks
    self._check_params()
    self.random_state = check_random_state(self.random_state)

    # Force data to 2D numpy.array
    X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
    self.y_ndim_ = y.ndim
    if y.ndim == 1:
        y = y[:, np.newaxis]

    # Check shapes of DOE & observations
    n_samples, n_features = X.shape
    _, n_targets = y.shape

    # Run input checks
    self._check_params(n_samples)

    # Normalize data or don't
    if self.normalize:
        X_mean = np.mean(X, axis=0)
        X_std = np.std(X, axis=0)
        y_mean = np.mean(y, axis=0)
        y_std = np.std(y, axis=0)
        X_std[X_std == 0.] = 1.
        y_std[y_std == 0.] = 1.
        # center and scale X if necessary
        X = (X - X_mean) / X_std
        y = (y - y_mean) / y_std
    else:
        X_mean = np.zeros(1)
        X_std = np.ones(1)
        y_mean = np.zeros(1)
        y_std = np.ones(1)

    # Calculate matrix of distances D between samples
    D, ij = l1_cross_distances(X)
    if (np.min(np.sum(D, axis=1)) == 0.
            and self.corr != correlation.pure_nugget):
        raise Exception("Multiple input features cannot have the same"
                        " target value.")

    # Regression matrix and parameters
    F = self.regr(X)
    n_samples_F = F.shape[0]
    if F.ndim > 1:
        p = F.shape[1]
    else:
        p = 1
    if n_samples_F != n_samples:
        raise Exception("Number of rows in F and X do not match. Most "
                        "likely something is going wrong with the "
                        "regression model.")
    if p > n_samples_F:
        raise Exception("Ordinary least squares problem is undetermined "
                        "n_samples=%d must be greater than the "
                        "regression model size p=%d." % (n_samples, p))
    if self.beta0 is not None:
        if self.beta0.shape[0] != p:
            raise Exception("Shapes of beta0 and F do not match.")

    # Set attributes
    self.X = X
    self.y = y
    self.D = D
    self.ij = ij
    self.F = F
    self.X_mean, self.X_std = X_mean, X_std
    self.y_mean, self.y_std = y_mean, y_std

    # Determine Gaussian Process model parameters
    if self.thetaL is not None and self.thetaU is not None:
        # Maximum Likelihood Estimation of the parameters
        if self.verbose:
            print("Performing Maximum Likelihood Estimation of the "
                  "autocorrelation parameters...")
        self.theta_, self.reduced_likelihood_function_value_, par = \
            self._arg_max_reduced_likelihood_function()
        if np.isinf(self.reduced_likelihood_function_value_):
            raise Exception("Bad parameter region. "
                            "Try increasing upper bound")
    else:
        # Given parameters
        if self.verbose:
            print("Given autocorrelation parameters. "
                  "Computing Gaussian Process model parameters...")
        self.theta_ = self.theta0
        self.reduced_likelihood_function_value_, par = \
            self.reduced_likelihood_function()
        if np.isinf(self.reduced_likelihood_function_value_):
            raise Exception("Bad point. Try increasing theta0.")

    self.beta = par['beta']
    self.gamma = par['gamma']
    self.sigma2 = par['sigma2']
    self.C = par['C']
    self.Ft = par['Ft']
    self.G = par['G']

    if self.storage_mode == 'light':
        # Delete heavy data (it will be recomputed if MSE is requested
        # in self.predict)
        if self.verbose:
            print("Light storage mode specified. "
                  "Flushing autocorrelation matrix...")
        self.D = None
        self.ij = None
        self.F = None
        self.C = None
        self.Ft = None
        self.G = None

    return self
def predict(self, X, eval_MSE=False, batch_size=None):
    """This function evaluates the Gaussian Process model at x.

    Parameters
    ----------
    X : array_like
        An array with shape (n_eval, n_features) giving the point(s) at
        which the prediction(s) should be made.

    eval_MSE : boolean, optional
        A boolean specifying whether the Mean Squared Error should be
        evaluated or not. Default assumes evalMSE = False and evaluates
        only the BLUP (mean prediction).

    batch_size : integer, optional
        An integer giving the maximum number of points that can be
        evaluated simultaneously (depending on the available memory).
        Default is None so that all given points are evaluated at the
        same time.

    Returns
    -------
    y : array_like, shape (n_samples, ) or (n_samples, n_targets)
        An array with shape (n_eval, ) if the Gaussian Process was trained
        on an array of shape (n_samples, ) or an array with shape
        (n_eval, n_targets) if the Gaussian Process was trained on an
        array of shape (n_samples, n_targets) with the Best Linear
        Unbiased Prediction at x.

    MSE : array_like, optional (if eval_MSE == True)
        An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
        with the Mean Squared Error at x.
    """
    check_is_fitted(self, 'X')

    # Check input shapes
    X = check_array(X)
    n_eval, _ = X.shape
    n_samples, n_features = self.X.shape
    n_samples_y, n_targets = self.y.shape

    # Run input checks
    self._check_params(n_samples)

    if X.shape[1] != n_features:
        raise ValueError(("The number of features in X (X.shape[1] = %d) "
                          "should match the number of features used "
                          "for fit() "
                          "which is %d.") % (X.shape[1], n_features))

    if batch_size is None:
        # No memory management
        # (evaluates all given points in a single batch run)

        # Normalize input
        X = (X - self.X_mean) / self.X_std

        # Initialize output
        y = np.zeros(n_eval)
        if eval_MSE:
            MSE = np.zeros(n_eval)

        # Get pairwise componentwise L1-distances to the input training set
        dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
        # Get regression function and correlation
        f = self.regr(X)
        r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)

        # Scaled predictor
        y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)

        # Predictor
        y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)

        if self.y_ndim_ == 1:
            y = y.ravel()

        # Mean Squared Error
        if eval_MSE:
            C = self.C
            if C is None:
                # Light storage mode (need to recompute C, F, Ft and G)
                if self.verbose:
                    print("This GaussianProcess used 'light' storage mode "
                          "at instantiation. Need to recompute "
                          "autocorrelation matrix...")
                reduced_likelihood_function_value, par = \
                    self.reduced_likelihood_function()
                self.C = par['C']
                self.Ft = par['Ft']
                self.G = par['G']

            rt = linalg.solve_triangular(self.C, r.T, lower=True)

            if self.beta0 is None:
                # Universal Kriging
                u = linalg.solve_triangular(self.G.T,
                                            np.dot(self.Ft.T, rt) - f.T,
                                            lower=True)
            else:
                # Ordinary Kriging
                u = np.zeros((n_targets, n_eval))

            MSE = np.dot(self.sigma2.reshape(n_targets, 1),
                         (1. - (rt ** 2.).sum(axis=0)
                          + (u ** 2.).sum(axis=0))[np.newaxis, :])
            MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)

            # Mean Squared Error might be slightly negative depending on
            # machine precision: force to zero!
            MSE[MSE < 0.] = 0.

            if self.y_ndim_ == 1:
                MSE = MSE.ravel()

            return y, MSE

        else:
            return y

    else:
        # Memory management
        if type(batch_size) is not int or batch_size <= 0:
            raise Exception("batch_size must be a positive integer")

        if eval_MSE:
            y, MSE = np.zeros(n_eval), np.zeros(n_eval)
            for k in range(max(1, int(n_eval / batch_size))):
                batch_from = k * batch_size
                batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
                y[batch_from:batch_to], MSE[batch_from:batch_to] = \
                    self.predict(X[batch_from:batch_to],
                                 eval_MSE=eval_MSE, batch_size=None)
            return y, MSE

        else:
            y = np.zeros(n_eval)
            for k in range(max(1, int(n_eval / batch_size))):
                batch_from = k * batch_size
                batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
                y[batch_from:batch_to] = \
                    self.predict(X[batch_from:batch_to],
                                 eval_MSE=eval_MSE, batch_size=None)
            return y
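This is the legacy sklearn.gaussian_process.GaussianProcess estimator (deprecated since scikit-learn 0.18 in favor of GaussianProcessRegressor and later removed); a hedged sketch of the eval_MSE path, assuming a version that still ships the class:

# Hedged usage sketch for the legacy GaussianProcess estimator above;
# only runs on scikit-learn versions that still include it.
import numpy as np
from sklearn.gaussian_process import GaussianProcess

X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

gp = GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
gp.fit(X, y)
y_pred, mse = gp.predict(np.atleast_2d(np.linspace(0, 10, 5)).T,
                         eval_MSE=True)
print(y_pred.shape, mse.shape)  # (5,) (5,) -- BLUP mean and its MSE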
def reduced_likelihood_function(self, theta=None):
    """
    This function determines the BLUP parameters and evaluates the
    reduced likelihood function for the given autocorrelation
    parameters theta.

    Maximizing this function wrt the autocorrelation parameters theta is
    equivalent to maximizing the likelihood of the assumed joint Gaussian
    distribution of the observations y evaluated onto the design of
    experiments X.

    Parameters
    ----------
    theta : array_like, optional
        An array containing the autocorrelation parameters at which the
        Gaussian Process model parameters should be determined.
        Default uses the built-in autocorrelation parameters
        (ie ``theta = self.theta_``).

    Returns
    -------
    reduced_likelihood_function_value : double
        The value of the reduced likelihood function associated to the
        given autocorrelation parameters theta.

    par : dict
        A dictionary containing the requested Gaussian Process model
        parameters:

        - ``sigma2`` is the Gaussian Process variance.
        - ``beta`` is the generalized least-squares regression weights
          for Universal Kriging or given beta0 for Ordinary Kriging.
        - ``gamma`` is the Gaussian Process weights.
        - ``C`` is the Cholesky decomposition of the correlation
          matrix [R].
        - ``Ft`` is the solution of the linear equation system
          [R] x Ft = F
        - ``G`` is the QR decomposition of the matrix Ft.
    """
    check_is_fitted(self, 'X')

    if theta is None:
        # Use built-in autocorrelation parameters
        theta = self.theta_

    # Initialize output
    reduced_likelihood_function_value = -np.inf
    par = {}

    # Retrieve data
    n_samples = self.X.shape[0]
    D = self.D
    ij = self.ij
    F = self.F

    if D is None:
        # Light storage mode (need to recompute D, ij and F)
        D, ij = l1_cross_distances(self.X)
        if (np.min(np.sum(D, axis=1)) == 0.
                and self.corr != correlation.pure_nugget):
            raise Exception("Multiple X are not allowed")
        F = self.regr(self.X)

    # Set up R
    r = self.corr(theta, D)
    R = np.eye(n_samples) * (1. + self.nugget)
    R[ij[:, 0], ij[:, 1]] = r
    R[ij[:, 1], ij[:, 0]] = r

    # Cholesky decomposition of R
    try:
        C = linalg.cholesky(R, lower=True)
    except linalg.LinAlgError:
        return reduced_likelihood_function_value, par

    # Get generalized least squares solution
    Ft = linalg.solve_triangular(C, F, lower=True)
    Q, G = linalg.qr(Ft, mode='economic')

    sv = linalg.svd(G, compute_uv=False)
    rcondG = sv[-1] / sv[0]
    if rcondG < 1e-10:
        # Check F
        sv = linalg.svd(F, compute_uv=False)
        condF = sv[0] / sv[-1]
        if condF > 1e15:
            raise Exception("F is too ill conditioned. Poor combination "
                            "of regression model and observations.")
        else:
            # Ft is too ill conditioned, get out (try different theta)
            return reduced_likelihood_function_value, par

    Yt = linalg.solve_triangular(C, self.y, lower=True)
    if self.beta0 is None:
        # Universal Kriging
        beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
    else:
        # Ordinary Kriging
        beta = np.array(self.beta0)

    rho = Yt - np.dot(Ft, beta)
    sigma2 = (rho ** 2.).sum(axis=0) / n_samples
    # The determinant of R is equal to the squared product of the
    # diagonal elements of its Cholesky decomposition C
    detR = (np.diag(C) ** (2. / n_samples)).prod()

    # Compute/Organize output
    reduced_likelihood_function_value = -sigma2.sum() * detR
    par['sigma2'] = sigma2 * self.y_std ** 2.
    par['beta'] = beta
    par['gamma'] = linalg.solve_triangular(C.T, rho)
    par['C'] = C
    par['Ft'] = Ft
    par['G'] = G

    return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
    """
    This function estimates the autocorrelation parameters theta as the
    maximizer of the reduced likelihood function.
    (Minimization of the opposite reduced likelihood function is used
    for convenience.)

    Parameters
    ----------
    self : All parameters are stored in the Gaussian Process model object.

    Returns
    -------
    optimal_theta : array_like
        The best set of autocorrelation parameters (the sought maximizer
        of the reduced likelihood function).

    optimal_reduced_likelihood_function_value : double
        The optimal reduced likelihood function value.

    optimal_par : dict
        The BLUP parameters associated to thetaOpt.
    """
    # Initialize output
    best_optimal_theta = []
    best_optimal_rlf_value = []
    best_optimal_par = []

    if self.verbose:
        print("The chosen optimizer is: " + str(self.optimizer))
        if self.random_start > 1:
            print(str(self.random_start) + " random starts are required.")

    percent_completed = 0.

    # Force optimizer to fmin_cobyla if the model is meant to be isotropic
    if self.optimizer == 'Welch' and self.theta0.size == 1:
        self.optimizer = 'fmin_cobyla'

    if self.optimizer == 'fmin_cobyla':

        def minus_reduced_likelihood_function(log10t):
            return -self.reduced_likelihood_function(
                theta=10. ** log10t)[0]

        constraints = []
        for i in range(self.theta0.size):
            constraints.append(lambda log10t, i=i:
                               log10t[i] - np.log10(self.thetaL[0, i]))
            constraints.append(lambda log10t, i=i:
                               np.log10(self.thetaU[0, i]) - log10t[i])

        for k in range(self.random_start):

            if k == 0:
                # Use specified starting point as first guess
                theta0 = self.theta0
            else:
                # Generate a random starting point log10-uniformly
                # distributed between bounds
                log10theta0 = (np.log10(self.thetaL)
                               + self.random_state.rand(*self.theta0.shape)
                               * np.log10(self.thetaU / self.thetaL))
                theta0 = 10. ** log10theta0

            # Run Cobyla
            try:
                log10_optimal_theta = \
                    optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                         np.log10(theta0).ravel(),
                                         constraints, iprint=0)
            except ValueError as ve:
                print("Optimization failed. Try increasing the ``nugget``")
                raise ve

            optimal_theta = 10. ** log10_optimal_theta
            optimal_rlf_value, optimal_par = \
                self.reduced_likelihood_function(theta=optimal_theta)

            # Compare the new optimizer to the best previous one
            if k > 0:
                if optimal_rlf_value > best_optimal_rlf_value:
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
            else:
                best_optimal_rlf_value = optimal_rlf_value
                best_optimal_par = optimal_par
                best_optimal_theta = optimal_theta
            if self.verbose and self.random_start > 1:
                if (20 * k) / self.random_start > percent_completed:
                    percent_completed = (20 * k) / self.random_start
                    print("%s completed" % (5 * percent_completed))

        optimal_rlf_value = best_optimal_rlf_value
        optimal_par = best_optimal_par
        optimal_theta = best_optimal_theta

    elif self.optimizer == 'Welch':

        # Backup of the given attributes
        theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
        corr = self.corr
        verbose = self.verbose

        # This will iterate over fmin_cobyla optimizer
        self.optimizer = 'fmin_cobyla'
        self.verbose = False

        # Initialize under isotropy assumption
        if verbose:
            print("Initialize under isotropy assumption...")
        self.theta0 = check_array(self.theta0.min())
        self.thetaL = check_array(self.thetaL.min())
        self.thetaU = check_array(self.thetaU.max())
        theta_iso, optimal_rlf_value_iso, par_iso = \
            self._arg_max_reduced_likelihood_function()
        optimal_theta = theta_iso + np.zeros(theta0.shape)

        # Iterate over all dimensions of theta allowing for anisotropy
        if verbose:
            print("Now improving allowing for anisotropy...")
        for i in self.random_state.permutation(theta0.size):
            if verbose:
                print("Proceeding along dimension %d..." % (i + 1))
            self.theta0 = check_array(theta_iso)
            self.thetaL = check_array(thetaL[0, i])
            self.thetaU = check_array(thetaU[0, i])

            def corr_cut(t, d):
                return corr(check_array(
                    np.hstack([optimal_theta[0][0:i],
                               t[0],
                               optimal_theta[0][(i + 1):]])), d)

            self.corr = corr_cut
            optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                self._arg_max_reduced_likelihood_function()

        # Restore the given attributes
        self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
        self.corr = corr
        self.optimizer = 'Welch'
        self.verbose = verbose

    else:
        raise NotImplementedError("This optimizer ('%s') is not "
                                  "implemented yet. Please contribute!"
                                  % self.optimizer)

    return optimal_theta, optimal_rlf_value, optimal_par
'Fit Gaussian process classification model Parameters X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values, must be binary Returns self : returns an instance of self.'
def fit(self, X, y):
if (self.kernel is None):
    self.kernel_ = (C(1.0, constant_value_bounds='fixed') * RBF(1.0, length_scale_bounds='fixed'))
else:
    self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = (np.copy(X) if self.copy_X_train else X)
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if (self.classes_.size > 2):
    raise ValueError(('%s supports only binary classification. y contains classes %s' % (self.__class__.__name__, self.classes_)))
elif (self.classes_.size == 1):
    raise ValueError('{0:s} requires 2 classes.'.format(self.__class__.__name__))
if ((self.optimizer is not None) and (self.kernel_.n_dims > 0)):

    def obj_func(theta, eval_gradient=True):
        if eval_gradient:
            (lml, grad) = self.log_marginal_likelihood(theta, eval_gradient=True)
            return ((- lml), (- grad))
        else:
            return (- self.log_marginal_likelihood(theta))

    optima = [self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds)]
    if (self.n_restarts_optimizer > 0):
        if (not np.isfinite(self.kernel_.bounds).all()):
            raise ValueError('Multiple optimizer restarts (n_restarts_optimizer>0) requires that all bounds are finite.')
        bounds = self.kernel_.bounds
        for iteration in range(self.n_restarts_optimizer):
            theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
            optima.append(self._constrained_optimization(obj_func, theta_initial, bounds))
    lml_values = list(map(itemgetter(1), optima))
    self.kernel_.theta = optima[np.argmin(lml_values)][0]
    self.log_marginal_likelihood_value_ = (- np.min(lml_values))
else:
    self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(self.kernel_.theta)
K = self.kernel_(self.X_train_)
(_, (self.pi_, self.W_sr_, self.L_, _, _)) = self._posterior_mode(K, return_temporaries=True)
return self
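For context, a minimal usage sketch with made-up data (the public GaussianProcessClassifier delegates binary problems to a fit like the one above); the kernel choice is only illustrative:

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.array([[1.0], [3.0], [5.0], [6.0], [7.0], [8.0]])
y = np.array([0, 0, 0, 1, 1, 1])
gpc = GaussianProcessClassifier(kernel=(1.0 * RBF(length_scale=1.0)), random_state=0).fit(X, y)
print(gpc.kernel_)               # kernel with hyperparameters fitted during fit
print(gpc.predict_proba(X[:2]))  # class probabilities for two samples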
'Perform classification on an array of test vectors X. Parameters X : array-like, shape = (n_samples, n_features) Returns C : array, shape = (n_samples,) Predicted target values for X, values are from ``classes_``'
def predict(self, X):
check_is_fitted(self, ['X_train_', 'y_train_', 'pi_', 'W_sr_', 'L_'])
K_star = self.kernel_(self.X_train_, X)
f_star = K_star.T.dot((self.y_train_ - self.pi_))
return np.where((f_star > 0), self.classes_[1], self.classes_[0])
'Return probability estimates for the test vector X. Parameters X : array-like, shape = (n_samples, n_features) Returns C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute ``classes_``.'
def predict_proba(self, X):
check_is_fitted(self, ['X_train_', 'y_train_', 'pi_', 'W_sr_', 'L_'])
K_star = self.kernel_(self.X_train_, X)
f_star = K_star.T.dot((self.y_train_ - self.pi_))
v = solve(self.L_, (self.W_sr_[:, np.newaxis] * K_star))
var_f_star = (self.kernel_.diag(X) - np.einsum('ij,ij->j', v, v))
alpha = (1 / (2 * var_f_star))
gamma = (LAMBDAS * f_star)
integrals = ((np.sqrt((np.pi / alpha)) * erf((gamma * np.sqrt((alpha / (alpha + (LAMBDAS ** 2))))))) / (2 * np.sqrt(((var_f_star * 2) * np.pi))))
pi_star = ((COEFS * integrals).sum(axis=0) + (0.5 * COEFS.sum()))
return np.vstack(((1 - pi_star), pi_star)).T
'Returns log-marginal likelihood of theta for training data. Parameters theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True.'
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
if (theta is None):
    if eval_gradient:
        raise ValueError('Gradient can only be evaluated for theta!=None')
    return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
    (K, K_gradient) = kernel(self.X_train_, eval_gradient=True)
else:
    K = kernel(self.X_train_)
(Z, (pi, W_sr, L, b, a)) = self._posterior_mode(K, return_temporaries=True)
if (not eval_gradient):
    return Z
d_Z = np.empty(theta.shape[0])
R = (W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)))
C = solve(L, (W_sr[:, np.newaxis] * K))
s_2 = (((-0.5) * (np.diag(K) - np.einsum('ij, ij -> j', C, C))) * ((pi * (1 - pi)) * (1 - (2 * pi))))
for j in range(d_Z.shape[0]):
    C = K_gradient[:, :, j]
    s_1 = ((0.5 * a.T.dot(C).dot(a)) - (0.5 * R.T.ravel().dot(C.ravel())))
    b = C.dot((self.y_train_ - pi))
    s_3 = (b - K.dot(R.dot(b)))
    d_Z[j] = (s_1 + s_2.T.dot(s_3))
return (Z, d_Z)
'Mode-finding for binary Laplace GPC and fixed kernel. This approximates the posterior of the latent function values for given inputs and target observations with a Gaussian approximation and uses Newton\'s iteration to find the mode of this approximation.'
def _posterior_mode(self, K, return_temporaries=False):
if (self.warm_start and hasattr(self, 'f_cached') and (self.f_cached.shape == self.y_train_.shape)):
    f = self.f_cached
else:
    f = np.zeros_like(self.y_train_, dtype=np.float64)
# Newton iteration for the mode of the Laplace approximation
log_marginal_likelihood = (- np.inf)
for _ in range(self.max_iter_predict):
    pi = expit(f)
    W = (pi * (1 - pi))
    # B = I + W^(1/2) K W^(1/2)
    W_sr = np.sqrt(W)
    W_sr_K = (W_sr[:, np.newaxis] * K)
    B = (np.eye(W.shape[0]) + (W_sr_K * W_sr))
    L = cholesky(B, lower=True)
    # Newton step, solved via the Cholesky factor of B
    b = ((W * f) + (self.y_train_ - pi))
    a = (b - (W_sr * cho_solve((L, True), W_sr_K.dot(b))))
    f = K.dot(a)
    # Approximate log marginal likelihood at the current iterate
    lml = ((((-0.5) * a.T.dot(f)) - np.log((1 + np.exp(((- ((self.y_train_ * 2) - 1)) * f)))).sum()) - np.log(np.diag(L)).sum())
    # Check for convergence
    if ((lml - log_marginal_likelihood) < 1e-10):
        break
    log_marginal_likelihood = lml
# Remember the solution for later warm-starts
self.f_cached = f
if return_temporaries:
    return (log_marginal_likelihood, (pi, W_sr, L, b, a))
else:
    return log_marginal_likelihood
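The loop above is the standard Newton iteration for the Laplace approximation in binary GP classification (GPML, Algorithm 3.1). A self-contained sketch of the same iteration on made-up data, assuming labels in {0, 1} and a toy RBF Gram matrix:

import numpy as np
from scipy.linalg import cholesky, cho_solve
from scipy.special import expit

rng = np.random.RandomState(0)
X = rng.randn(5, 1)
y = (X.ravel() > 0).astype(float)        # labels in {0, 1}
K = np.exp((-0.5) * ((X - X.T) ** 2))    # toy RBF Gram matrix

f = np.zeros_like(y)                     # latent function, initialized at 0
for _ in range(100):
    pi = expit(f)
    W = (pi * (1 - pi))
    W_sr = np.sqrt(W)
    B = (np.eye(len(y)) + ((W_sr[:, np.newaxis] * K) * W_sr))  # I + W^1/2 K W^1/2
    L = cholesky(B, lower=True)
    b = ((W * f) + (y - pi))
    a = (b - (W_sr * cho_solve((L, True), W_sr * K.dot(b))))
    f_new = K.dot(a)
    if (np.max(np.abs(f_new - f)) < 1e-10):
        f = f_new
        break
    f = f_new
print(f)  # posterior mode of the latent function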
'Fit Gaussian process classification model Parameters X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values; multi-class problems are handled via one-versus-rest or one-versus-one Returns self : returns an instance of self.'
def fit(self, X, y):
(X, y) = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(self.kernel, self.optimizer, self.n_restarts_optimizer, self.max_iter_predict, self.warm_start, self.copy_X_train, self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if (self.n_classes_ == 1):
    raise ValueError(('GaussianProcessClassifier requires 2 or more distinct classes. Only class %s present.' % self.classes_[0]))
if (self.n_classes_ > 2):
    if (self.multi_class == 'one_vs_rest'):
        self.base_estimator_ = OneVsRestClassifier(self.base_estimator_, n_jobs=self.n_jobs)
    elif (self.multi_class == 'one_vs_one'):
        self.base_estimator_ = OneVsOneClassifier(self.base_estimator_, n_jobs=self.n_jobs)
    else:
        raise ValueError(('Unknown multi-class mode %s' % self.multi_class))
self.base_estimator_.fit(X, y)
if (self.n_classes_ > 2):
    self.log_marginal_likelihood_value_ = np.mean([estimator.log_marginal_likelihood() for estimator in self.base_estimator_.estimators_])
else:
    self.log_marginal_likelihood_value_ = self.base_estimator_.log_marginal_likelihood()
return self
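A hypothetical three-class example for the one-vs-rest wrapping performed above (toy data, illustrative kernel):

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.array([[0.0], [0.5], [1.0], [4.0], [4.5], [5.0], [9.0], [9.5], [10.0]])
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
gpc = GaussianProcessClassifier(kernel=(1.0 * RBF(1.0)), multi_class='one_vs_rest', random_state=0).fit(X, y)
print(gpc.classes_)                    # [0 1 2]
print(gpc.predict_proba(X[:1]).sum())  # rows are normalized to 1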
'Perform classification on an array of test vectors X. Parameters X : array-like, shape = (n_samples, n_features) Returns C : array, shape = (n_samples,) Predicted target values for X, values are from ``classes_``'
def predict(self, X):
check_is_fitted(self, ['classes_', 'n_classes_'])
X = check_array(X)
return self.base_estimator_.predict(X)
'Return probability estimates for the test vector X. Parameters X : array-like, shape = (n_samples, n_features) Returns C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`.'
def predict_proba(self, X):
check_is_fitted(self, ['classes_', 'n_classes_'])
if ((self.n_classes_ > 2) and (self.multi_class == 'one_vs_one')):
    raise ValueError('one_vs_one multi-class mode does not support predicting probability estimates. Use one_vs_rest mode instead.')
X = check_array(X)
return self.base_estimator_.predict_proba(X)
'Returns log-marginal likelihood of theta for training data. In the case of multi-class classification, the mean log-marginal likelihood of the one-versus-rest classifiers is returned. Parameters theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. In the case of multi-class classification, theta may be the hyperparameters of the compound kernel or of an individual kernel. In the latter case, all individual kernels get assigned the same theta values. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. Note that gradient computation is not supported for non-binary classification. If True, theta must not be None. Returns log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True.'
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
check_is_fitted(self, ['classes_', 'n_classes_'])
if (theta is None):
    if eval_gradient:
        raise ValueError('Gradient can only be evaluated for theta!=None')
    return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if (self.n_classes_ == 2):
    return self.base_estimator_.log_marginal_likelihood(theta, eval_gradient)
else:
    if eval_gradient:
        raise NotImplementedError('Gradient of log-marginal-likelihood not implemented for multi-class GPC.')
    estimators = self.base_estimator_.estimators_
    n_dims = estimators[0].kernel_.n_dims
    if (theta.shape[0] == n_dims):
        return np.mean([estimator.log_marginal_likelihood(theta) for (i, estimator) in enumerate(estimators)])
    elif (theta.shape[0] == (n_dims * self.classes_.shape[0])):
        return np.mean([estimator.log_marginal_likelihood(theta[(n_dims * i):(n_dims * (i + 1))]) for (i, estimator) in enumerate(estimators)])
    else:
        raise ValueError(('Shape of theta must be either %d or %d. Obtained theta with shape %d.' % (n_dims, (n_dims * self.classes_.shape[0]), theta.shape[0])))
'Fit Gaussian process regression model. Parameters X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples, [n_output_dims]) Target values Returns self : returns an instance of self.'
def fit(self, X, y):
if (self.kernel is None):
    self.kernel_ = (C(1.0, constant_value_bounds='fixed') * RBF(1.0, length_scale_bounds='fixed'))
else:
    self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
(X, y) = check_X_y(X, y, multi_output=True, y_numeric=True)
if self.normalize_y:
    self._y_train_mean = np.mean(y, axis=0)
    y = (y - self._y_train_mean)
else:
    self._y_train_mean = np.zeros(1)
if (np.iterable(self.alpha) and (self.alpha.shape[0] != y.shape[0])):
    if (self.alpha.shape[0] == 1):
        self.alpha = self.alpha[0]
    else:
        raise ValueError(('alpha must be a scalar or an array with same number of entries as y.(%d != %d)' % (self.alpha.shape[0], y.shape[0])))
self.X_train_ = (np.copy(X) if self.copy_X_train else X)
self.y_train_ = (np.copy(y) if self.copy_X_train else y)
if ((self.optimizer is not None) and (self.kernel_.n_dims > 0)):

    def obj_func(theta, eval_gradient=True):
        if eval_gradient:
            (lml, grad) = self.log_marginal_likelihood(theta, eval_gradient=True)
            return ((- lml), (- grad))
        else:
            return (- self.log_marginal_likelihood(theta))

    optima = [self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds)]
    if (self.n_restarts_optimizer > 0):
        if (not np.isfinite(self.kernel_.bounds).all()):
            raise ValueError('Multiple optimizer restarts (n_restarts_optimizer>0) requires that all bounds are finite.')
        bounds = self.kernel_.bounds
        for iteration in range(self.n_restarts_optimizer):
            theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
            optima.append(self._constrained_optimization(obj_func, theta_initial, bounds))
    lml_values = list(map(itemgetter(1), optima))
    self.kernel_.theta = optima[np.argmin(lml_values)][0]
    self.log_marginal_likelihood_value_ = (- np.min(lml_values))
else:
    self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(self.kernel_.theta)
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
    self.L_ = cholesky(K, lower=True)
except np.linalg.LinAlgError as exc:
    exc.args = ((("The kernel, %s, is not returning a positive definite matrix. Try gradually increasing the 'alpha' parameter of your GaussianProcessRegressor estimator." % self.kernel_),) + exc.args)
    raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_)
return self
'Predict using the Gaussian process regression model. We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, its standard deviation (return_std=True) or covariance (return_cov=True) can also be returned. Note that at most one of the two can be requested. Parameters X : array-like, shape = (n_samples, n_features) Query points where the GP is evaluated return_std : bool, default: False If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean. return_cov : bool, default: False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean. Returns y_mean : array, shape = (n_samples, [n_output_dims]) Mean of predictive distribution at query points y_std : array, shape = (n_samples,), optional Standard deviation of predictive distribution at query points. Only returned when return_std is True. y_cov : array, shape = (n_samples, n_samples), optional Covariance of joint predictive distribution at query points. Only returned when return_cov is True.'
def predict(self, X, return_std=False, return_cov=False):
if (return_std and return_cov):
    raise RuntimeError('Not returning standard deviation of predictions when returning full covariance.')
X = check_array(X)
if (not hasattr(self, 'X_train_')):
    # Unfitted: predict based on the GP prior
    if (self.kernel is None):
        kernel = (C(1.0, constant_value_bounds='fixed') * RBF(1.0, length_scale_bounds='fixed'))
    else:
        kernel = self.kernel
    y_mean = np.zeros(X.shape[0])
    if return_cov:
        y_cov = kernel(X)
        return (y_mean, y_cov)
    elif return_std:
        y_var = kernel.diag(X)
        return (y_mean, np.sqrt(y_var))
    else:
        return y_mean
else:
    # Predict based on the GP posterior
    K_trans = self.kernel_(X, self.X_train_)
    y_mean = K_trans.dot(self.alpha_)
    y_mean = (self._y_train_mean + y_mean)
    if return_cov:
        v = cho_solve((self.L_, True), K_trans.T)
        y_cov = (self.kernel_(X) - K_trans.dot(v))
        return (y_mean, y_cov)
    elif return_std:
        # Compute the inverse of K from its Cholesky factor
        L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
        K_inv = L_inv.dot(L_inv.T)
        # Variance of the predictive distribution
        y_var = self.kernel_.diag(X)
        y_var -= np.einsum('ij,ij->i', np.dot(K_trans, K_inv), K_trans)
        # Clip variances that became negative due to numerical issues
        y_var_negative = (y_var < 0)
        if np.any(y_var_negative):
            warnings.warn('Predicted variances smaller than 0. Setting those variances to 0.')
        y_var[y_var_negative] = 0.0
        return (y_mean, np.sqrt(y_var))
    else:
        return y_mean
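An end-to-end sketch of fit plus predict on made-up data; the WhiteKernel term is an assumption standing in for observation noise:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

X = np.linspace(0, 5, 8)[:, np.newaxis]
y = np.sin(X).ravel()
kernel = ((1.0 * RBF(length_scale=1.0)) + WhiteKernel(noise_level=0.001))
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0).fit(X, y)
(y_mean, y_std) = gpr.predict(np.array([[2.5]]), return_std=True)
print(y_mean, y_std)  # mean near sin(2.5); small std close to training inputs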
'Draw samples from Gaussian process and evaluate at X. Parameters X : array-like, shape = (n_samples_X, n_features) Query points where the GP samples are evaluated n_samples : int, default: 1 The number of samples drawn from the Gaussian process random_state : int, RandomState instance or None, optional (default=0) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples) Values of n_samples samples drawn from Gaussian process and evaluated at query points.'
def sample_y(self, X, n_samples=1, random_state=0):
rng = check_random_state(random_state)
(y_mean, y_cov) = self.predict(X, return_cov=True)
if (y_mean.ndim == 1):
    y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
    y_samples = [rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis] for i in range(y_mean.shape[1])]
    y_samples = np.hstack(y_samples)
return y_samples
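Because predict falls back to the prior before fit, sample_y can also draw GP prior functions; a quick sketch with assumed query points:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0))  # unfitted: prior
X_query = np.linspace(0, 5, 5)[:, np.newaxis]
prior_draws = gpr.sample_y(X_query, n_samples=3, random_state=0)
print(prior_draws.shape)  # (5, 3): one column per drawn function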
'Returns log-marginal likelihood of theta for training data. Parameters theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True.'
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
if (theta is None):
    if eval_gradient:
        raise ValueError('Gradient can only be evaluated for theta!=None')
    return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
    (K, K_gradient) = kernel(self.X_train_, eval_gradient=True)
else:
    K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
    L = cholesky(K, lower=True)
except np.linalg.LinAlgError:
    return (((- np.inf), np.zeros_like(theta)) if eval_gradient else (- np.inf))
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if (y_train.ndim == 1):
    y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train)
# Log marginal likelihood, computed per output dimension and summed
log_likelihood_dims = ((-0.5) * np.einsum('ik,ik->k', y_train, alpha))
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= ((K.shape[0] / 2) * np.log((2 * np.pi)))
log_likelihood = log_likelihood_dims.sum((-1))
if eval_gradient:
    tmp = np.einsum('ik,jk->ijk', alpha, alpha)
    tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
    # Compute 0.5 * trace(tmp.dot(K_gradient)) without constructing the
    # full matrix product, since only its diagonal is needed
    log_likelihood_gradient_dims = (0.5 * np.einsum('ijl,ijk->kl', tmp, K_gradient))
    log_likelihood_gradient = log_likelihood_gradient_dims.sum((-1))
if eval_gradient:
    return (log_likelihood, log_likelihood_gradient)
else:
    return log_likelihood
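The computation above follows the Cholesky-based formula lml = -0.5 * y.T.dot(alpha) - sum(log(diag(L))) - (n / 2) * log(2 * pi) (GPML, Algorithm 2.1); a worked sketch on made-up data:

import numpy as np
from scipy.linalg import cholesky, cho_solve

rng = np.random.RandomState(0)
X = rng.randn(6, 1)
y = np.sin(X).ravel()
K = (np.exp((-0.5) * ((X - X.T) ** 2)) + (1e-8 * np.eye(6)))  # jittered RBF Gram

L = cholesky(K, lower=True)
alpha = cho_solve((L, True), y)
lml = ((((-0.5) * y.dot(alpha)) - np.log(np.diag(L)).sum()) - ((len(y) / 2) * np.log((2 * np.pi))))
print(lml)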
'Get parameters of this kernel. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.'
def get_params(self, deep=True):
params = dict()
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
(args, varargs) = ([], [])
for parameter in init_sign.parameters.values():
    if ((parameter.kind != parameter.VAR_KEYWORD) and (parameter.name != 'self')):
        args.append(parameter.name)
    if (parameter.kind == parameter.VAR_POSITIONAL):
        varargs.append(parameter.name)
if (len(varargs) != 0):
    raise RuntimeError(("scikit-learn kernels should always specify their parameters in the signature of their __init__ (no varargs). %s doesn't follow this convention." % (cls,)))
for arg in args:
    params[arg] = getattr(self, arg, None)
return params
'Set the parameters of this kernel. The method works on simple kernels as well as on nested kernels. The latter have parameters of the form ``<component>__<parameter>`` so that it\'s possible to update each component of a nested object. Returns self'
def set_params(self, **params):
if (not params):
    return self
valid_params = self.get_params(deep=True)
for (key, value) in six.iteritems(params):
    split = key.split('__', 1)
    if (len(split) > 1):
        (name, sub_name) = split
        if (name not in valid_params):
            raise ValueError(('Invalid parameter %s for kernel %s. Check the list of available parameters with `kernel.get_params().keys()`.' % (name, self)))
        sub_object = valid_params[name]
        sub_object.set_params(**{sub_name: value})
    else:
        if (key not in valid_params):
            raise ValueError(('Invalid parameter %s for kernel %s. Check the list of available parameters with `kernel.get_params().keys()`.' % (key, self.__class__.__name__)))
        setattr(self, key, value)
return self
'Returns a clone of self with given hyperparameters theta.'
def clone_with_theta(self, theta):
cloned = clone(self)
cloned.theta = theta
return cloned
'Returns the number of non-fixed hyperparameters of the kernel.'
@property def n_dims(self):
return self.theta.shape[0]
'Returns a list of all hyperparameter specifications.'
@property def hyperparameters(self):
r = []
for attr in dir(self):
    if attr.startswith('hyperparameter_'):
        r.append(getattr(self, attr))
return r
'Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel\'s hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@property def theta(self):
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
    if (not hyperparameter.fixed):
        theta.append(params[hyperparameter.name])
if (len(theta) > 0):
    return np.log(np.hstack(theta))
else:
    return np.array([])
'Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@theta.setter def theta(self, theta):
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
    if hyperparameter.fixed:
        continue
    if (hyperparameter.n_elements > 1):
        params[hyperparameter.name] = np.exp(theta[i:(i + hyperparameter.n_elements)])
        i += hyperparameter.n_elements
    else:
        params[hyperparameter.name] = np.exp(theta[i])
        i += 1
if (i != len(theta)):
    raise ValueError(('theta has not the correct number of entries. Should be %d; given are %d' % (i, len(theta))))
self.set_params(**params)
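Since theta stores log-transformed values, getting and setting it round-trips through np.log and np.exp; a quick check with an RBF kernel:

import numpy as np
from sklearn.gaussian_process.kernels import RBF

kernel = RBF(length_scale=2.0)
print(kernel.theta)           # [log(2.0)]
kernel.theta = np.log([0.5])  # assign on the log scale
print(kernel.length_scale)    # 0.5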
'Returns the log-transformed bounds on the theta. Returns bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel\'s hyperparameters theta'
@property def bounds(self):
bounds = []
for hyperparameter in self.hyperparameters:
    if (not hyperparameter.fixed):
        bounds.append(hyperparameter.bounds)
if (len(bounds) > 0):
    return np.log(np.vstack(bounds))
else:
    return np.array([])
'Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X)'
def diag(self, X):
return np.ones(X.shape[0])
'Returns whether the kernel is stationary.'
def is_stationary(self):
return True
'Get parameters of this kernel. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.'
def get_params(self, deep=True):
return dict(kernels=self.kernels)
'Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel\'s hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@property def theta(self):
return np.hstack([kernel.theta for kernel in self.kernels])
'Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@theta.setter def theta(self, theta):
# CompoundKernel has no k1 attribute; use the first contained kernel
k_dims = self.kernels[0].n_dims
for (i, kernel) in enumerate(self.kernels):
    kernel.theta = theta[(i * k_dims):((i + 1) * k_dims)]
'Returns the log-transformed bounds on the theta. Returns bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel\'s hyperparameters theta'
@property def bounds(self):
return np.vstack([kernel.bounds for kernel in self.kernels])
'Return the kernel k(X, Y) and optionally its gradient. Note that this compound kernel returns the results of all simple kernels stacked along an additional axis. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Returns K : array, shape (n_samples_X, n_samples_Y, n_kernels) Kernel k(X, Y) K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
if eval_gradient:
    K = []
    K_grad = []
    for kernel in self.kernels:
        (K_single, K_grad_single) = kernel(X, Y, eval_gradient)
        K.append(K_single)
        K_grad.append(K_grad_single[..., np.newaxis])
    return (np.dstack(K), np.concatenate(K_grad, 3))
else:
    return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])
'Returns whether the kernel is stationary.'
def is_stationary(self):
return np.all([kernel.is_stationary() for kernel in self.kernels])
'Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns K_diag : array, shape (n_samples_X, n_kernels) Diagonal of kernel k(X, X)'
def diag(self, X):
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
'Get parameters of this kernel. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.'
def get_params(self, deep=True):
params = dict(k1=self.k1, k2=self.k2)
if deep:
    deep_items = self.k1.get_params().items()
    params.update(((('k1__' + k), val) for (k, val) in deep_items))
    deep_items = self.k2.get_params().items()
    params.update(((('k2__' + k), val) for (k, val) in deep_items))
return params
'Returns a list of all hyperparameter specifications.'
@property def hyperparameters(self):
r = []
for hyperparameter in self.k1.hyperparameters:
    r.append(Hyperparameter(('k1__' + hyperparameter.name), hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
    r.append(Hyperparameter(('k2__' + hyperparameter.name), hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements))
return r
'Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel\'s hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@property def theta(self):
return np.append(self.k1.theta, self.k2.theta)
'Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@theta.setter def theta(self, theta):
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
'Returns the log-transformed bounds on the theta. Returns bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel\'s hyperparameters theta'
@property def bounds(self):
if (self.k1.bounds.size == 0):
    return self.k2.bounds
if (self.k2.bounds.size == 0):
    return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
'Returns whether the kernel is stationary.'
def is_stationary(self):
return (self.k1.is_stationary() and self.k2.is_stationary())
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Returns K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
if eval_gradient:
    (K1, K1_gradient) = self.k1(X, Y, eval_gradient=True)
    (K2, K2_gradient) = self.k2(X, Y, eval_gradient=True)
    return ((K1 + K2), np.dstack((K1_gradient, K2_gradient)))
else:
    return (self.k1(X, Y) + self.k2(X, Y))
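Kernel arithmetic (k1 + k2, k1 * k2) constructs these operator kernels, and their __call__ methods just combine the operands' Gram matrices; a quick check on toy inputs:

import numpy as np
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

X = np.array([[0.0], [1.0], [2.0]])
(k1, k2) = (RBF(1.0), WhiteKernel(0.1))
assert np.allclose((k1 + k2)(X), (k1(X) + k2(X)))
assert np.allclose((k1 * k2)(X), (k1(X) * k2(X)))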
'Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X)'
def diag(self, X):
return (self.k1.diag(X) + self.k2.diag(X))
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Returns K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
if eval_gradient:
    (K1, K1_gradient) = self.k1(X, Y, eval_gradient=True)
    (K2, K2_gradient) = self.k2(X, Y, eval_gradient=True)
    return ((K1 * K2), np.dstack(((K1_gradient * K2[:, :, np.newaxis]), (K2_gradient * K1[:, :, np.newaxis]))))
else:
    return (self.k1(X, Y) * self.k2(X, Y))
'Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X)'
def diag(self, X):
return (self.k1.diag(X) * self.k2.diag(X))
'Get parameters of this kernel. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.'
def get_params(self, deep=True):
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
    deep_items = self.kernel.get_params().items()
    params.update(((('kernel__' + k), val) for (k, val) in deep_items))
return params
'Returns a list of all hyperparameter specifications.'
@property def hyperparameters(self):
r = []
for hyperparameter in self.kernel.hyperparameters:
    r.append(Hyperparameter(('kernel__' + hyperparameter.name), hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements))
return r
'Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel\'s hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@property def theta(self):
return self.kernel.theta
'Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel'
@theta.setter def theta(self, theta):
self.kernel.theta = theta
'Returns the log-transformed bounds on the theta. Returns bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel\'s hyperparameters theta'
@property def bounds(self):
return self.kernel.bounds
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Returns K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
if eval_gradient:
    (K, K_gradient) = self.kernel(X, Y, eval_gradient=True)
    K_gradient *= (self.exponent * (K[:, :, np.newaxis] ** (self.exponent - 1)))
    return ((K ** self.exponent), K_gradient)
else:
    K = self.kernel(X, Y, eval_gradient=False)
    return (K ** self.exponent)
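Exponentiation raises the wrapped kernel's Gram matrix elementwise, as the __call__ above shows; k ** 2 builds such a kernel:

import numpy as np
from sklearn.gaussian_process.kernels import RBF

X = np.array([[0.0], [1.0], [2.0]])
k = RBF(1.0)
assert np.allclose((k ** 2)(X), (k(X) ** 2))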
'Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X)'
def diag(self, X):
return (self.kernel.diag(X) ** self.exponent)
'Returns whether the kernel is stationary.'
def is_stationary(self):
return self.kernel.is_stationary()
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
X = np.atleast_2d(X)
if (Y is None):
    Y = X
elif eval_gradient:
    raise ValueError('Gradient can only be evaluated when Y is None.')
K = (self.constant_value * np.ones((X.shape[0], Y.shape[0])))
if eval_gradient:
    if (not self.hyperparameter_constant_value.fixed):
        return (K, (self.constant_value * np.ones((X.shape[0], X.shape[0], 1))))
    else:
        return (K, np.empty((X.shape[0], X.shape[0], 0)))
else:
    return K
'Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X)'
def diag(self, X):
return (self.constant_value * np.ones(X.shape[0]))
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
X = np.atleast_2d(X)
if ((Y is not None) and eval_gradient):
    raise ValueError('Gradient can only be evaluated when Y is None.')
if (Y is None):
    K = (self.noise_level * np.eye(X.shape[0]))
    if eval_gradient:
        if (not self.hyperparameter_noise_level.fixed):
            return (K, (self.noise_level * np.eye(X.shape[0])[:, :, np.newaxis]))
        else:
            return (K, np.empty((X.shape[0], X.shape[0], 0)))
    else:
        return K
else:
    return np.zeros((X.shape[0], Y.shape[0]))
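As implemented above, WhiteKernel contributes noise only on k(X, X) and is zero for cross-covariances k(X, Y); a quick check:

import numpy as np
from sklearn.gaussian_process.kernels import WhiteKernel

X = np.array([[0.0], [1.0]])
Y = np.array([[0.5]])
k = WhiteKernel(noise_level=0.3)
print(k(X))     # 0.3 * identity
print(k(X, Y))  # all zeros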
'Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X)'
def diag(self, X):
return (self.noise_level * np.ones(X.shape[0]))
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if (Y is None):
    dists = pdist((X / length_scale), metric='sqeuclidean')
    K = np.exp(((-0.5) * dists))
    # Convert from condensed to square form; diagonal entries are k(x, x) = 1
    K = squareform(K)
    np.fill_diagonal(K, 1)
else:
    if eval_gradient:
        raise ValueError('Gradient can only be evaluated when Y is None.')
    dists = cdist((X / length_scale), (Y / length_scale), metric='sqeuclidean')
    K = np.exp(((-0.5) * dists))
if eval_gradient:
    if self.hyperparameter_length_scale.fixed:
        # Hyperparameter length_scale kept fixed: empty gradient
        return (K, np.empty((X.shape[0], X.shape[0], 0)))
    elif ((not self.anisotropic) or (length_scale.shape[0] == 1)):
        K_gradient = (K * squareform(dists))[:, :, np.newaxis]
        return (K, K_gradient)
    elif self.anisotropic:
        # Recompute the pairwise dimension-wise distances for the
        # per-dimension gradient
        K_gradient = (((X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2) / (length_scale ** 2))
        K_gradient *= K[..., np.newaxis]
        return (K, K_gradient)
else:
    return K
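The Gram matrix above is k(x, x') = exp(-0.5 * ||x - x'||^2 / l^2); a check against an explicit pairwise computation:

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import RBF

X = np.random.RandomState(0).randn(4, 2)
l = 1.5
K = RBF(length_scale=l)(X)
assert np.allclose(K, np.exp((-0.5) * cdist((X / l), (X / l), 'sqeuclidean')))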
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True.'
def __call__(self, X, Y=None, eval_gradient=False):
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if (Y is None):
    dists = pdist((X / length_scale), metric='euclidean')
else:
    if eval_gradient:
        raise ValueError('Gradient can only be evaluated when Y is None.')
    dists = cdist((X / length_scale), (Y / length_scale), metric='euclidean')
if (self.nu == 0.5):
    K = np.exp((- dists))
elif (self.nu == 1.5):
    K = (dists * math.sqrt(3))
    K = ((1.0 + K) * np.exp((- K)))
elif (self.nu == 2.5):
    K = (dists * math.sqrt(5))
    K = (((1.0 + K) + ((K ** 2) / 3.0)) * np.exp((- K)))
else:
    # General case: evaluate the Matern function via the modified Bessel
    # function of the second kind (considerably slower)
    K = dists
    K[(K == 0.0)] += np.finfo(float).eps  # strict zeros would produce nan
    tmp = (math.sqrt((2 * self.nu)) * K)
    K.fill(((2 ** (1.0 - self.nu)) / gamma(self.nu)))
    K *= (tmp ** self.nu)
    K *= kv(self.nu, tmp)
if (Y is None):
    # Convert from condensed to square form
    K = squareform(K)
    np.fill_diagonal(K, 1)
if eval_gradient:
    if self.hyperparameter_length_scale.fixed:
        # Hyperparameter length_scale kept fixed: empty gradient
        K_gradient = np.empty((X.shape[0], X.shape[0], 0))
        return (K, K_gradient)
    # Recompute the pairwise dimension-wise distances
    if self.anisotropic:
        D = (((X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2) / (length_scale ** 2))
    else:
        D = squareform((dists ** 2))[:, :, np.newaxis]
    if (self.nu == 0.5):
        K_gradient = ((K[..., np.newaxis] * D) / np.sqrt(D.sum(2))[:, :, np.newaxis])
        K_gradient[(~ np.isfinite(K_gradient))] = 0
    elif (self.nu == 1.5):
        K_gradient = ((3 * D) * np.exp((- np.sqrt((3 * D.sum((-1))))))[..., np.newaxis])
    elif (self.nu == 2.5):
        tmp = np.sqrt((5 * D.sum((-1))))[..., np.newaxis]
        K_gradient = ((((5.0 / 3.0) * D) * (tmp + 1)) * np.exp((- tmp)))
    else:
        # Gradient of the general case is approximated numerically
        def f(theta):
            return self.clone_with_theta(theta)(X, Y)
        return (K, _approx_fprime(self.theta, f, 1e-10))
    if (not self.anisotropic):
        return (K, K_gradient[:, :].sum((-1))[:, :, np.newaxis])
    else:
        return (K, K_gradient)
else:
    return K
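A quick check of the closed forms above: nu=0.5 is the absolute-exponential kernel and nu=1.5 the once-differentiable form:

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import Matern

X = np.random.RandomState(1).randn(4, 2)
d = cdist(X, X, 'euclidean')
assert np.allclose(Matern(length_scale=1.0, nu=0.5)(X), np.exp((- d)))
assert np.allclose(Matern(length_scale=1.0, nu=1.5)(X), ((1.0 + (np.sqrt(3) * d)) * np.exp((- (np.sqrt(3) * d)))))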