desc | decl | bodies
---|---|---|
'Write the function code and the filename to a file.'
| def _write_func_code(self, filename, func_code, first_line):
| func_code = (u'%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code))
with io.open(filename, 'w', encoding='UTF-8') as out:
out.write(func_code)
is_named_callable = False
if PY3_OR_LATER:
is_named_callable = (hasattr(self.func, '__name__') and (self.func.__name__ != '<lambda>'))
else:
is_named_callable = (hasattr(self.func, 'func_name') and (self.func.func_name != '<lambda>'))
if is_named_callable:
func_hash = self._hash_func()
try:
_FUNCTION_HASHES[self.func] = func_hash
except TypeError:
pass
|
'stacklevel is the depth at which this function is called, used to
issue useful warnings to the user.'
| def _check_previous_func_code(self, stacklevel=2):
| try:
if (self.func in _FUNCTION_HASHES):
func_hash = self._hash_func()
if (func_hash == _FUNCTION_HASHES[self.func]):
return True
except TypeError:
pass
(func_code, source_file, first_line) = get_func_code(self.func)
func_dir = self._get_func_dir()
func_code_file = os.path.join(func_dir, 'func_code.py')
try:
with io.open(func_code_file, encoding='UTF-8') as infile:
(old_func_code, old_first_line) = extract_first_line(infile.read())
except IOError:
self._write_func_code(func_code_file, func_code, first_line)
return False
if (old_func_code == func_code):
return True
(_, func_name) = get_func_name(self.func, resolv_alias=False, win_characters=False)
if ((old_first_line == first_line == (-1)) or (func_name == '<lambda>')):
if (not (first_line == (-1))):
func_description = ('%s (%s:%i)' % (func_name, source_file, first_line))
else:
func_description = func_name
warnings.warn(JobLibCollisionWarning(("Cannot detect name collisions for function '%s'" % func_description)), stacklevel=stacklevel)
if ((not (old_first_line == first_line)) and (source_file is not None)):
possible_collision = False
if os.path.exists(source_file):
(_, func_name) = get_func_name(self.func, resolv_alias=False)
num_lines = len(func_code.split('\n'))
with open_py_source(source_file) as f:
on_disk_func_code = f.readlines()[(old_first_line - 1):(((old_first_line - 1) + num_lines) - 1)]
on_disk_func_code = ''.join(on_disk_func_code)
possible_collision = (on_disk_func_code.rstrip() == old_func_code.rstrip())
else:
possible_collision = source_file.startswith('<doctest ')
if possible_collision:
warnings.warn(JobLibCollisionWarning(("Possible name collisions between functions '%s' (%s:%i) and '%s' (%s:%i)" % (func_name, source_file, old_first_line, func_name, source_file, first_line))), stacklevel=stacklevel)
if (self._verbose > 10):
(_, func_name) = get_func_name(self.func, resolv_alias=False)
self.warn(('Function %s (stored in %s) has changed.' % (func_name, func_dir)))
self.clear(warn=True)
return False
|
'Empty the function\'s cache.'
| def clear(self, warn=True):
| func_dir = self._get_func_dir(mkdir=False)
if ((self._verbose > 0) and warn):
self.warn(('Clearing cache %s' % func_dir))
if os.path.exists(func_dir):
shutil.rmtree(func_dir, ignore_errors=True)
mkdirp(func_dir)
(func_code, _, first_line) = get_func_code(self.func)
func_code_file = os.path.join(func_dir, 'func_code.py')
self._write_func_code(func_code_file, func_code, first_line)
|
'Force the execution of the function with the given arguments and
persist the output values.'
| def call(self, *args, **kwargs):
| start_time = time.time()
(output_dir, _) = self._get_output_dir(*args, **kwargs)
if (self._verbose > 0):
print(format_call(self.func, args, kwargs))
output = self.func(*args, **kwargs)
self._persist_output(output, output_dir)
duration = (time.time() - start_time)
metadata = self._persist_input(output_dir, duration, args, kwargs)
if (self._verbose > 0):
(_, name) = get_func_name(self.func)
msg = ('%s - %s' % (name, format_time(duration)))
print ((max(0, (80 - len(msg))) * '_') + msg)
return (output, metadata)
|
'Persist the given output tuple in the directory.'
| def _persist_output(self, output, dir):
| try:
filename = os.path.join(dir, 'output.pkl')
mkdirp(dir)
write_func = functools.partial(numpy_pickle.dump, compress=self.compress)
concurrency_safe_write(output, filename, write_func)
if (self._verbose > 10):
print ('Persisting in %s' % dir)
except OSError:
# Race condition in the creation of the directory
pass
|
'Save a small summary of the call using json format in the
output directory.
output_dir: string
directory where to write metadata.
duration: float
time taken by hashing input arguments, calling the wrapped
function and persisting its output.
args, kwargs: list and dict
input arguments for wrapped function
this_duration_limit: float
Max execution time for this function before issuing a warning.'
| def _persist_input(self, output_dir, duration, args, kwargs, this_duration_limit=0.5):
| start_time = time.time()
argument_dict = filter_args(self.func, self.ignore, args, kwargs)
input_repr = dict(((k, repr(v)) for (k, v) in argument_dict.items()))
metadata = {'duration': duration, 'input_args': input_repr}
try:
mkdirp(output_dir)
filename = os.path.join(output_dir, 'metadata.json')
def write_func(output, dest_filename):
with open(dest_filename, 'w') as f:
json.dump(output, f)
concurrency_safe_write(metadata, filename, write_func)
except Exception:
pass
this_duration = (time.time() - start_time)
if (this_duration > this_duration_limit):
warnings.warn(("Persisting input arguments took %.2fs to run.\nIf this happens often in your code, it can cause performance problems \n(results will be correct in all cases). \nThe reason for this is probably some large input arguments for a wrapped\n function (e.g. large strings).\nTHIS IS A JOBLIB ISSUE. If you can, kindly provide the joblib's team with an\n example so that they can fix the problem." % this_duration), stacklevel=5)
return metadata
|
'Parameters
cachedir: string or None
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent.
mmap_mode: {None, \'r+\', \'r\', \'w+\', \'c\'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments.
compress: boolean, or integer
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose: int, optional
Verbosity flag, controls the debug messages that are issued
as functions are evaluated.
bytes_limit: int, optional
Limit in bytes of the size of the cache'
| def __init__(self, cachedir, mmap_mode=None, compress=False, verbose=1, bytes_limit=None):
| Logger.__init__(self)
self._verbose = verbose
self.mmap_mode = mmap_mode
self.timestamp = time.time()
self.compress = compress
self.bytes_limit = bytes_limit
if (compress and (mmap_mode is not None)):
warnings.warn('Compressed results cannot be memmapped', stacklevel=2)
if (cachedir is None):
self.cachedir = None
else:
self.cachedir = os.path.join(cachedir, 'joblib')
mkdirp(self.cachedir)
|
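A minimal usage sketch for the constructor above, assuming it belongs to joblib's (pre-0.12) Memory class; the cache path is an example only and the second object shows the fully transparent no-cache mode.

```python
# A minimal sketch, assuming the class above is joblib's (pre-0.12) Memory.
from joblib import Memory

# Files will land under /tmp/example_cache/joblib/ (path is an example only).
memory = Memory('/tmp/example_cache', mmap_mode=None, compress=False, verbose=1)

# With cachedir=None the object is completely transparent: nothing is cached.
null_memory = Memory(None)
```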
'Decorates the given function func to only compute its return
value for input arguments not cached on disk.
Parameters
func: callable, optional
The function to be decorated
ignore: list of strings
A list of argument names to ignore in the hashing
verbose: integer, optional
The verbosity mode of the function. By default that
of the memory object is used.
mmap_mode: {None, \'r+\', \'r\', \'w+\', \'c\'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments. By default that of the memory object is used.
Returns
decorated_func: MemorizedFunc object
The returned object is a MemorizedFunc object, that is
callable (behaves like a function), but offers extra
methods for cache lookup and management. See the
documentation for :class:`joblib.memory.MemorizedFunc`.'
| def cache(self, func=None, ignore=None, verbose=None, mmap_mode=False):
| if (func is None):
return functools.partial(self.cache, ignore=ignore, verbose=verbose, mmap_mode=mmap_mode)
if (self.cachedir is None):
return NotMemorizedFunc(func)
if (verbose is None):
verbose = self._verbose
if (mmap_mode is False):
mmap_mode = self.mmap_mode
if isinstance(func, MemorizedFunc):
func = func.func
return MemorizedFunc(func, cachedir=self.cachedir, mmap_mode=mmap_mode, ignore=ignore, compress=self.compress, verbose=verbose, timestamp=self.timestamp)
|
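A short sketch of how the cache method above is typically used as a decorator; the cache directory and the expensive_sum function are illustrative only.

```python
import numpy as np
from joblib import Memory

memory = Memory('/tmp/example_cache', verbose=0)  # example path

@memory.cache
def expensive_sum(x):
    # Stand-in for a heavy computation; the result is persisted on disk.
    return np.sum(x ** 2)

a = np.arange(1000)
expensive_sum(a)  # computed and written to the cache
expensive_sum(a)  # second call is served from the cache
```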
'Erase the complete cache directory.'
| def clear(self, warn=True):
| if warn:
self.warn('Flushing completely the cache')
if (self.cachedir is not None):
rm_subdirs(self.cachedir)
|
'Remove cache folders to make cache size fit in ``bytes_limit``.'
| def reduce_size(self):
| if ((self.cachedir is not None) and (self.bytes_limit is not None)):
cache_items_to_delete = _get_cache_items_to_delete(self.cachedir, self.bytes_limit)
for cache_item in cache_items_to_delete:
if (self._verbose > 10):
print('Deleting cache item {}'.format(cache_item))
try:
shutil.rmtree(cache_item.path, ignore_errors=True)
except OSError:
pass
|
'Eval function func with arguments `*args` and `**kwargs`,
in the context of the memory.
This method works similarly to the builtin `apply`, except
that the function is called only if the cache is not
up to date.'
| def eval(self, func, *args, **kwargs):
| if (self.cachedir is None):
return func(*args, **kwargs)
return self.cache(func)(*args, **kwargs)
|
'We don\'t store the timestamp when pickling, to avoid the hash
depending on it.
In addition, when unpickling, we run the __init__'
| def __reduce__(self):
| cachedir = (self.cachedir[:(-7)] if (self.cachedir is not None) else None)
return (self.__class__, (cachedir, self.mmap_mode, self.compress, self._verbose))
|
'Performs inductive inference across the model.
Parameters
X : array_like, shape = [n_samples, n_features]
Returns
y : array_like, shape = [n_samples]
Predictions for input data'
| def predict(self, X):
| probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
|
'Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
X : array_like, shape = [n_samples, n_features]
Returns
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels'
| def predict_proba(self, X):
| check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if (self.kernel == 'knn'):
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
|
'Fit a semi-supervised label propagation model.
All the input data is provided as matrix X (labeled and unlabeled)
and the corresponding label matrix y, with a dedicated marker value for
unlabeled samples.
Parameters
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
self : returns an instance of self.'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
graph_matrix = self._build_graph()
classes = np.unique(y)
classes = classes[(classes != (-1))]
self.classes_ = classes
(n_samples, n_classes) = (len(y), len(classes))
alpha = self.alpha
if ((self._variant == 'spreading') and ((alpha is None) or (alpha <= 0.0) or (alpha >= 1.0))):
raise ValueError(('alpha=%s is invalid: it must be inside the open interval (0, 1)' % alpha))
y = np.asarray(y)
unlabeled = (y == (-1))
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[((y == label), (classes == label))] = 1
y_static = np.copy(self.label_distributions_)
if (self._variant == 'propagation'):
y_static[unlabeled] = 0
else:
y_static *= (1 - alpha)
l_previous = np.zeros((self.X_.shape[0], n_classes))
unlabeled = unlabeled[:, np.newaxis]
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
for self.n_iter_ in range(self.max_iter):
if (np.abs((self.label_distributions_ - l_previous)).sum() < self.tol):
break
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(graph_matrix, self.label_distributions_)
if (self._variant == 'propagation'):
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
self.label_distributions_ = np.where(unlabeled, self.label_distributions_, y_static)
else:
self.label_distributions_ = (np.multiply(alpha, self.label_distributions_) + y_static)
else:
warnings.warn(('max_iter=%d was reached without convergence.' % self.max_iter), category=ConvergenceWarning)
self.n_iter_ += 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)]
self.transduction_ = transduction.ravel()
return self
|
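Since the fit method above dispatches on a 'propagation'/'spreading' variant, it appears to back scikit-learn's LabelPropagation/LabelSpreading estimators. A minimal sketch assuming LabelSpreading, with -1 marking unlabeled samples as the docstring describes:

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.semi_supervised import LabelSpreading

X, y = load_iris(return_X_y=True)
rng = np.random.RandomState(42)
y_partial = np.copy(y)
y_partial[rng.rand(len(y)) < 0.7] = -1  # mark ~70% of the samples as unlabeled

model = LabelSpreading(kernel='rbf', alpha=0.2, max_iter=30)
model.fit(X, y_partial)
print(model.transduction_[:10])  # labels inferred for every sample
```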
'Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).'
| def _build_graph(self):
| if (self.kernel == 'knn'):
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
|
'Graph matrix for Label Spreading computes the graph laplacian'
| def _build_graph(self):
| if (self.kernel == 'knn'):
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = sparse.csgraph.laplacian(affinity_matrix, normed=True)
laplacian = (- laplacian)
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::(n_samples + 1)] = 0.0
return laplacian
|
'Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- \'auto\': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.'
| def _solve_lsqr(self, X, y, shrinkage):
| self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (((-0.5) * np.diag(np.dot(self.means_, self.coef_.T))) + np.log(self.priors_))
|
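A standalone numpy sketch of the closed-form solve performed by _solve_lsqr above; the means, covariance and priors are made-up toy values standing in for the fitted attributes.

```python
import numpy as np

# Toy stand-ins for self.means_, self.covariance_ and self.priors_.
means = np.array([[0.0, 0.0], [1.0, 1.0]])        # one row per class
covariance = np.array([[1.0, 0.2], [0.2, 1.0]])   # shared within-class covariance
priors = np.array([0.5, 0.5])

# coef solves  covariance @ coef.T = means.T  in the least-squares sense.
coef = np.linalg.lstsq(covariance, means.T, rcond=None)[0].T
intercept = -0.5 * np.diag(means @ coef.T) + np.log(priors)
print(coef, intercept)
```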
'Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- \'auto\': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.'
| def _solve_eigen(self, X, y, shrinkage):
| self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_
St = _cov(X, shrinkage)
Sb = (St - Sw)
(evals, evecs) = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort((evals / np.sum(evals)))[::(-1)][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::(-1)]]
evecs /= np.linalg.norm(evecs, axis=0)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (((-0.5) * np.diag(np.dot(self.means_, self.coef_.T))) + np.log(self.priors_))
|
'SVD solver.
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.'
| def _solve_svd(self, X, y):
| (n_samples, n_features) = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for (idx, group) in enumerate(self.classes_):
Xg = X[(y == group), :]
Xc.append((Xg - self.means_[idx]))
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
std = Xc.std(axis=0)
std[(std == 0)] = 1.0
fac = (1.0 / (n_samples - n_classes))
X = (np.sqrt(fac) * (Xc / std))
(U, S, V) = linalg.svd(X, full_matrices=False)
rank = np.sum((S > self.tol))
if (rank < n_features):
warnings.warn('Variables are collinear.')
scalings = ((V[:rank] / std).T / S[:rank])
X = np.dot((np.sqrt(((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T).T, scalings)
(_, S, V) = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = ((S ** 2) / np.sum((S ** 2)))[:self._max_components]
rank = np.sum((S > (self.tol * S[0])))
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot((self.means_ - self.xbar_), self.scalings_)
self.intercept_ = (((-0.5) * np.sum((coef ** 2), axis=1)) + np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
|
'Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if (self.priors is None):
(_, y_t) = np.unique(y, return_inverse=True)
self.priors_ = (np.bincount(y_t) / float(len(y)))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError('priors must be non-negative')
if (self.priors_.sum() != 1):
warnings.warn('The priors do not sum to 1. Renormalizing', UserWarning)
self.priors_ = (self.priors_ / self.priors_.sum())
if (self.n_components is None):
self._max_components = (len(self.classes_) - 1)
else:
self._max_components = min((len(self.classes_) - 1), self.n_components)
if (self.solver == 'svd'):
if (self.shrinkage is not None):
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif (self.solver == 'lsqr'):
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif (self.solver == 'eigen'):
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', 'lsqr', and 'eigen').".format(self.solver))
if (self.classes_.size == 2):
self.coef_ = np.array((self.coef_[1, :] - self.coef_[0, :]), ndmin=2)
self.intercept_ = np.array((self.intercept_[1] - self.intercept_[0]), ndmin=1)
return self
|
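Assuming the fit method above is scikit-learn's LinearDiscriminantAnalysis.fit, a minimal usage sketch with the 'lsqr' solver and automatic shrinkage (the iris data is only an example):

```python
from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = load_iris(return_X_y=True)

# 'lsqr' and 'eigen' accept shrinkage; 'svd' (the default) does not.
lda = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
lda.fit(X, y)
print(lda.predict(X[:5]))
```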
'Project data to maximize class separation.
Parameters
X : array-like, shape (n_samples, n_features)
Input data.
Returns
X_new : array, shape (n_samples, n_components)
Transformed data.'
| def transform(self, X):
| if (self.solver == 'lsqr'):
raise NotImplementedError("transform not implemented for 'lsqr' solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if (self.solver == 'svd'):
X_new = np.dot((X - self.xbar_), self.scalings_)
elif (self.solver == 'eigen'):
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
|
'Estimate probability.
Parameters
X : array-like, shape (n_samples, n_features)
Input data.
Returns
C : array, shape (n_samples, n_classes)
Estimated probabilities.'
| def predict_proba(self, X):
| prob = self.decision_function(X)
prob *= (-1)
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if (len(self.classes_) == 2):
return np.column_stack([(1 - prob), prob])
else:
prob /= prob.sum(axis=1).reshape((prob.shape[0], (-1)))
return prob
|
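The predict_proba body above computes a logistic transform of the decision values entirely in place. A small numpy sketch showing the equivalence, with made-up decision values:

```python
import numpy as np

decision = np.array([-2.0, 0.0, 3.0])  # example decision_function outputs

# Same in-place pipeline as above: prob = 1 / (1 + exp(-decision)).
prob = decision.copy()
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)

assert np.allclose(prob, 1.0 / (1.0 + np.exp(-decision)))
```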
'Estimate log probability.
Parameters
X : array-like, shape (n_samples, n_features)
Input data.
Returns
C : array, shape (n_samples, n_classes)
Estimated log probabilities.'
| def predict_log_proba(self, X):
| return np.log(self.predict_proba(X))
|
'Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y)
check_classification_targets(y)
(self.classes_, y) = np.unique(y, return_inverse=True)
(n_samples, n_features) = X.shape
n_classes = len(self.classes_)
if (n_classes < 2):
raise ValueError('y has less than 2 classes')
if (self.priors is None):
self.priors_ = (np.bincount(y) / float(n_samples))
else:
self.priors_ = self.priors
cov = None
store_covariance = (self.store_covariance or self.store_covariances)
if self.store_covariances:
warnings.warn("'store_covariances' was renamed to store_covariance in version 0.19 and will be removed in 0.21.", DeprecationWarning)
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[(y == ind), :]
meang = Xg.mean(0)
means.append(meang)
if (len(Xg) == 1):
raise ValueError(('y has only 1 sample in class %s, covariance is ill defined.' % str(self.classes_[ind])))
Xgc = (Xg - meang)
(U, S, Vt) = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum((S > self.tol))
if (rank < n_features):
warnings.warn('Variables are collinear')
S2 = ((S ** 2) / (len(Xg) - 1))
S2 = (((1 - self.reg_param) * S2) + self.reg_param)
if (self.store_covariance or store_covariance):
cov.append(np.dot((S2 * Vt.T), Vt))
scalings.append(S2)
rotations.append(Vt.T)
if (self.store_covariance or store_covariance):
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
|
'Apply decision function to an array of samples.
Parameters
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.'
| def decision_function(self, X):
| dec_func = self._decision_function(X)
if (len(self.classes_) == 2):
return (dec_func[:, 1] - dec_func[:, 0])
return dec_func
|
'Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
C : array, shape = [n_samples]'
| def predict(self, X):
| d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
|
'Return posterior probabilities of classification.
Parameters
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.'
| def predict_proba(self, X):
| values = self._decision_function(X)
likelihood = np.exp((values - values.max(axis=1)[:, np.newaxis]))
return (likelihood / likelihood.sum(axis=1)[:, np.newaxis])
|
'Return posterior probabilities of classification.
Parameters
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.'
| def predict_log_proba(self, X):
| probas_ = self.predict_proba(X)
return np.log(probas_)
|
'Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()'
| def _iter_test_masks(self):
| for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
(yield test_mask)
|
'Generates integer indices corresponding to test sets.'
| def _iter_test_indices(self):
| raise NotImplementedError
|
'Fit the calibrated model
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
self : object
Returns an instance of self.'
| def fit(self, X, y, sample_weight=None):
| (X, y) = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False)
(X, y) = indexable(X, y)
le = LabelBinarizer().fit(y)
self.classes_ = le.classes_
n_folds = (self.cv if isinstance(self.cv, int) else (self.cv.n_folds if hasattr(self.cv, 'n_folds') else None))
if (n_folds and np.any([(np.sum((y == class_)) < n_folds) for class_ in self.classes_])):
raise ValueError(('Requesting %d-fold cross-validation but provided less than %d examples for at least one class.' % (n_folds, n_folds)))
self.calibrated_classifiers_ = []
if (self.base_estimator is None):
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if (self.cv == 'prefit'):
calibrated_classifier = _CalibratedClassifier(base_estimator, method=self.method)
if (sample_weight is not None):
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if ((sample_weight is not None) and ('sample_weight' not in fit_parameters)):
warnings.warn(('%s does not support sample_weight. Samples weights are only used for the calibration itself.' % estimator_name))
base_estimator_sample_weight = None
else:
if (sample_weight is not None):
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
base_estimator_sample_weight = sample_weight
for (train, test) in cv.split(X, y):
this_estimator = clone(base_estimator)
if (base_estimator_sample_weight is not None):
this_estimator.fit(X[train], y[train], sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(this_estimator, method=self.method, classes=self.classes_)
if (sample_weight is not None):
calibrated_classifier.fit(X[test], y[test], sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
|
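Assuming the fit method above belongs to scikit-learn's CalibratedClassifierCV, a minimal usage sketch with a LinearSVC base estimator and sigmoid calibration (the synthetic data is illustrative only):

```python
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=200, random_state=0)

# With cv=3, each fold fits the SVM on 2/3 of the data and calibrates on the rest.
clf = CalibratedClassifierCV(LinearSVC(random_state=0), method='sigmoid', cv=3)
clf.fit(X, y)
print(clf.predict_proba(X[:3]))
```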
'Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
X : array-like, shape (n_samples, n_features)
The samples.
Returns
C : array, shape (n_samples, n_classes)
The predicted probas.'
| def predict_proba(self, X):
| check_is_fitted(self, ['classes_', 'calibrated_classifiers_'])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False)
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
|
'Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
X : array-like, shape (n_samples, n_features)
The samples.
Returns
C : array, shape (n_samples,)
The predicted class.'
| def predict(self, X):
| check_is_fitted(self, ['classes_', 'calibrated_classifiers_'])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
|
'Calibrate the fitted model
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
self : object
Returns an instance of self.'
| def fit(self, X, y, sample_weight=None):
| self.label_encoder_ = LabelEncoder()
if (self.classes is None):
self.label_encoder_.fit(y)
else:
self.label_encoder_.fit(self.classes)
self.classes_ = self.label_encoder_.classes_
Y = label_binarize(y, self.classes_)
(df, idx_pos_class) = self._preproc(X)
self.calibrators_ = []
for (k, this_df) in zip(idx_pos_class, df.T):
if (self.method == 'isotonic'):
calibrator = IsotonicRegression(out_of_bounds='clip')
elif (self.method == 'sigmoid'):
calibrator = _SigmoidCalibration()
else:
raise ValueError(('method should be "sigmoid" or "isotonic". Got %s.' % self.method))
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
|
'Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
X : array-like, shape (n_samples, n_features)
The samples.
Returns
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.'
| def predict_proba(self, X):
| n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
(df, idx_pos_class) = self._preproc(X)
for (k, this_df, calibrator) in zip(idx_pos_class, df.T, self.calibrators_):
if (n_classes == 2):
k += 1
proba[:, k] = calibrator.predict(this_df)
if (n_classes == 2):
proba[:, 0] = (1.0 - proba[:, 1])
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
proba[np.isnan(proba)] = (1.0 / n_classes)
proba[((1.0 < proba) & (proba <= (1.0 + 1e-05)))] = 1.0
return proba
|
'Fit the model using X, y as training data.
Parameters
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
self : object
Returns an instance of self.'
| def fit(self, X, y, sample_weight=None):
| X = column_or_1d(X)
y = column_or_1d(y)
(X, y) = indexable(X, y)
(self.a_, self.b_) = _sigmoid_calibration(X, y, sample_weight)
return self
|
'Predict new data by linear interpolation.
Parameters
T : array-like, shape (n_samples,)
Data to predict from.
Returns
T_ : array, shape (n_samples,)
The predicted data.'
| def predict(self, T):
| T = column_or_1d(T)
return (1.0 / (1.0 + np.exp(((self.a_ * T) + self.b_))))
|
'Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets.
classes : list of numpy arrays, shape (n_outputs)
Each array is unique classes for one output in str/int
Can be obtained via
``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
target matrix of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn\'t need to contain all labels in `classes`.
sample_weight : array-like, shape = (n_samples) or None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
self : object
Returns self.'
| @if_delegate_has_method('estimator')
def partial_fit(self, X, y, classes=None, sample_weight=None):
| (X, y) = check_X_y(X, y, multi_output=True, accept_sparse=True)
if (y.ndim == 1):
raise ValueError('y must have at least two dimensions for multi-output regression but has only one.')
if ((sample_weight is not None) and (not has_fit_parameter(self.estimator, 'sample_weight'))):
raise ValueError('Underlying estimator does not support sample weights.')
first_time = (not hasattr(self, 'estimators_'))
self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_partial_fit_estimator)((self.estimators_[i] if (not first_time) else self.estimator), X, y[:, i], (classes[i] if (classes is not None) else None), sample_weight, first_time) for i in range(y.shape[1])))
return self
|
'Fit the model to data.
Fit a separate model for each output variable.
Parameters
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets. An indicator matrix turns on multilabel
estimation.
sample_weight : array-like, shape = (n_samples) or None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
self : object
Returns self.'
| def fit(self, X, y, sample_weight=None):
| if (not hasattr(self.estimator, 'fit')):
raise ValueError('The base estimator should implement a fit method')
(X, y) = check_X_y(X, y, multi_output=True, accept_sparse=True)
if is_classifier(self):
check_classification_targets(y)
if (y.ndim == 1):
raise ValueError('y must have at least two dimensions for multi-output regression but has only one.')
if ((sample_weight is not None) and (not has_fit_parameter(self.estimator, 'sample_weight'))):
raise ValueError('Underlying estimator does not support sample weights.')
self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_fit_estimator)(self.estimator, X, y[:, i], sample_weight) for i in range(y.shape[1])))
return self
|
'Predict multi-output variable using a model
trained for each target variable.
Parameters
X : (sparse) array-like, shape (n_samples, n_features)
Data.
Returns
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets predicted across multiple predictors.
Note: Separate models are generated for each predictor.'
| def predict(self, X):
| check_is_fitted(self, 'estimators_')
if (not hasattr(self.estimator, 'predict')):
raise ValueError('The base estimator should implement a predict method')
X = check_array(X, accept_sparse=True)
y = Parallel(n_jobs=self.n_jobs)((delayed(parallel_helper)(e, 'predict', X) for e in self.estimators_))
return np.asarray(y).T
|
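Assuming the surrounding class is scikit-learn's MultiOutputRegressor, a minimal usage sketch fitting one regressor per target column (estimator choice and data are illustrative):

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor

X, y = make_regression(n_samples=100, n_targets=3, random_state=0)

# One GradientBoostingRegressor is fitted per target column, optionally in parallel.
model = MultiOutputRegressor(GradientBoostingRegressor(random_state=0), n_jobs=1)
model.fit(X, y)
print(model.predict(X[:2]).shape)  # (2, 3): one column per output
```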
'Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets.
sample_weight : array-like, shape = (n_samples) or None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
self : object
Returns self.'
| @if_delegate_has_method('estimator')
def partial_fit(self, X, y, sample_weight=None):
| super(MultiOutputRegressor, self).partial_fit(X, y, sample_weight=sample_weight)
|
'Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Notes
R^2 is calculated by weighting all the targets equally using
`multioutput=\'uniform_average\'`.
Parameters
X : array-like, shape (n_samples, n_features)
Test samples.
y : array-like, shape (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape [n_samples], optional
Sample weights.
Returns
score : float
R^2 of self.predict(X) wrt. y.'
| def score(self, X, y, sample_weight=None):
| from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight, multioutput='uniform_average')
|
'Probability estimates.
Returns prediction probabilities for each class of each output.
Parameters
X : array-like, shape (n_samples, n_features)
Data
Returns
p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.'
| def predict_proba(self, X):
| check_is_fitted(self, 'estimators_')
if (not hasattr(self.estimator, 'predict_proba')):
raise ValueError('The base estimator should implement predict_proba method')
results = [estimator.predict_proba(X) for estimator in self.estimators_]
return results
|
'"Returns the mean accuracy on the given test data and labels.
Parameters
X : array-like, shape [n_samples, n_features]
Test samples
y : array-like, shape [n_samples, n_outputs]
True values for X
Returns
scores : float
accuracy_score of self.predict(X) versus y'
| def score(self, X, y):
| check_is_fitted(self, 'estimators_')
n_outputs_ = len(self.estimators_)
if (y.ndim == 1):
raise ValueError('y must have at least two dimensions for multi target classification but has only one')
if (y.shape[1] != n_outputs_):
raise ValueError('The number of outputs of Y for fit {0} and score {1} should be same'.format(n_outputs_, y.shape[1]))
y_pred = self.predict(X)
return np.mean(np.all((y == y_pred), axis=1))
|
'Fit the model to data matrix X and targets Y.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Y : array-like, shape (n_samples, n_classes)
The target values.
Returns
self : object
Returns self.'
| def fit(self, X, Y):
| (X, Y) = check_X_y(X, Y, multi_output=True, accept_sparse=True)
random_state = check_random_state(self.random_state)
check_array(X, accept_sparse=True)
self.order_ = self.order
if (self.order_ is None):
self.order_ = np.array(range(Y.shape[1]))
elif isinstance(self.order_, str):
if (self.order_ == 'random'):
self.order_ = random_state.permutation(Y.shape[1])
elif (sorted(self.order_) != list(range(Y.shape[1]))):
raise ValueError('invalid order')
self.estimators_ = [clone(self.base_estimator) for _ in range(Y.shape[1])]
self.classes_ = []
if (self.cv is None):
Y_pred_chain = Y[:, self.order_]
if sp.issparse(X):
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
X_aug = X_aug.tocsr()
else:
X_aug = np.hstack((X, Y_pred_chain))
elif sp.issparse(X):
Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1]))
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
else:
Y_pred_chain = np.zeros((X.shape[0], Y.shape[1]))
X_aug = np.hstack((X, Y_pred_chain))
del Y_pred_chain
for (chain_idx, estimator) in enumerate(self.estimators_):
y = Y[:, self.order_[chain_idx]]
estimator.fit(X_aug[:, :(X.shape[1] + chain_idx)], y)
if ((self.cv is not None) and (chain_idx < (len(self.estimators_) - 1))):
col_idx = (X.shape[1] + chain_idx)
cv_result = cross_val_predict(self.base_estimator, X_aug[:, :col_idx], y=y, cv=self.cv)
if sp.issparse(X_aug):
X_aug[:, col_idx] = np.expand_dims(cv_result, 1)
else:
X_aug[:, col_idx] = cv_result
self.classes_.append(estimator.classes_)
return self
|
'Predict on the data matrix X using the ClassifierChain model.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
Y_pred : array-like, shape (n_samples, n_classes)
The predicted values.'
| def predict(self, X):
| X = check_array(X, accept_sparse=True)
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for (chain_idx, estimator) in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
if (chain_idx == 0):
X_aug = X
else:
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_pred = Y_pred_chain[:, inv_order]
return Y_pred
|
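Assuming the fit/predict pair above belongs to scikit-learn's ClassifierChain, a minimal usage sketch with a LogisticRegression base estimator on synthetic multilabel data:

```python
from sklearn.datasets import make_multilabel_classification
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import ClassifierChain

X, Y = make_multilabel_classification(n_samples=100, n_classes=4, random_state=0)

# Each link in the chain sees X plus the labels of the earlier links in the chain.
chain = ClassifierChain(LogisticRegression(), order='random', random_state=0)
chain.fit(X, Y)
print(chain.predict(X[:3]))
```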
'Predict probability estimates.
By default the inputs to later models in a chain are the binary class
predictions, not the class probabilities. To use class probabilities
as features in subsequent models, set the cv property to one of
the allowed values other than None.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
Y_prob : array-like, shape (n_samples, n_classes)'
| @if_delegate_has_method('base_estimator')
def predict_proba(self, X):
| X = check_array(X, accept_sparse=True)
Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for (chain_idx, estimator) in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1]
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_prob = Y_prob_chain[:, inv_order]
return Y_prob
|
'Evaluate the decision_function of the models in the chain.
Parameters
X : array-like, shape (n_samples, n_features)
Returns
Y_decision : array-like, shape (n_samples, n_classes )
Returns the decision function of the sample for each model
in the chain.'
| @if_delegate_has_method('base_estimator')
def decision_function(self, X):
| Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for (chain_idx, estimator) in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug)
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_decision = Y_decision_chain[:, inv_order]
return Y_decision
|
'Fit underlying estimators.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
self'
| def fit(self, X, y):
| self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
self.classes_ = self.label_binarizer_.classes_
columns = (col.toarray().ravel() for col in Y.T)
self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_fit_binary)(self.estimator, X, column, classes=[('not %s' % self.label_binarizer_.classes_[i]), self.label_binarizer_.classes_[i]]) for (i, column) in enumerate(columns)))
return self
|
'Partially fit underlying estimators.
Should be used when memory is insufficient to train on all the data at once.
Chunks of data can be passed in several iterations.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
self'
| @if_delegate_has_method('estimator')
def partial_fit(self, X, y, classes=None):
| if _check_partial_fit_first_call(self, classes):
if (not hasattr(self.estimator, 'partial_fit')):
raise ValueError("Base estimator {0}, doesn't have partial_fit method".format(self.estimator))
self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)]
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
self.label_binarizer_.fit(self.classes_)
if len(np.setdiff1d(y, self.classes_)):
raise ValueError(('Mini-batch contains {0} while classes ' + 'must be subset of {1}').format(np.unique(y), self.classes_))
Y = self.label_binarizer_.transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_partial_fit_binary)(estimator, X, column) for (estimator, column) in izip(self.estimators_, columns)))
return self
|
'Predict multi-class targets using underlying estimators.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes].
Predicted multi-class targets.'
| def predict(self, X):
| check_is_fitted(self, 'estimators_')
if (hasattr(self.estimators_[0], 'decision_function') and is_classifier(self.estimators_[0])):
thresh = 0
else:
thresh = 0.5
n_samples = _num_samples(X)
if (self.label_binarizer_.y_type_ == 'multiclass'):
maxima = np.empty(n_samples, dtype=float)
maxima.fill((- np.inf))
argmaxima = np.zeros(n_samples, dtype=int)
for (i, e) in enumerate(self.estimators_):
pred = _predict_binary(e, X)
np.maximum(maxima, pred, out=maxima)
argmaxima[(maxima == pred)] = i
return self.classes_[np.array(argmaxima.T)]
else:
indices = array.array('i')
indptr = array.array('i', [0])
for e in self.estimators_:
indices.extend(np.where((_predict_binary(e, X) > thresh))[0])
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
indicator = sp.csc_matrix((data, indices, indptr), shape=(n_samples, len(self.estimators_)))
return self.label_binarizer_.inverse_transform(indicator)
|
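Assuming the surrounding class is scikit-learn's OneVsRestClassifier, a minimal usage sketch (LinearSVC and the iris data are illustrative choices):

```python
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC

X, y = load_iris(return_X_y=True)

# One binary LinearSVC per class; predict() picks the class with the largest score.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(X, y)
print(ovr.predict(X[:5]))
```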
'Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
T : (sparse) array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.'
| @if_delegate_has_method(['_first_estimator', 'estimator'])
def predict_proba(self, X):
| check_is_fitted(self, 'estimators_')
Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
if (len(self.estimators_) == 1):
Y = np.concatenate(((1 - Y), Y), axis=1)
if (not self.multilabel_):
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
|
'Returns the distance of each sample from the decision boundary for
each class. This can only be used with estimators which implement the
decision_function method.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
T : array-like, shape = [n_samples, n_classes]'
| @if_delegate_has_method(['_first_estimator', 'estimator'])
def decision_function(self, X):
| check_is_fitted(self, 'estimators_')
if (len(self.estimators_) == 1):
return self.estimators_[0].decision_function(X)
return np.array([est.decision_function(X).ravel() for est in self.estimators_]).T
|
'Whether this is a multilabel classifier'
| @property
def multilabel_(self):
| return self.label_binarizer_.y_type_.startswith('multilabel')
|
'Indicate if wrapped estimator is using a precomputed Gram matrix'
| @property
def _pairwise(self):
| return getattr(self.estimator, '_pairwise', False)
|
'Fit underlying estimators.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
self'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y, accept_sparse=['csr', 'csc'])
check_classification_targets(y)
self.classes_ = np.unique(y)
if (len(self.classes_) == 1):
raise ValueError('OneVsOneClassifier can not be fit when only one class is present.')
n_classes = self.classes_.shape[0]
estimators_indices = list(zip(*Parallel(n_jobs=self.n_jobs)((delayed(_fit_ovo_binary)(self.estimator, X, y, self.classes_[i], self.classes_[j]) for i in range(n_classes) for j in range((i + 1), n_classes)))))
self.estimators_ = estimators_indices[0]
try:
self.pairwise_indices_ = (estimators_indices[1] if self._pairwise else None)
except AttributeError:
self.pairwise_indices_ = None
return self
|
'Partially fit underlying estimators.
Should be used when memory is insufficient to train on all the data at once. Chunks
of data can be passed in several iterations, where the first call
should have an array of all target variables.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
self'
| @if_delegate_has_method(delegate='estimator')
def partial_fit(self, X, y, classes=None):
| if _check_partial_fit_first_call(self, classes):
self.estimators_ = [clone(self.estimator) for i in range(((self.n_classes_ * (self.n_classes_ - 1)) // 2))]
if len(np.setdiff1d(y, self.classes_)):
raise ValueError('Mini-batch contains {0} while it must be subset of {1}'.format(np.unique(y), self.classes_))
(X, y) = check_X_y(X, y, accept_sparse=['csr', 'csc'])
check_classification_targets(y)
combinations = itertools.combinations(range(self.n_classes_), 2)
self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_partial_fit_ovo_binary)(estimator, X, y, self.classes_[i], self.classes_[j]) for (estimator, (i, j)) in izip(self.estimators_, combinations)))
self.pairwise_indices_ = None
return self
|
'Estimate the best class label for each sample in X.
This is implemented as ``argmax(decision_function(X), axis=1)`` which
will return the label of the class with most votes by estimators
predicting the outcome of a decision for each possible class pair.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
y : numpy array of shape [n_samples]
Predicted multi-class targets.'
| def predict(self, X):
| Y = self.decision_function(X)
if (self.n_classes_ == 2):
return self.classes_[(Y > 0).astype(np.int)]
return self.classes_[Y.argmax(axis=1)]
|
'Decision function for the OneVsOneClassifier.
The decision values for the samples are computed by adding the
normalized sum of pair-wise classification confidence levels to the
votes in order to disambiguate between the decision values when the
votes for all the classes are equal leading to a tie.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
Y : array-like, shape = [n_samples, n_classes]'
| def decision_function(self, X):
| check_is_fitted(self, 'estimators_')
indices = self.pairwise_indices_
if (indices is None):
Xs = ([X] * len(self.estimators_))
else:
Xs = [X[:, idx] for idx in indices]
predictions = np.vstack([est.predict(Xi) for (est, Xi) in zip(self.estimators_, Xs)]).T
confidences = np.vstack([_predict_binary(est, Xi) for (est, Xi) in zip(self.estimators_, Xs)]).T
Y = _ovr_decision_function(predictions, confidences, len(self.classes_))
if (self.n_classes_ == 2):
return Y[:, 1]
return Y
|
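Assuming the surrounding class is scikit-learn's OneVsOneClassifier, a minimal sketch showing the pairwise decision_function described above (estimator and data are illustrative):

```python
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import LinearSVC

X, y = load_iris(return_X_y=True)

# n_classes * (n_classes - 1) / 2 pairwise classifiers; votes plus normalized
# confidences break ties in decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(X, y)
print(ovo.decision_function(X[:3]).shape)  # (3, 3) for the 3 iris classes
```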
'Indicate if wrapped estimator is using a precomputed Gram matrix'
| @property
def _pairwise(self):
| return getattr(self.estimator, '_pairwise', False)
|
'Fit underlying estimators.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : numpy array of shape [n_samples]
Multi-class targets.
Returns
self'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y)
if (self.code_size <= 0):
raise ValueError('code_size should be greater than 0, got {0}'.format(self.code_size))
_check_estimator(self.estimator)
random_state = check_random_state(self.random_state)
check_classification_targets(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
code_size_ = int((n_classes * self.code_size))
self.code_book_ = random_state.random_sample((n_classes, code_size_))
self.code_book_[(self.code_book_ > 0.5)] = 1
if hasattr(self.estimator, 'decision_function'):
self.code_book_[(self.code_book_ != 1)] = (-1)
else:
self.code_book_[(self.code_book_ != 1)] = 0
classes_index = dict(((c, i) for (i, c) in enumerate(self.classes_)))
Y = np.array([self.code_book_[classes_index[y[i]]] for i in range(X.shape[0])], dtype=np.int)
self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_fit_binary)(self.estimator, X, Y[:, i]) for i in range(Y.shape[1])))
return self
|
'Predict multi-class targets using underlying estimators.
Parameters
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
y : numpy array of shape [n_samples]
Predicted multi-class targets.'
| def predict(self, X):
| check_is_fitted(self, 'estimators_')
X = check_array(X)
Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
return self.classes_[pred]
|
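Assuming the surrounding class is scikit-learn's OutputCodeClassifier, a minimal usage sketch; code_size=1.5 yields int(1.5 * n_classes) binary problems, as in the fit method above:

```python
from sklearn.datasets import load_iris
from sklearn.multiclass import OutputCodeClassifier
from sklearn.svm import LinearSVC

X, y = load_iris(return_X_y=True)

# Each class gets a random binary code word; prediction picks the nearest code word.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=1.5, random_state=0)
ecoc.fit(X, y)
print(ecoc.predict(X[:5]))
```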
'Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
h : array, shape (n_samples, n_components)
Latent representations of the data.'
| def transform(self, X):
| check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
|
'Computes the probabilities P(h=1|v).
Parameters
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.'
| def _mean_hiddens(self, v):
| p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
|
'Sample from the distribution P(h|v).
Parameters
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.'
| def _sample_hiddens(self, v, rng):
| p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
|
'Sample from the distribution P(v|h).
Parameters
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
v : array-like, shape (n_samples, n_features)
Values of the visible layer.'
| def _sample_visibles(self, h, rng):
| p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
|
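A standalone numpy/scipy sketch of the Bernoulli sampling pattern used by _sample_hiddens and _sample_visibles above; the activation matrix is a made-up stand-in for the linear term plus bias:

```python
import numpy as np
from scipy.special import expit

rng = np.random.RandomState(0)

# Compute activation probabilities, then threshold uniform noise against them.
activations = rng.normal(size=(4, 3))          # stand-in for W @ v + bias
p = expit(activations)                         # P(unit = 1 | other layer)
samples = rng.random_sample(size=p.shape) < p  # boolean sample of the layer
print(samples.astype(int))
```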
'Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
free_energy : array-like, shape (n_samples,)
The value of the free energy.'
| def _free_energy(self, v):
| return ((- safe_sparse_dot(v, self.intercept_visible_)) - np.logaddexp(0, (safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_)).sum(axis=1))
|
'Perform one Gibbs sampling step.
Parameters
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.'
| def gibbs(self, v):
| check_is_fitted(self, 'components_')
if (not hasattr(self, 'random_state_')):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
|
'Fit the model to the data X which should contain a partial
segment of the data.
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
Returns
self : BernoulliRBM
The fitted model.'
| def partial_fit(self, X, y=None):
| X = check_array(X, accept_sparse='csr', dtype=np.float64)
if (not hasattr(self, 'random_state_')):
self.random_state_ = check_random_state(self.random_state)
if (not hasattr(self, 'components_')):
self.components_ = np.asarray(self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])), order='F')
if (not hasattr(self, 'intercept_hidden_')):
self.intercept_hidden_ = np.zeros(self.n_components)
if (not hasattr(self, 'intercept_visible_')):
self.intercept_visible_ = np.zeros(X.shape[1])
if (not hasattr(self, 'h_samples_')):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
|
'Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.'
| def _fit(self, v_pos, rng):
| h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = (float(self.learning_rate) / v_pos.shape[0])
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += (lr * update)
self.intercept_hidden_ += (lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)))
self.intercept_visible_ += (lr * (np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0)))
h_neg[(rng.uniform(size=h_neg.shape) < h_neg)] = 1.0
self.h_samples_ = np.floor(h_neg, h_neg)
|
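This update implements persistent contrastive divergence (Stochastic Maximum Likelihood): positive statistics come from the mini-batch, negative statistics from a persistent fantasy chain, and the weight gradient is the difference of the two outer products. A self-contained sketch with stand-in parameter names; the per-sample scaling of the learning rate from the body is left to the caller:

import numpy as np
from scipy.special import expit

def pcd_step(W, b_h, b_v, v_pos, h_samples, lr, rng):
    # Positive phase: hidden means driven by the data batch.
    h_pos = expit(np.dot(v_pos, W.T) + b_h)
    # Negative phase: reconstruct visibles from the persistent hidden samples.
    p_v = expit(np.dot(h_samples, W) + b_v)
    v_neg = (rng.random_sample(p_v.shape) < p_v).astype(float)
    h_neg = expit(np.dot(v_neg, W.T) + b_h)

    W = W + lr * (np.dot(h_pos.T, v_pos) - np.dot(h_neg.T, v_neg))
    b_h = b_h + lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
    b_v = b_v + lr * (v_pos.sum(axis=0) - v_neg.sum(axis=0))

    # Resample the persistent chain's hidden states for the next step.
    h_samples = (rng.random_sample(h_neg.shape) < h_neg).astype(float)
    return W, b_h, b_v, h_samples

rng = np.random.RandomState(0)
W = rng.normal(0, 0.01, (3, 6))
b_h, b_v = np.zeros(3), np.zeros(6)
v_pos = (rng.rand(10, 6) > 0.5).astype(float)
h_samples = np.zeros((10, 3))
W, b_h, b_v, h_samples = pcd_step(W, b_h, b_v, v_pos, h_samples, lr=0.1 / 10, rng=rng)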
'Compute the pseudo-likelihood of X.
Parameters
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.'
| def score_samples(self, X):
| check_is_fitted(self, 'components_')
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = (((-2) * v[ind]) + 1)
v_ = (v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape))
else:
v_ = v.copy()
v_[ind] = (1 - v_[ind])
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return (v.shape[1] * log_logistic((fe_ - fe)))
|
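The pseudo-likelihood proxy flips one randomly chosen bit per sample and returns n_features times the log-sigmoid of the free-energy difference between the corrupted and original inputs. A dense-only sketch written against any callable `free_energy` (the sparse branch from the body is omitted):

import numpy as np

def pseudo_log_likelihood(free_energy, V, rng):
    # free_energy: callable mapping an (n, d) array to an (n,) array of F(v).
    n, d = V.shape
    cols = rng.randint(0, d, n)                   # one random feature per sample
    V_flip = V.copy()
    V_flip[np.arange(n), cols] = 1 - V_flip[np.arange(n), cols]
    diff = free_energy(V_flip) - free_energy(V)
    # log sigmoid(x), computed stably as -log(1 + exp(-x))
    return d * (-np.logaddexp(0, -diff))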
'Fit the model to the data X.
Parameters
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
self : BernoulliRBM
The fitted model.'
| def fit(self, X, y=None):
| X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='F')
self.intercept_hidden_ = np.zeros(self.n_components)
self.intercept_visible_ = np.zeros(X.shape[1])
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil((float(n_samples) / self.batch_size)))
batch_slices = list(gen_even_slices((n_batches * self.batch_size), n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, (self.n_iter + 1)):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print ('[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs' % (type(self).__name__, iteration, self.score_samples(X).mean(), (end - begin)))
begin = end
return self
|
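Taken together, the methods above make up the public `BernoulliRBM` estimator, so the mini-batch loop can be exercised end-to-end. A small usage sketch on random binary data; the hyperparameter values are illustrative only:

import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.rand(100, 16) > 0.5).astype(np.float64)   # toy binary data

rbm = BernoulliRBM(n_components=8, learning_rate=0.05, batch_size=10,
                   n_iter=5, verbose=True, random_state=0)
rbm.fit(X)                 # runs the mini-batch loop shown above
H = rbm.transform(X)       # (100, 8) hidden activation probabilities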
'Extract the coefficients and intercepts from packed_parameters.'
| def _unpack(self, packed_parameters):
| for i in range((self.n_layers_ - 1)):
(start, end, shape) = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
(start, end) = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
|
'Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
with_output_activation : bool, default True
If True, the output passes through the output activation
function, which is either the softmax function or the
logistic function'
| def _forward_pass(self, activations):
| hidden_activation = ACTIVATIONS[self.activation]
for i in range((self.n_layers_ - 1)):
activations[(i + 1)] = safe_sparse_dot(activations[i], self.coefs_[i])
activations[(i + 1)] += self.intercepts_[i]
if ((i + 1) != (self.n_layers_ - 1)):
activations[(i + 1)] = hidden_activation(activations[(i + 1)])
output_activation = ACTIVATIONS[self.out_activation_]
activations[(i + 1)] = output_activation(activations[(i + 1)])
return activations
|
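The forward pass applies an affine transform per layer, with the hidden activation on every layer except the last, which gets the output activation. A compact sketch with tanh hidden units and an identity output as stand-ins for the `ACTIVATIONS` lookup:

import numpy as np

def forward_pass(X, coefs, intercepts, hidden_act=np.tanh,
                 output_act=lambda z: z):
    # Hidden layers get the nonlinearity; the final layer gets output_act.
    a = X
    last = len(coefs) - 1
    for i, (W, b) in enumerate(zip(coefs, intercepts)):
        a = np.dot(a, W) + b
        a = hidden_act(a) if i < last else output_act(a)
    return a

rng = np.random.RandomState(0)
X = rng.randn(8, 4)
coefs = [rng.randn(4, 5) * 0.1, rng.randn(5, 3) * 0.1]
intercepts = [np.zeros(5), np.zeros(3)]
print(forward_pass(X, coefs, intercepts).shape)   # (8, 3)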
'Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.'
| def _compute_loss_grad(self, layer, n_samples, activations, deltas, coef_grads, intercept_grads):
| coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
return (coef_grads, intercept_grads)
|
'Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Returned gradients are packed in a single vector so it can be used
in lbfgs
Parameters
packed_parameters : array-like
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grad : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
loss : float
grad : array-like, shape (number of nodes of all layers,)'
| def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas, coef_grads, intercept_grads):
| self._unpack(packed_coef_inter)
(loss, coef_grads, intercept_grads) = self._backprop(X, y, activations, deltas, coef_grads, intercept_grads)
self.n_iter_ += 1
grad = _pack(coef_grads, intercept_grads)
return (loss, grad)
|
'Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grad : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1'
| def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
| n_samples = X.shape[0]
activations = self._forward_pass(activations)
loss_func_name = self.loss
if ((loss_func_name == 'log_loss') and (self.out_activation_ == 'logistic')):
loss_func_name = 'binary_log_loss'
loss = LOSS_FUNCTIONS[loss_func_name](y, activations[(-1)])
values = np.sum(np.array([np.dot(s.ravel(), s.ravel()) for s in self.coefs_]))
loss += (((0.5 * self.alpha) * values) / n_samples)
last = (self.n_layers_ - 2)
deltas[last] = (activations[(-1)] - y)
(coef_grads, intercept_grads) = self._compute_loss_grad(last, n_samples, activations, deltas, coef_grads, intercept_grads)
for i in range((self.n_layers_ - 2), 0, (-1)):
deltas[(i - 1)] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
inplace_derivative = DERIVATIVES[self.activation]
inplace_derivative(activations[i], deltas[(i - 1)])
(coef_grads, intercept_grads) = self._compute_loss_grad((i - 1), n_samples, activations, deltas, coef_grads, intercept_grads)
return (loss, coef_grads, intercept_grads)
|
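The backward pass starts from the output delta `activations[-1] - y` (valid for the matched loss/output-activation pairs) and pushes it through the transposed weights and the activation derivative, layer by layer. A sketch for a tanh network with identity output and mean squared error; the L2 penalty term from the body is omitted:

import numpy as np

def backprop_tanh(X, y, coefs, intercepts):
    # Forward pass, keeping every layer's activations.
    activations = [X]
    for i, (W, b) in enumerate(zip(coefs, intercepts)):
        z = np.dot(activations[-1], W) + b
        activations.append(np.tanh(z) if i < len(coefs) - 1 else z)

    n = X.shape[0]
    deltas = [None] * len(coefs)
    deltas[-1] = activations[-1] - y                 # dL/dz at the output layer
    for i in range(len(coefs) - 2, -1, -1):
        deltas[i] = np.dot(deltas[i + 1], coefs[i + 1].T)
        deltas[i] *= 1 - activations[i + 1] ** 2     # tanh'(z) = 1 - tanh(z)**2
    coef_grads = [np.dot(activations[i].T, deltas[i]) / n for i in range(len(coefs))]
    intercept_grads = [deltas[i].mean(axis=0) for i in range(len(coefs))]
    return coef_grads, intercept_grads

rng = np.random.RandomState(0)
X, y = rng.randn(8, 4), rng.randn(8, 1)              # y must be 2-D here
coefs = [rng.randn(4, 5) * 0.1, rng.randn(5, 1) * 0.1]
intercepts = [np.zeros(5), np.zeros(1)]
coef_grads, intercept_grads = backprop_tanh(X, y, coefs, intercepts)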
'Fit the model to data matrix X and target(s) y.
Parameters
X : array-like or sparse matrix, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
self : returns a trained MLP model.'
| def fit(self, X, y):
| return self._fit(X, y, incremental=False)
|
'Fit the model to data matrix X and target y.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
Returns
self : returns a trained MLP model.'
| @property
def partial_fit(self):
| if (self.solver not in _STOCHASTIC_SOLVERS):
raise AttributeError(('partial_fit is only available for stochastic optimizers. %s is not stochastic.' % self.solver))
return self._partial_fit
|
'Predict using the trained model
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
y_pred : array-like, shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.'
| def _predict(self, X):
| X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
hidden_layer_sizes = self.hidden_layer_sizes
if (not hasattr(hidden_layer_sizes, '__iter__')):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
layer_units = (([X.shape[1]] + hidden_layer_sizes) + [self.n_outputs_])
activations = [X]
for i in range((self.n_layers_ - 1)):
activations.append(np.empty((X.shape[0], layer_units[(i + 1)])))
self._forward_pass(activations)
y_pred = activations[(-1)]
return y_pred
|
'Predict using the multi-layer perceptron classifier
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
y : array-like, shape (n_samples,) or (n_samples, n_classes)
The predicted classes.'
| def predict(self, X):
| check_is_fitted(self, 'coefs_')
y_pred = self._predict(X)
if (self.n_outputs_ == 1):
y_pred = y_pred.ravel()
return self._label_binarizer.inverse_transform(y_pred)
|
'Fit the model to data matrix X and target(s) y.
Parameters
X : array-like or sparse matrix, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
self : returns a trained MLP model.'
| def fit(self, X, y):
| return self._fit(X, y, incremental=(self.warm_start and hasattr(self, 'classes_')))
|
'Fit the model to data matrix X and target y.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
classes : array, shape (n_classes)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn\'t need to contain all labels in `classes`.
Returns
self : returns a trained MLP model.'
| @property
def partial_fit(self):
| if (self.solver not in _STOCHASTIC_SOLVERS):
raise AttributeError(('partial_fit is only available for stochastic optimizer. %s is not stochastic' % self.solver))
return self._partial_fit
|
'Return the log of probability estimates.
Parameters
X : array-like, shape (n_samples, n_features)
The input data.
Returns
log_y_prob : array-like, shape (n_samples, n_classes)
The predicted log-probability of the sample for each class
in the model, where classes are ordered as they are in
`self.classes_`. Equivalent to log(predict_proba(X))'
| def predict_log_proba(self, X):
| y_prob = self.predict_proba(X)
return np.log(y_prob, out=y_prob)
|
'Probability estimates.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
y_prob : array-like, shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.'
| def predict_proba(self, X):
| check_is_fitted(self, 'coefs_')
y_pred = self._predict(X)
if (self.n_outputs_ == 1):
y_pred = y_pred.ravel()
if (y_pred.ndim == 1):
return np.vstack([(1 - y_pred), y_pred]).T
else:
return y_pred
|
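For a single-output (binary) classifier the network emits one probability per sample, so the two-column probability matrix is assembled as (1 - p, p). A tiny numeric illustration with made-up sigmoid outputs:

import numpy as np

p = np.array([0.1, 0.8, 0.5])        # hypothetical sigmoid outputs per sample
proba = np.vstack([1 - p, p]).T      # columns ordered as in classes_
print(proba)
# [[0.9 0.1]
#  [0.2 0.8]
#  [0.5 0.5]]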
'Predict using the multi-layer perceptron model.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
y : array-like, shape (n_samples, n_outputs)
The predicted values.'
| def predict(self, X):
| check_is_fitted(self, 'coefs_')
y_pred = self._predict(X)
if (y_pred.shape[1] == 1):
return y_pred.ravel()
return y_pred
|
'Update parameters with given gradients
Parameters
grads : list, length = len(params)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params'
| def update_params(self, grads):
| updates = self._get_updates(grads)
for (param, update) in zip(self.params, updates):
param += update
|
'Perform update to learning rate and potentially other states at the
end of an iteration'
| def iteration_ends(self, time_step):
| pass
|
'Decides whether it is time to stop training
Parameters
msg : str
Message passed in for verbose output
verbose : bool
Print message to stdout if True
Returns
is_stopping : bool
True if training needs to stop'
| def trigger_stopping(self, msg, verbose):
| if verbose:
print (msg + ' Stopping.')
return True
|
'Perform updates to the learning rate and potentially other states at the
end of an iteration
Parameters
time_step : int
number of training samples trained on so far, used to update
learning rate for \'invscaling\''
| def iteration_ends(self, time_step):
| if (self.lr_schedule == 'invscaling'):
self.learning_rate = (float(self.learning_rate_init) / ((time_step + 1) ** self.power_t))
|
'Get the values used to update params with given gradients
Parameters
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
updates : list, length = len(grads)
The values to add to params'
| def _get_updates(self, grads):
| updates = [((self.momentum * velocity) - (self.learning_rate * grad)) for (velocity, grad) in zip(self.velocities, grads)]
self.velocities = updates
if self.nesterov:
updates = [((self.momentum * velocity) - (self.learning_rate * grad)) for (velocity, grad) in zip(self.velocities, grads)]
return updates
|
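The momentum update keeps one velocity per parameter; with Nesterov enabled the step is recomputed using the refreshed velocity (the look-ahead step), exactly as in the body above. A standalone sketch over a list of gradients:

def momentum_updates(grads, velocities, learning_rate, momentum, nesterov=False):
    # velocity <- momentum * velocity - learning_rate * grad, per parameter
    velocities = [momentum * v - learning_rate * g
                  for v, g in zip(velocities, grads)]
    if nesterov:
        # Nesterov look-ahead: redo the step with the refreshed velocity.
        updates = [momentum * v - learning_rate * g
                   for v, g in zip(velocities, grads)]
    else:
        updates = velocities
    return updates, velocities

updates, vel = momentum_updates(grads=[0.5, -0.2], velocities=[0.0, 0.0],
                                learning_rate=0.1, momentum=0.9)
# updates -> [-0.05, 0.02]; each parameter would then be incremented by its update.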
'Get the values used to update params with given gradients
Parameters
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
updates : list, length = len(grads)
The values to add to params'
| def _get_updates(self, grads):
| self.t += 1
self.ms = [((self.beta_1 * m) + ((1 - self.beta_1) * grad)) for (m, grad) in zip(self.ms, grads)]
self.vs = [((self.beta_2 * v) + ((1 - self.beta_2) * (grad ** 2))) for (v, grad) in zip(self.vs, grads)]
self.learning_rate = ((self.learning_rate_init * np.sqrt((1 - (self.beta_2 ** self.t)))) / (1 - (self.beta_1 ** self.t)))
updates = [(((- self.learning_rate) * m) / (np.sqrt(v) + self.epsilon)) for (m, v) in zip(self.ms, self.vs)]
return updates
|
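Adam keeps exponentially decayed estimates of the first and second moments of each gradient and rescales the step by their bias-corrected ratio, mirroring the body above. A standalone sketch:

import numpy as np

def adam_updates(grads, ms, vs, t, lr_init, beta_1=0.9, beta_2=0.999, eps=1e-8):
    # First/second moment estimates, then a bias-corrected step size.
    t += 1
    ms = [beta_1 * m + (1 - beta_1) * g for m, g in zip(ms, grads)]
    vs = [beta_2 * v + (1 - beta_2) * g ** 2 for v, g in zip(vs, grads)]
    lr = lr_init * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
    updates = [-lr * m / (np.sqrt(v) + eps) for m, v in zip(ms, vs)]
    return updates, ms, vs, t

grads = [np.array([0.5, -0.2])]
updates, ms, vs, t = adam_updates(grads, ms=[np.zeros(2)], vs=[np.zeros(2)],
                                  t=0, lr_init=0.001)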
'Split can be called only once'
| def split(self, X=None, y=None, groups=None):
| for index in self.indices:
(yield index)
|
'The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function'
| def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None):
| self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if (callback is not None):
callback(self)
if self.allow_nd:
X = X.reshape(len(X), (-1))
if ((X.ndim >= 3) and (not self.allow_nd)):
raise ValueError('X cannot be d')
if (sample_weight is not None):
assert_true((sample_weight.shape[0] == X.shape[0]), 'MockClassifier extra fit_param sample_weight.shape[0] is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0]))
if (class_prior is not None):
assert_true((class_prior.shape[0] == len(np.unique(y))), 'MockClassifier extra fit_param class_prior.shape[0] is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y))))
if (sparse_sample_weight is not None):
fmt = 'MockClassifier extra fit_param sparse_sample_weight.shape[0] is {0}, should be {1}'
assert_true((sparse_sample_weight.shape[0] == X.shape[0]), fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if (sparse_param is not None):
fmt = 'MockClassifier extra fit_param sparse_param.shape is ({0}, {1}), should be ({2}, {3})'
assert_true((sparse_param.shape == P_sparse.shape), fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1]))
return self
|
'Generate indices to split data into training and test set.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting ``random_state``
to an integer.'
| def split(self, X, y=None, groups=None):
| (X, y, groups) = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
(yield (train_index, test_index))
|
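The generator above turns each boolean test mask into a pair of integer index arrays: test indices where the mask is True, train indices on its complement. A minimal sketch of that conversion for one mask:

import numpy as np

def mask_to_split(test_mask):
    # Turn one boolean test mask into (train_indices, test_indices).
    indices = np.arange(test_mask.shape[0])
    return indices[~test_mask], indices[test_mask]

mask = np.array([False, True, True, False, False])
train, test = mask_to_split(mask)
print(train, test)   # [0 3 4] [1 2]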
'Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)'
| def _iter_test_masks(self, X=None, y=None, groups=None):
| for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
(yield test_mask)
|
'Generates integer indices corresponding to test sets.'
| def _iter_test_indices(self, X=None, y=None, groups=None):
| raise NotImplementedError
|
'Returns the number of splitting iterations in the cross-validator
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X, y=None, groups=None):
| if (X is None):
raise ValueError("The 'X' parameter should not be None.")
return _num_samples(X)
|