desc (string, length 3-26.7k) | decl (string, length 11-7.89k) | bodies (string, length 8-553k)
---|---|---
'Returns the number of splitting iterations in the cross-validator
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X, y=None, groups=None):
| if (X is None):
raise ValueError("The 'X' parameter should not be None.")
return int(comb(_num_samples(X), self.p, exact=True))
|
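The `comb` call above counts every possible test set of size `p`, so the number of splitting iterations grows combinatorially with the sample count. A minimal sketch of that arithmetic, assuming SciPy is available (the sample sizes are hypothetical):

```python
from scipy.special import comb

n_samples, p = 5, 2                             # hypothetical sizes
n_splits = int(comb(n_samples, p, exact=True))  # "5 choose 2"
print(n_splits)                                 # 10 possible test sets of size 2
```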
'Generate indices to split data into training and test set.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting ``random_state``
to an integer.'
| def split(self, X, y=None, groups=None):
| (X, y, groups) = indexable(X, y, groups)
n_samples = _num_samples(X)
if (self.n_splits > n_samples):
raise ValueError('Cannot have number of splits n_splits={0} greater than the number of samples: {1}.'.format(self.n_splits, n_samples))
for (train, test) in super(_BaseKFold, self).split(X, y, groups):
(yield (train, test))
|
'Returns the number of splitting iterations in the cross-validator
Parameters
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X=None, y=None, groups=None):
| return self.n_splits
|
'Generate indices to split data into training and test set.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting ``random_state``
to an integer.'
| def split(self, X, y, groups=None):
| y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
|
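Since `check_array` only validates `y` before delegating to the parent splitter, a placeholder `X` is enough to obtain the stratified folds, as the docstring notes. A small usage sketch, assuming scikit-learn is installed:

```python
import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.zeros((8, 1))                    # placeholder features; y drives the stratification
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
for train_idx, test_idx in StratifiedKFold(n_splits=2).split(X, y):
    print(train_idx, test_idx)          # each fold keeps the 50/50 class ratio
```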
'Generate indices to split data into training and test set.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting ``random_state``
to an integer.'
| def split(self, X, y=None, groups=None):
| (X, y, groups) = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = (n_splits + 1)
if (n_folds > n_samples):
raise ValueError('Cannot have number of folds ={0} greater than the number of samples: {1}.'.format(n_folds, n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range((test_size + (n_samples % n_folds)), n_samples, test_size)
for test_start in test_starts:
if (self.max_train_size and (self.max_train_size < test_start)):
(yield (indices[(test_start - self.max_train_size):test_start], indices[test_start:(test_start + test_size)]))
else:
(yield (indices[:test_start], indices[test_start:(test_start + test_size)]))
|
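The fold boundaries above come from plain integer arithmetic: the data is cut into `n_splits + 1` contiguous blocks and each test block is preceded by all earlier samples. A worked sketch of that arithmetic with hypothetical sizes:

```python
import numpy as np

n_samples, n_splits = 6, 3                  # hypothetical values
n_folds = n_splits + 1
test_size = n_samples // n_folds            # 1
indices = np.arange(n_samples)
for test_start in range(test_size + n_samples % n_folds, n_samples, test_size):
    train = indices[:test_start]
    test = indices[test_start:test_start + test_size]
    print(train, test)                      # the training window always precedes the test window
```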
'Returns the number of splitting iterations in the cross-validator
Parameters
X : object, optional
Always ignored, exists for compatibility.
y : object, optional
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. This \'groups\' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X=None, y=None, groups=None):
| if (groups is None):
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return len(np.unique(groups))
|
'Returns the number of splitting iterations in the cross-validator
Parameters
X : object, optional
Always ignored, exists for compatibility.
y : object, optional
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. This \'groups\' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X=None, y=None, groups=None):
| if (groups is None):
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
|
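Here the split count depends only on how many distinct group labels exist, not on the number of samples: every combination of `n_groups` groups becomes one test set. A short sketch with hypothetical labels:

```python
import numpy as np
from scipy.special import comb

groups = np.array(['a', 'a', 'b', 'b', 'c', 'c'])
n_groups_out = 2                                    # hypothetical value of self.n_groups
print(int(comb(len(np.unique(groups)), n_groups_out, exact=True)))   # C(3, 2) == 3
```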
'Generates indices to split data into training and test set.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.'
| def split(self, X, y=None, groups=None):
| n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
for (train_index, test_index) in cv.split(X, y, groups):
(yield (train_index, test_index))
|
'Returns the number of splitting iterations in the cross-validator
Parameters
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X=None, y=None, groups=None):
| rng = check_random_state(self.random_state)
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
return (cv.get_n_splits(X, y, groups) * self.n_repeats)
|
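The total above is simply the base splitter's split count multiplied by the number of repetitions. For example, with scikit-learn's `RepeatedKFold` (shown only to illustrate the formula):

```python
from sklearn.model_selection import RepeatedKFold

rkf = RepeatedKFold(n_splits=5, n_repeats=3, random_state=0)
print(rkf.get_n_splits())   # 5 folds * 3 repeats == 15 splitting iterations
```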
'Generate indices to split data into training and test set.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting ``random_state``
to an integer.'
| def split(self, X, y=None, groups=None):
| (X, y, groups) = indexable(X, y, groups)
for (train, test) in self._iter_indices(X, y, groups):
(yield (train, test))
|
'Returns the number of splitting iterations in the cross-validator
Parameters
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X=None, y=None, groups=None):
| return self.n_splits
|
'Generate indices to split data into training and test set.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting ``random_state``
to an integer.'
| def split(self, X, y, groups=None):
| y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
|
'Generate indices to split data into training and test set.
Parameters
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.'
| def split(self, X=None, y=None, groups=None):
| ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
(yield (train_index, test_index))
|
'Generates boolean masks corresponding to test sets.'
| def _iter_test_masks(self):
| for f in self.unique_folds:
test_index = np.where((self.test_fold == f))[0]
test_mask = np.zeros(len(self.test_fold), dtype=bool)
test_mask[test_index] = True
(yield test_mask)
|
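The two methods above realize a predefined split: each entry of `test_fold` names the test fold its sample belongs to, and samples marked `-1` never appear in a test set. A brief usage sketch with scikit-learn's `PredefinedSplit` and hypothetical fold labels:

```python
import numpy as np
from sklearn.model_selection import PredefinedSplit

test_fold = np.array([0, 1, -1, 1, 0])   # -1: always kept in the training set
ps = PredefinedSplit(test_fold)
print(ps.get_n_splits())                 # 2 unique test folds
for train_idx, test_idx in ps.split():
    print(train_idx, test_idx)
```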
'Returns the number of splitting iterations in the cross-validator
Parameters
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X=None, y=None, groups=None):
| return len(self.unique_folds)
|
'Returns the number of splitting iterations in the cross-validator
Parameters
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
n_splits : int
Returns the number of splitting iterations in the cross-validator.'
| def get_n_splits(self, X=None, y=None, groups=None):
| return len(self.cv)
|
'Generate indices to split data into training and test set.
Parameters
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.'
| def split(self, X=None, y=None, groups=None):
| for (train, test) in self.cv:
(yield (train, test))
|
'Iterate over the points in the grid.
Returns
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.'
| def __iter__(self):
| for p in self.param_grid:
items = sorted(p.items())
if (not items):
(yield {})
else:
(keys, values) = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
(yield params)
|
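The iterator above expands each sub-grid with a Cartesian product over sorted parameter names; an empty dict contributes the single empty setting. The same expansion, sketched directly with `itertools.product` on a hypothetical grid:

```python
from itertools import product

param_grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'C': [1, 10]}]
for sub_grid in param_grid:
    keys, values = zip(*sorted(sub_grid.items()))
    for combo in product(*values):
        print(dict(zip(keys, combo)))    # 1 + 2 == 3 candidate settings in total
```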
'Number of points on the grid.'
| def __len__(self):
| product = partial(reduce, operator.mul)
return sum(((product((len(v) for v in p.values())) if p else 1) for p in self.param_grid))
|
'Get the parameters that would be ``ind``th in iteration
Parameters
ind : int
The iteration index
Returns
params : dict of string to any
Equal to list(self)[ind]'
| def __getitem__(self, ind):
| for sub_grid in self.param_grid:
if (not sub_grid):
if (ind == 0):
return {}
else:
ind -= 1
continue
(keys, values_lists) = zip(*sorted(sub_grid.items())[::(-1)])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if (ind >= total):
ind -= total
else:
out = {}
for (key, v_list, n) in zip(keys, values_lists, sizes):
(ind, offset) = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
|
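`__getitem__` treats each sub-grid as a mixed-radix number: repeated `divmod` calls peel off one parameter's offset at a time, so the lookup never materializes the full product. A worked example of that indexing with a hypothetical two-parameter grid:

```python
sub_grid = {'C': [1, 10, 100], 'gamma': [0.1, 0.01]}
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])   # ('gamma', 'C')
sizes = [len(v) for v in values_lists]                      # [2, 3]
ind, out = 4, {}
for key, v_list, n in zip(keys, values_lists, sizes):
    ind, offset = divmod(ind, n)
    out[key] = v_list[offset]
print(out)   # {'gamma': 0.1, 'C': 100} -- the same setting as list(ParameterGrid(sub_grid))[4]
```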
'Number of points that will be sampled.'
| def __len__(self):
| return self.n_iter
|
'Simple custom repr to summarize the main info'
| def __repr__(self):
| return 'mean: {0:.5f}, std: {1:.5f}, params: {2}'.format(self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters)
|
'Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
score : float'
| def score(self, X, y=None):
| self._check_is_fitted('score')
if (self.scorer_ is None):
raise ValueError(("No score function explicitly defined, and the estimator doesn't provide one %s" % self.best_estimator_))
score = (self.scorer_[self.refit] if self.multimetric_ else self.scorer_)
return score(self.best_estimator_, X, y)
|
'Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
| self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
|
'Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
| self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
|
'Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
| self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
|
'Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
| self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
|
'Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
| self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
|
'Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
| self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt)
|
'Run fit with all sets of parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator'
| def fit(self, X, y=None, groups=None, **fit_params):
| if (self.fit_params is not None):
warnings.warn('"fit_params" as a constructor argument was deprecated in version 0.19 and will be removed in version 0.21. Pass fit parameters to the "fit" method instead.', DeprecationWarning)
if fit_params:
warnings.warn('Ignoring fit_params passed as a constructor argument in favor of keyword arguments to the "fit" method.', RuntimeWarning)
else:
fit_params = self.fit_params
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
(scorers, self.multimetric_) = _check_multimetric_scoring(self.estimator, scoring=self.scoring)
if self.multimetric_:
if ((self.refit is not False) and ((not isinstance(self.refit, six.string_types)) or (self.refit not in scorers))):
raise ValueError(('For multi-metric scoring, the parameter refit must be set to a scorer key to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. %r was passed.' % self.refit))
else:
refit_metric = self.refit
else:
refit_metric = 'score'
(X, y, groups) = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
candidate_params = list(self._get_param_iterator())
n_candidates = len(candidate_params)
if (self.verbose > 0):
print('Fitting {0} folds for each of {1} candidates, totalling {2} fits'.format(n_splits, n_candidates, (n_candidates * n_splits)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch)((delayed(_fit_and_score)(clone(base_estimator), X, y, scorers, train, test, self.verbose, parameters, fit_params=fit_params, return_train_score=self.return_train_score, return_n_test_samples=True, return_times=True, return_parameters=False, error_score=self.error_score) for (parameters, (train, test)) in product(candidate_params, cv.split(X, y, groups))))
if self.return_train_score:
(train_score_dicts, test_score_dicts, test_sample_counts, fit_time, score_time) = zip(*out)
else:
(test_score_dicts, test_sample_counts, fit_time, score_time) = zip(*out)
test_scores = _aggregate_score_dicts(test_score_dicts)
if self.return_train_score:
train_scores = _aggregate_score_dicts(train_score_dicts)
results = dict()
def _store(key_name, array, weights=None, splits=False, rank=False):
'A small helper to store the scores/times to the cv_results_'
array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
if splits:
for split_i in range(n_splits):
results[('split%d_%s' % (split_i, key_name))] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results[('mean_%s' % key_name)] = array_means
array_stds = np.sqrt(np.average(((array - array_means[:, np.newaxis]) ** 2), axis=1, weights=weights))
results[('std_%s' % key_name)] = array_stds
if rank:
results[('rank_%s' % key_name)] = np.asarray(rankdata((- array_means), method='min'), dtype=np.int32)
_store('fit_time', fit_time)
_store('score_time', score_time)
param_results = defaultdict(partial(MaskedArray, np.empty(n_candidates), mask=True, dtype=object))
for (cand_i, params) in enumerate(candidate_params):
for (name, value) in params.items():
param_results[('param_%s' % name)][cand_i] = value
results.update(param_results)
results['params'] = candidate_params
test_sample_counts = np.array(test_sample_counts[:n_splits], dtype=int)
for scorer_name in scorers.keys():
_store(('test_%s' % scorer_name), test_scores[scorer_name], splits=True, rank=True, weights=(test_sample_counts if self.iid else None))
if self.return_train_score:
_store(('train_%s' % scorer_name), train_scores[scorer_name], splits=True)
if (self.refit or (not self.multimetric_)):
self.best_index_ = results[('rank_test_%s' % refit_metric)].argmin()
self.best_params_ = candidate_params[self.best_index_]
self.best_score_ = results[('mean_test_%s' % refit_metric)][self.best_index_]
if self.refit:
self.best_estimator_ = clone(base_estimator).set_params(**self.best_params_)
if (y is not None):
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
self.scorer_ = (scorers if self.multimetric_ else scorers['score'])
self.cv_results_ = results
self.n_splits_ = n_splits
return self
|
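End to end, the `fit` above clones the estimator once per (candidate, fold) pair, aggregates the per-split scores into `cv_results_`, and optionally refits on the full data. A minimal usage sketch, assuming scikit-learn is installed:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
search = GridSearchCV(SVC(), param_grid={'C': [0.1, 1, 10]}, cv=3)
search.fit(X, y)
print(search.best_params_, round(search.best_score_, 3))
print(sorted(k for k in search.cv_results_ if k.startswith('mean_')))   # aggregated columns
```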
'Return ParameterGrid instance for the given param_grid'
| def _get_param_iterator(self):
| return ParameterGrid(self.param_grid)
|
'Return ParameterSampler instance for the given distributions'
| def _get_param_iterator(self):
| return ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state)
|
'Iterate over the points in the grid.
Returns
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.'
| def __iter__(self):
| for p in self.param_grid:
items = sorted(p.items())
if (not items):
(yield {})
else:
(keys, values) = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
(yield params)
|
'Number of points on the grid.'
| def __len__(self):
| product = partial(reduce, operator.mul)
return sum(((product((len(v) for v in p.values())) if p else 1) for p in self.param_grid))
|
'Get the parameters that would be ``ind``th in iteration
Parameters
ind : int
The iteration index
Returns
params : dict of string to any
Equal to list(self)[ind]'
| def __getitem__(self, ind):
| for sub_grid in self.param_grid:
if (not sub_grid):
if (ind == 0):
return {}
else:
ind -= 1
continue
(keys, values_lists) = zip(*sorted(sub_grid.items())[::(-1)])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if (ind >= total):
ind -= total
else:
out = {}
for (key, v_list, n) in zip(keys, values_lists, sizes):
(ind, offset) = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
|
'Number of points that will be sampled.'
| def __len__(self):
| return self.n_iter
|
'Simple custom repr to summarize the main info'
| def __repr__(self):
| return 'mean: {0:.5f}, std: {1:.5f}, params: {2}'.format(self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters)
|
'Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
score : float
Notes
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.'
| def score(self, X, y=None):
| if (self.scorer_ is None):
raise ValueError(("No score function explicitly defined, and the estimator doesn't provide one %s" % self.best_estimator_))
return self.scorer_(self.best_estimator_, X, y)
|
'Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
| return self.best_estimator_.predict(X)
|
'Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
| return self.best_estimator_.predict_proba(X)
|
'Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
| return self.best_estimator_.predict_log_proba(X)
|
'Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
| return self.best_estimator_.decision_function(X)
|
'Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
| return self.best_estimator_.transform(X)
|
'Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.'
| @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
| return self.best_estimator_.inverse_transform(Xt)
|
'Actual fitting, performing the search over parameters.'
| def _fit(self, X, y, parameter_iterable):
| estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
(X, y) = indexable(X, y)
if (y is not None):
if (len(y) != n_samples):
raise ValueError(('Target variable (y) has a different number of samples (%i) than data (X: %i samples)' % (len(y), n_samples)))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if (self.verbose > 0):
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print('Fitting {0} folds for each of {1} candidates, totalling {2} fits'.format(len(cv), n_candidates, (n_candidates * len(cv))))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch)((delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for (train, test) in cv))
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for (this_score, this_n_test_samples, _, parameters) in out[grid_start:(grid_start + n_folds)]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
grid_scores.append(_CVScoreTuple(parameters, score, np.array(all_scores)))
self.grid_scores_ = grid_scores
best = sorted(grid_scores, key=(lambda x: x.mean_validation_score), reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
best_estimator = clone(base_estimator).set_params(**best.parameters)
if (y is not None):
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
|
'Run fit with all sets of parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.'
| def fit(self, X, y=None):
| return self._fit(X, y, ParameterGrid(self.param_grid))
|
'Run fit on the estimator with randomly drawn parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.'
| def fit(self, X, y=None):
| sampled_params = ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state)
return self._fit(X, y, sampled_params)
|
'This is a mock delegated function'
| @if_delegate_has_method(delegate='a_prefix')
def func(self):
| pass
|
'Function f
Parameter
a : int
Parameter a
b : float
Parameter b
Results
c : list
Parameter c'
| def f_bad_sections(self, X, y):
| pass
|
'MetaEstimator to check if doctest on delegated methods work.
Parameters
delegate : estimator
Delegated estimator.'
| def __init__(self, delegate):
| self.delegate = delegate
|
'This is available only if delegate has predict.
Parameters
y : ndarray
Parameter y'
| @if_delegate_has_method(delegate='delegate')
def predict(self, X):
| return self.delegate.predict(X)
|
'This is available only if delegate has predict_proba.
Parameters
X : ndarray
Parameter X'
| @if_delegate_has_method(delegate='delegate')
def predict_proba(self, X):
| return X
|
'This is available only if delegate has predict_proba.
Parameters
y : ndarray
Parameter X'
| @deprecated('Testing deprecated function with incorrect params')
@if_delegate_has_method(delegate='delegate')
def predict_log_proba(self, X):
| return X
|
'Find the first prime element in the specified row. Returns
the column index, or -1 if no prime element was found.'
| def _find_prime_in_row(self, row):
| col = np.argmax((self.marked[row] == 2))
if (self.marked[(row, col)] != 2):
col = (-1)
return col
|
'Clear all covered matrix cells'
| def _clear_covers(self):
| self.row_uncovered[:] = True
self.col_uncovered[:] = True
|
'Call method
Parameters
obj : object'
| def __call__(self, obj):
| if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
|
'Decorate function fun'
| def _decorate_fun(self, fun):
| msg = ('Function %s is deprecated' % fun.__name__)
if self.extra:
msg += ('; %s' % self.extra)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
|
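Applied by hand, the wrapper above leaves the function's behavior untouched and only emits a `DeprecationWarning` on each call. A stand-alone sketch of the same idea, using `functools.wraps` in place of the manual attribute copying (the decorated function is hypothetical):

```python
import warnings
from functools import wraps

def deprecate(fun, extra=''):
    msg = 'Function %s is deprecated' % fun.__name__
    if extra:
        msg += '; %s' % extra
    @wraps(fun)
    def wrapped(*args, **kwargs):
        warnings.warn(msg, category=DeprecationWarning)
        return fun(*args, **kwargs)
    return wrapped

def old_sum(a, b):
    return a + b

old_sum = deprecate(old_sum, 'use the builtin sum instead')
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    old_sum(1, 2)
print(caught[0].message)   # Function old_sum is deprecated; use the builtin sum instead
```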
'Decorator to catch and hide warnings without visual nesting.'
| def __call__(self, fn):
| @wraps(fn)
def wrapper(*args, **kwargs):
clean_warning_registry()
with warnings.catch_warnings():
warnings.simplefilter('ignore', self.category)
return fn(*args, **kwargs)
return wrapper
|
'Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.'
| def __init__(self, mock_datasets):
| self.mock_datasets = mock_datasets
|
'Get a mask, or integer index, of the features selected
Parameters
indices : boolean (default False)
If True, the return value will be an array of integers, rather
than a boolean mask.
Returns
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector.'
| def get_support(self, indices=False):
| mask = self._get_support_mask()
return (mask if (not indices) else np.where(mask)[0])
|
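The `indices=True` branch is just `np.where` applied to the boolean mask, converting the retained-feature mask into integer column positions:

```python
import numpy as np

mask = np.array([True, False, True, False])   # hypothetical support mask
print(np.where(mask)[0])                      # [0 2] -- the same selection as integer indices
```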
'Reduce X to the selected features.
Parameters
X : array of shape [n_samples, n_features]
The input samples.
Returns
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.'
| def transform(self, X):
| X = check_array(X, accept_sparse='csr')
mask = self.get_support()
if (not mask.any()):
warn('No features were selected: either the data is too noisy or the selection test too strict.', UserWarning)
return np.empty(0).reshape((X.shape[0], 0))
if (len(mask) != X.shape[1]):
raise ValueError('X has a different shape than during fitting.')
return X[:, safe_mask(X, mask)]
|
'Reverse the transformation operation
Parameters
X : array of shape [n_samples, n_selected_features]
The input samples.
Returns
X_r : array of shape [n_samples, n_original_features]
`X` with columns of zeros inserted where features would have
been removed by `transform`.'
| def inverse_transform(self, X):
| if issparse(X):
X = X.tocsc()
it = self.inverse_transform(np.diff(X.indptr).reshape(1, (-1)))
col_nonzeros = it.ravel()
indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
Xt = csc_matrix((X.data, X.indices, indptr), shape=(X.shape[0], (len(indptr) - 1)), dtype=X.dtype)
return Xt
support = self.get_support()
X = check_array(X)
if (support.sum() != X.shape[1]):
raise ValueError('X has a different shape than during fitting.')
if (X.ndim == 1):
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
Xt[:, support] = X
return Xt
|
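For dense input, the inverse transform simply scatters the reduced columns back to their original positions and zero-fills the dropped features. The dense path in miniature, with a hypothetical support mask:

```python
import numpy as np

support = np.array([True, False, True, False])   # columns kept by the selector
X_reduced = np.array([[1.0, 2.0],
                      [3.0, 4.0]])
Xt = np.zeros((X_reduced.shape[0], support.size))
Xt[:, support] = X_reduced
print(Xt)   # [[1. 0. 2. 0.]
            #  [3. 0. 4. 0.]]
```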
'Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.'
| def fit(self, X, y):
| return self._fit(X, y)
|
'Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
X : array of shape [n_samples, n_features]
The input samples.
Returns
y : array of shape [n_samples]
The predicted target values.'
| @if_delegate_has_method(delegate='estimator')
def predict(self, X):
| check_is_fitted(self, 'estimator_')
return self.estimator_.predict(self.transform(X))
|
'Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.'
| @if_delegate_has_method(delegate='estimator')
def score(self, X, y):
| check_is_fitted(self, 'estimator_')
return self.estimator_.score(self.transform(X), y)
|
'Fit the RFE model and automatically tune the number of selected
features.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y, 'csr')
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
if (0.0 < self.step < 1.0):
step = int(max(1, (self.step * n_features)))
else:
step = int(self.step)
if (step <= 0):
raise ValueError('Step must be >0')
rfe = RFE(estimator=self.estimator, n_features_to_select=n_features_to_select, step=self.step, verbose=self.verbose)
if (self.n_jobs == 1):
(parallel, func) = (list, _rfe_single_fit)
else:
(parallel, func) = (Parallel(n_jobs=self.n_jobs), delayed(_rfe_single_fit))
scores = parallel((func(rfe, self.estimator, X, y, train, test, scorer) for (train, test) in cv.split(X, y)))
scores = np.sum(scores, axis=0)
n_features_to_select = max((n_features - (np.argmax(scores) * step)), n_features_to_select)
rfe = RFE(estimator=self.estimator, n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
self.grid_scores_ = (scores[::(-1)] / cv.get_n_splits(X, y))
return self
|
'Learn empirical variances from X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
self'
| def fit(self, X, y=None):
| X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, 'toarray'):
(_, self.variances_) = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all((self.variances_ <= self.threshold)):
msg = 'No feature in X meets the variance threshold {0:.5f}'
if (X.shape[0] == 1):
msg += ' (X contains only one sample)'
raise ValueError(msg.format(self.threshold))
return self
|
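Because the check is `variances_ <= threshold`, the default threshold of 0.0 removes exactly the constant columns. A short usage sketch, assuming scikit-learn is installed:

```python
import numpy as np
from sklearn.feature_selection import VarianceThreshold

X = np.array([[0, 2, 0],
              [0, 1, 4],
              [0, 1, 1]])
print(VarianceThreshold(threshold=0.0).fit_transform(X))   # the constant first column is dropped
```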
'Fit the SelectFromModel meta-transformer.
Parameters
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
self : object
Returns self.'
| def fit(self, X, y=None, **fit_params):
| if self.prefit:
raise NotFittedError("Since 'prefit=True', call transform directly")
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **fit_params)
return self
|
'Fit the SelectFromModel meta-transformer only once.
Parameters
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
self : object
Returns self.'
| @if_delegate_has_method('estimator')
def partial_fit(self, X, y=None, **fit_params):
| if self.prefit:
raise NotFittedError("Since 'prefit=True', call transform directly")
if (not hasattr(self, 'estimator_')):
self.estimator_ = clone(self.estimator)
self.estimator_.partial_fit(X, y, **fit_params)
return self
|
'Run score function on (X, y) and get the appropriate features.
Parameters
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
self : object
Returns self.'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
if (not callable(self.score_func)):
raise TypeError(('The score function should be a callable, %s (%s) was passed.' % (self.score_func, type(self.score_func))))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
(self.scores_, self.pvalues_) = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
|
'Generate a sparse random projection matrix
Parameters
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
self'
| def fit(self, X, y=None):
| X = check_array(X, accept_sparse=['csr', 'csc'])
(n_samples, n_features) = X.shape
if (self.n_components == 'auto'):
self.n_components_ = johnson_lindenstrauss_min_dim(n_samples=n_samples, eps=self.eps)
if (self.n_components_ <= 0):
raise ValueError(('eps=%f and n_samples=%d lead to a target dimension of %d which is invalid' % (self.eps, n_samples, self.n_components_)))
elif (self.n_components_ > n_features):
raise ValueError(('eps=%f and n_samples=%d lead to a target dimension of %d which is larger than the original space with n_features=%d' % (self.eps, n_samples, self.n_components_, n_features)))
else:
if (self.n_components <= 0):
raise ValueError(('n_components must be greater than 0, got %s' % self.n_components))
elif (self.n_components > n_features):
warnings.warn(('The number of components is higher than the number of features: n_features < n_components (%s < %s).The dimensionality of the problem will not be reduced.' % (n_features, self.n_components)), DataDimensionalityWarning)
self.n_components_ = self.n_components
self.components_ = self._make_random_matrix(self.n_components_, n_features)
assert_equal(self.components_.shape, (self.n_components_, n_features), err_msg='An error has occurred the self.components_ matrix has not the proper shape.')
return self
|
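With `n_components='auto'`, the target dimensionality comes from the Johnson-Lindenstrauss bound, which depends only on the number of samples and the allowed distortion `eps`, not on the original feature count. For instance:

```python
from sklearn.random_projection import johnson_lindenstrauss_min_dim

# Components needed to preserve pairwise distances of 1000 samples within eps=0.1.
print(johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1))   # on the order of a few thousand
```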
'Project the data by using matrix product with the random matrix
Parameters
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
Returns
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.'
| def transform(self, X):
| X = check_array(X, accept_sparse=['csr', 'csc'])
check_is_fitted(self, 'components_')
if (X.shape[1] != self.components_.shape[1]):
raise ValueError(('Impossible to perform projection:X at fit stage had a different number of features. (%s != %s)' % (X.shape[1], self.components_.shape[1])))
X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)
return X_new
|
'Generate the random projection matrix
Parameters
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.'
| def _make_random_matrix(self, n_components, n_features):
| random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components, n_features, random_state=random_state)
|
'Generate the random projection matrix
Parameters
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.'
| def _make_random_matrix(self, n_components, n_features):
| random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components, n_features, density=self.density_, random_state=random_state)
|
'Removes comments (#...) from python code.'
| @classmethod
def split_comment(cls, code):
| if ('#' not in code):
return code
subf = (lambda m: ('' if (m.group(0)[0] == '#') else m.group(0)))
return re.sub(cls.re_pytokens, subf, code)
|
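`re_pytokens` (not shown here) matches either a quoted string or a `#` comment, so the substitution keeps `#` characters that live inside string literals and strips only real comments. A simplified sketch with a hypothetical, much cruder pattern:

```python
import re

# Matches single- or double-quoted strings, or a '#...' comment (simplified).
re_pytokens = re.compile(r"'[^']*'|\"[^\"]*\"|#[^\n]*")

def split_comment(code):
    if '#' not in code:
        return code
    # Keep matched strings verbatim, drop matched comments.
    return re_pytokens.sub(lambda m: '' if m.group(0)[0] == '#' else m.group(0), code)

print(split_comment("x = '# not a comment'  # a real comment"))   # x = '# not a comment'
```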
'Render the template using keyword arguments as local variables.'
| def render(self, *args, **kwargs):
| for dictarg in args:
kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
|
'The Picard docs suggest setting this as a convenience.'
| def setup_environment(self, spack_env, run_env):
| run_env.prepend_path('PICARD', join_path(self.prefix, 'bin', 'picard.jar'))
|
'Make the install targets'
| @when('@:1.7.0')
def install(self, spec, prefix):
| with working_dir(self.build_directory):
install_tree(join_path(self.stage.source_path, 'include'), prefix.include)
mkdirp(prefix.lib)
install('libgtest.a', prefix.lib)
install('libgtest_main.a', prefix.lib)
|
'Internal compile.sh scripts hardcode number of cores to build with.
Filter these out so Spack can control it.'
| def patch(self):
| files = ['compile.sh', 'parallel/modified_kahip/compile.sh', 'parallel/parallel_src/compile.sh']
for f in files:
filter_file('NCORES=.*', 'NCORES={0}'.format(make_jobs), f)
|
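Spack's `filter_file` performs an in-place, regex-based substitution over each listed file; the call above pins the hard-coded core count to whatever Spack decides. Roughly the same edit expressed with plain `re.sub` on a single line (the values are hypothetical):

```python
import re

make_jobs = 8                 # hypothetical value normally provided by Spack
line = 'NCORES=4'
print(re.sub(r'NCORES=.*', 'NCORES={0}'.format(make_jobs), line))   # NCORES=8
```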
'Build using the KaHIP compile.sh script. Uses scons internally.'
| def build(self, spec, prefix):
| builder = Executable('./compile.sh')
builder()
|
'Install under the prefix'
| def install(self, spec, prefix):
| mkdirp(prefix.bin)
mkdirp(prefix.include)
mkdirp(prefix.lib)
with working_dir('deploy'):
for f in os.listdir('.'):
if re.match('.*\\.(a|so|dylib)$', f):
install(f, prefix.lib)
elif re.match('.*\\.h$', f):
install(f, prefix.include)
else:
install(f, prefix.bin)
|
'Run before install so that the standard Spack sbang install hook
can fix up the path to the perl binary.'
| @run_before('install')
def filter_sbang(self):
| with working_dir('src/perl'):
match = '^#!/usr/bin/env perl'
perl = join_path(self.spec['perl'].prefix.bin, 'perl')
substitute = '#!{perl}'.format(perl=perl)
files = ['fill-aa', 'fill-an-ac', 'fill-fs', 'fill-ref-md5', 'tab-to-vcf', 'vcf-annotate', 'vcf-compare', 'vcf-concat', 'vcf-consensus', 'vcf-contrast', 'vcf-convert', 'vcf-fix-newlines', 'vcf-fix-ploidy', 'vcf-indel-stats', 'vcf-isec', 'vcf-merge', 'vcf-phased-join', 'vcf-query', 'vcf-shuffle-cols', 'vcf-sort', 'vcf-stats', 'vcf-subset', 'vcf-to-tab', 'vcf-tstv', 'vcf-validator']
kwargs = {'ignore_absent': True, 'backup': False, 'string': False}
filter_file(match, substitute, *files, **kwargs)
|
'Run after install to inject dependencies into LD_LIBRARY_PATH.
If we don\'t do this, the run files will clear the LD_LIBRARY_PATH.
Since the installer is a binary file, we have no means of specifying
an RPATH to use.'
| def filter_ld_library_path(self, spec, prefix):
| files = glob.glob((prefix + '/binaries/*.run'))
ld_library_path = ':'.join([spec['zlib'].prefix.lib, spec['freetype'].prefix.lib, spec['fontconfig'].prefix.lib, spec['libxrender'].prefix.lib, spec['libcanberra'].prefix.lib])
for runfile in files:
filter_file('(export LD_LIBRARY_PATH=)$', '\\1{0}'.format(ld_library_path), runfile)
|
'execute their autotools wrapper script'
| def autoreconf(self, spec, prefix):
| if os.path.exists('./buildconf.sh'):
bash = which('bash')
bash('./buildconf.sh', '--force')
|
'Build and run a small program to test the installed HDF5 Blosc plugin'
| def check_install(self, spec):
| print('Checking HDF5-Blosc plugin...')
checkdir = 'spack-check'
with working_dir(checkdir, create=True):
source = '\\\n#include <hdf5.h>\n#include <assert.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#define FILTER_BLOSC 32001 /* Blosc filter ID registered with the HDF group */\n\nint main(int argc, char **argv) {\n herr_t herr;\n hid_t file = H5Fcreate("file.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);\n assert(file >= 0);\n hsize_t dims[3] = {10, 10, 10};\n hid_t space = H5Screate_simple(3, dims, NULL);\n assert(space >= 0);\n hid_t create_proplist = H5Pcreate(H5P_DATASET_CREATE);\n assert(create_proplist >= 0);\n herr = H5Pset_chunk(create_proplist, 3, dims);\n assert(herr >= 0);\n herr = H5Pset_filter(create_proplist, FILTER_BLOSC, H5Z_FLAG_OPTIONAL, 0,\n NULL);\n assert(herr >= 0);\n htri_t all_filters_avail = H5Pall_filters_avail(create_proplist);\n assert(all_filters_avail > 0);\n hid_t dataset = H5Dcreate(file, "dataset", H5T_NATIVE_DOUBLE, space,\n H5P_DEFAULT, create_proplist, H5P_DEFAULT);\n assert(dataset >= 0);\n double data[10][10][10];\n for (int k=0; k<10; ++k) {\n for (int j=0; j<10; ++j) {\n for (int i=0; i<10; ++i) {\n data[k][j][i] = 1.0 / (1.0 + i + j + k);\n }\n }\n }\n herr = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, space, space, H5P_DEFAULT,\n &data[0][0][0]);\n assert(herr >= 0);\n herr = H5Pclose(create_proplist);\n assert(herr >= 0);\n herr = H5Dclose(dataset);\n assert(herr >= 0);\n herr = H5Sclose(space);\n assert(herr >= 0);\n herr = H5Fclose(file);\n assert(herr >= 0);\n printf("Done.\\n");\n return 0;\n}\n'
expected = 'Done.\n'
with open('check.c', 'w') as f:
f.write(source)
if ('+mpi' in spec['hdf5']):
cc = Executable(spec['mpi'].mpicc)
else:
cc = Executable(self.compiler.cc)
cc('-c', ('-I%s' % spec['hdf5'].prefix.include), 'check.c')
cc('-o', 'check', 'check.o', ('-L%s' % spec['hdf5'].prefix.lib), '-lhdf5')
try:
check = Executable('./check')
output = check(output=str)
except:
output = ''
success = (output == expected)
if (not success):
print('Produced output does not match expected output.')
print('Expected output:')
print(('-' * 80))
print(expected)
print(('-' * 80))
print('Produced output:')
print(('-' * 80))
print(output)
print(('-' * 80))
print('Environment:')
env = which('env')
env()
raise RuntimeError('HDF5 Blosc plugin check failed')
shutil.rmtree(checkdir)
|
'Called before Octave modules\' install() methods.
In most cases, extensions will only need to have one line:
octave(\'--eval\', \'pkg install %s\' % self.stage.archive_file)'
| def setup_dependent_package(self, module, dependent_spec):
| module.octave = Executable(join_path(self.spec.prefix.bin, 'octave'))
|
'Provide location of the OpenFOAM project.
This is identical to the WM_PROJECT_DIR value, but we avoid that
variable since it would mask the normal OpenFOAM cleanup of
previous versions.'
| def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
| spack_env.set('FOAM_PROJECT_DIR', self.projectdir)
|
'Absolute location of project directory: WM_PROJECT_DIR/'
| @property
def projectdir(self):
| return self.prefix
|
'Relative location of architecture-specific executables'
| @property
def archbin(self):
| return join_path('platforms', self.foam_arch, 'bin')
|
'Relative location of architecture-specific libraries'
| @property
def archlib(self):
| return join_path('platforms', self.foam_arch, 'lib')
|
'Adjust OpenFOAM build for spack.
Where needed, apply filter as an alternative to normal patching.'
| def patch(self):
| add_extra_files(self, self.common, self.assets)
edits = {'WM_THIRD_PARTY_DIR': '$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party'}
rewrite_environ_files(edits, posix=join_path('etc', 'bashrc'), cshell=join_path('etc', 'cshrc'))
|
'Make adjustments to the OpenFOAM configuration files in their various
locations: etc/bashrc, etc/config.sh/FEATURE and customizations that
don\'t properly fit get placed in the etc/prefs.sh file (similarly for
csh).'
| def configure(self, spec, prefix):
| edits = {}
edits.update(self.foam_arch.foam_dict())
rewrite_environ_files(edits, posix=join_path('etc', 'bashrc'), cshell=join_path('etc', 'cshrc'))
self.etc_prefs = {}
user_mpi = mplib_content(spec, '${MPI_ARCH_PATH}')
self.etc_config = {'CGAL': [('BOOST_ARCH_PATH', spec['boost'].prefix), ('CGAL_ARCH_PATH', spec['cgal'].prefix), ('LD_LIBRARY_PATH', foamAddLib(pkglib(spec['boost'], '${BOOST_ARCH_PATH}'), pkglib(spec['cgal'], '${CGAL_ARCH_PATH}')))], 'FFTW': [('FFTW_ARCH_PATH', spec['fftw'].prefix), ('LD_LIBRARY_PATH', foamAddLib(pkglib(spec['fftw'], '${BOOST_ARCH_PATH}')))], 'mpi-user': [('MPI_ARCH_PATH', spec['mpi'].prefix), ('LD_LIBRARY_PATH', foamAddLib(user_mpi['libdir'])), ('PATH', foamAddPath(user_mpi['bindir']))], 'scotch': {}, 'metis': {}, 'paraview': [], 'gperftools': []}
if ('+scotch' in spec):
self.etc_config['scotch'] = {'SCOTCH_ARCH_PATH': spec['scotch'].prefix, 'SCOTCH_VERSION': 'scotch-{0}'.format(spec['scotch'].version)}
if ('+metis' in spec):
self.etc_config['metis'] = {'METIS_ARCH_PATH': spec['metis'].prefix}
if ('+paraview' in spec):
pvMajor = 'paraview-{0}'.format(spec['paraview'].version.up_to(2))
self.etc_config['paraview'] = [('ParaView_DIR', spec['paraview'].prefix), ('ParaView_INCLUDE_DIR', ('${ParaView_DIR}/include/' + pvMajor)), ('PV_PLUGIN_PATH', ('$FOAM_LIBBIN/' + pvMajor)), ('PATH', foamAddPath('${ParaView_DIR}/bin'))]
if ('+mgridgen' in spec):
self.etc_config['mgridgen'] = {'MGRIDGEN_ARCH_PATH': spec['parmgridgen'].prefix}
if ('+zoltan' in spec):
self.etc_config['zoltan'] = {'ZOLTAN_ARCH_PATH': spec['zoltan'].prefix}
if self.etc_prefs:
write_environ(self.etc_prefs, posix=join_path('etc', 'prefs.sh'), cshell=join_path('etc', 'prefs.csh'))
for (component, subdict) in self.etc_config.items():
write_environ(subdict, posix=join_path('etc', 'config.sh', component), cshell=join_path('etc', 'config.csh', component))
|
'Build using the OpenFOAM Allwmake script, with a wrapper to source
its environment first.
Only build if the compiler is known to be supported.'
| def build(self, spec, prefix):
| self.foam_arch.has_rule(self.stage.source_path)
self.foam_arch.create_rules(self.stage.source_path, self)
args = ['-silent']
if self.parallel:
args.append('-j{0}'.format(make_jobs))
builder = Executable(self.build_script)
builder(*args)
|
'Install under the projectdir'
| def install(self, spec, prefix):
| mkdirp(self.projectdir)
projdir = os.path.basename(self.projectdir)
edits = {'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir), 'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir)}
if ('+source' in spec):
ignored = re.compile('^spack-.*')
else:
ignored = re.compile('^(Allwmake|spack-).*')
files = [f for f in glob.glob('*') if (os.path.isfile(f) and (not ignored.search(f)))]
for f in files:
install(f, self.projectdir)
dirs = ['etc', 'bin', 'wmake']
if ('+source' in spec):
dirs.extend(['applications', 'src', 'tutorials'])
for d in dirs:
install_tree(d, join_path(self.projectdir, d), symlinks=True)
dirs = ['platforms']
if ('+source' in spec):
dirs.extend(['doc'])
ignored = ['src', 'applications', 'html', 'Guides']
for d in dirs:
install_tree(d, join_path(self.projectdir, d), ignore=shutil.ignore_patterns(*ignored), symlinks=True)
etc_dir = join_path(self.projectdir, 'etc')
rewrite_environ_files(edits, posix=join_path(etc_dir, 'bashrc'), cshell=join_path(etc_dir, 'cshrc'))
self.install_links()
|
'Add symlinks into bin/, lib/ (eg, for other applications)'
| def install_links(self):
| with working_dir(self.projectdir):
os.symlink(join_path('.spack', 'build.out'), join_path(('log.' + str(self.foam_arch))))
if (not self.config['link']):
return
with working_dir(self.projectdir):
if os.path.isdir(self.archlib):
os.symlink(self.archlib, 'lib')
with working_dir(join_path(self.projectdir, 'bin')):
for f in [f for f in glob.glob(join_path('..', self.archbin, '*')) if os.path.isfile(f)]:
os.symlink(f, os.path.basename(f))
|
'Returns a dictionary for OpenFOAM prefs, bashrc, cshrc.'
| def foam_dict(self):
| return dict([('WM_COMPILER', self.compiler), ('WM_ARCH_OPTION', self.arch_option), ('WM_LABEL_SIZE', self.label_size), ('WM_PRECISION_OPTION', self.precision_option), ('WM_COMPILE_OPTION', self.compile_option), ('WM_MPLIB', self.mplib)])
|
'The wmake/rules/ compiler directory'
| def _rule_directory(self, projdir=None, general=False):
| if general:
relative = os.path.join('wmake', 'rules', 'General')
else:
relative = os.path.join('wmake', 'rules', self.rule)
if projdir:
return os.path.join(projdir, relative)
else:
return relative
|
'Verify that a wmake/rules/ compiler rule exists in the project
directory.'
| def has_rule(self, projdir):
| rule_dir = self._rule_directory(projdir)
if (not os.path.isdir(rule_dir)):
raise InstallError('No wmake rule for {0}'.format(self.rule))
if (not re.match('.+Opt$', self.compile_option)):
raise InstallError("WM_COMPILE_OPTION={0} is not type '*Opt'".format(self.compile_option))
return True
|
'Create cRpathOpt,c++RpathOpt and mplibUSER,mplibUSERMPI
rules in the specified project directory.
The compiler rules are based on the respective cOpt,c++Opt rules
but with additional rpath information for the OpenFOAM libraries.
The rpath rules allow wmake to use spack information with minimal
modification to OpenFOAM.
The rpath is used for the installed libpath (continue to use
LD_LIBRARY_PATH for values during the build).'
| def create_rules(self, projdir, foam_pkg):
| rpath = '{0}{1}'.format(foam_pkg.compiler.cxx_rpath_arg, join_path(foam_pkg.projectdir, foam_pkg.archlib))
user_mpi = mplib_content(foam_pkg.spec)
rule_dir = self._rule_directory(projdir)
with working_dir(rule_dir):
for lang in ['c', 'c++']:
src = '{0}Opt'.format(lang)
dst = '{0}{1}'.format(lang, self.compile_option)
with open(src, 'r') as infile:
with open(dst, 'w') as outfile:
for line in infile:
line = line.rstrip()
outfile.write(line)
if re.match('^\\S+DBUG\\s*=', line):
outfile.write(' ')
outfile.write(rpath)
outfile.write('\n')
for mplib in ['mplibUSER', 'mplibUSERMPI']:
with open(mplib, 'w') as out:
out.write('# Use mpi from spack ({name})\n\nPFLAGS = {FLAGS}\nPINC = {PINC}\nPLIBS = {PLIBS}\n'.format(**user_mpi))
|
'Run after install to tell the Makefile and SConstruct files to use
the compilers that Spack built the package with.
If this isn\'t done, they\'ll have CC, CXX, F77, and FC set to Spack\'s
generic cc, c++, f77, and f90. We want them to be bound to whatever
compiler they were built with.'
| @run_after('install')
def filter_compilers(self):
| kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
dirname = os.path.join(self.prefix, 'share/cantera/samples')
cc_files = ['cxx/rankine/Makefile', 'cxx/NASA_coeffs/Makefile', 'cxx/kinetics1/Makefile', 'cxx/flamespeed/Makefile', 'cxx/combustor/Makefile', 'f77/SConstruct']
cxx_files = ['cxx/rankine/Makefile', 'cxx/NASA_coeffs/Makefile', 'cxx/kinetics1/Makefile', 'cxx/flamespeed/Makefile', 'cxx/combustor/Makefile']
f77_files = ['f77/Makefile', 'f77/SConstruct']
fc_files = ['f90/Makefile', 'f90/SConstruct']
for filename in cc_files:
filter_file(os.environ['CC'], self.compiler.cc, os.path.join(dirname, filename), **kwargs)
for filename in cxx_files:
filter_file(os.environ['CXX'], self.compiler.cxx, os.path.join(dirname, filename), **kwargs)
for filename in f77_files:
filter_file(os.environ['F77'], self.compiler.f77, os.path.join(dirname, filename), **kwargs)
for filename in fc_files:
filter_file(os.environ['FC'], self.compiler.fc, os.path.join(dirname, filename), **kwargs)
|