text_prompt (string, length 100 to 17.7k) | code_prompt (string, length 7 to 9.86k) |
---|---|
<SYSTEM_TASK:>
Apply a transform function to a portion of the selected features.
<END_TASK>
<USER_TASK:>
Description:
def _transform_selected(X, transform, selected, copy=True):
"""Apply a transform function to portion of selected features.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all", "auto" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
""" |
if selected == "all":
return transform(X)
if len(selected) == 0:
return X
X = check_array(X, accept_sparse='csc', force_all_finite=False)
X_sel, X_not_sel, n_selected, n_features = _X_selected(X, selected)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X_sel)
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel), format='csr')
else:
return np.hstack((X_sel, X_not_sel)) |
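A minimal, self-contained sketch of the same pattern, i.e. transform only the selected columns and hstack them back with the untouched remainder (the helper name and data below are illustrative, not TPOT's API):

import numpy as np

def transform_selected_demo(X, transform, selected):
    # Build a boolean mask of the selected columns, transform only those,
    # then stack the transformed block next to the untouched remainder.
    mask = np.zeros(X.shape[1], dtype=bool)
    mask[np.asarray(selected)] = True
    return np.hstack((transform(X[:, mask]), X[:, ~mask]))

X = np.arange(12, dtype=float).reshape(3, 4)
print(transform_selected_demo(X, lambda A: A * 10, selected=[0, 2]))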
<SYSTEM_TASK:>
Adjust all values in X to encode for NaNs and infinities in the data.
<END_TASK>
<USER_TASK:>
Description:
def _matrix_adjust(self, X):
"""Adjust all values in X to encode for NaNs and infinities in the data.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
X : array-like, shape=(n_samples, n_feature)
Input array without any NaNs or infinities.
""" |
data_matrix = X.data if sparse.issparse(X) else X
# Shift all values to specially encode for NAN/infinity/OTHER and 0
# Old value New Value
# --------- ---------
# N (0..int_max) N + 3
# np.NaN 2
# infinity 2
# *other* 1
#
# A value of 0 is reserved, as that is specially handled in sparse
# matrices.
data_matrix += len(SPARSE_ENCODINGS) + 1
data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN']
return X |
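A standalone numeric sketch of the shift encoding described in the comment above, assuming SPARSE_ENCODINGS maps 'OTHER' to 1 and 'NAN' to 2 as that comment implies:

import numpy as np

SPARSE_ENCODINGS = {'OTHER': 1, 'NAN': 2}     # assumed values, per the comment table above
X = np.array([[0.0, 1.0, np.nan],
              [5.0, np.inf, 2.0]])
X += len(SPARSE_ENCODINGS) + 1                # ordinary values shift up by 3
X[~np.isfinite(X)] = SPARSE_ENCODINGS['NAN']  # NaN and infinity collapse to the code 2
print(X)   # [[3. 4. 2.]
           #  [8. 2. 5.]]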
<SYSTEM_TASK:>
Fit OneHotEncoder to X, then transform X.
<END_TASK>
<USER_TASK:>
Description:
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
y: array-like {n_samples,} (Optional, ignored)
Feature labels
""" |
if self.categorical_features == "auto":
self.categorical_features = auto_select_categorical_features(X, threshold=self.threshold)
return _transform_selected(
X,
self._fit_transform,
self.categorical_features,
copy=True
) |
<SYSTEM_TASK:>
Transform X using one-hot encoding.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
""" |
return _transform_selected(
X, self._transform,
self.categorical_features,
copy=True
) |
<SYSTEM_TASK:>
Helper function to update the _optimized_pipeline field.
<END_TASK>
<USER_TASK:>
Description:
def _update_top_pipeline(self):
"""Helper function to update the _optimized_pipeline field.""" |
# Store the pipeline with the highest internal testing score
if self._pareto_front:
self._optimized_pipeline_score = -float('inf')
for pipeline, pipeline_scores in zip(self._pareto_front.items, reversed(self._pareto_front.keys)):
if pipeline_scores.wvalues[1] > self._optimized_pipeline_score:
self._optimized_pipeline = pipeline
self._optimized_pipeline_score = pipeline_scores.wvalues[1]
if not self._optimized_pipeline:
raise RuntimeError('There was an error in the TPOT optimization '
'process. This could be because the data was '
'not formatted properly, or because data for '
'a regression problem was provided to the '
'TPOTClassifier object. Please make sure you '
'passed the data to TPOT correctly.')
else:
pareto_front_wvalues = [pipeline_scores.wvalues[1] for pipeline_scores in self._pareto_front.keys]
if not self._last_optimized_pareto_front:
self._last_optimized_pareto_front = pareto_front_wvalues
elif self._last_optimized_pareto_front == pareto_front_wvalues:
self._last_optimized_pareto_front_n_gens += 1
else:
self._last_optimized_pareto_front = pareto_front_wvalues
self._last_optimized_pareto_front_n_gens = 0
else:
# If the user presses CTRL+C during the initial generation, self._pareto_front (hall of fame) has not been updated yet.
# Raise a RuntimeError because no pipeline has been optimized yet.
raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.') |
<SYSTEM_TASK:>
Print out best pipeline at the end of optimization process.
<END_TASK>
<USER_TASK:>
Description:
def _summary_of_best_pipeline(self, features, target):
"""Print out best pipeline at the end of optimization process.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
target: array-like {n_samples}
List of class labels for prediction
Returns
-------
self: object
Returns a copy of the fitted TPOT object
""" |
if not self._optimized_pipeline:
raise RuntimeError('There was an error in the TPOT optimization '
'process. This could be because the data was '
'not formatted properly, or because data for '
'a regression problem was provided to the '
'TPOTClassifier object. Please make sure you '
'passed the data to TPOT correctly.')
else:
self.fitted_pipeline_ = self._toolbox.compile(expr=self._optimized_pipeline)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.fitted_pipeline_.fit(features, target)
if self.verbosity in [1, 2]:
# Add an extra line of spacing if the progress bar was used
if self.verbosity >= 2:
print('')
optimized_pipeline_str = self.clean_pipeline_string(self._optimized_pipeline)
print('Best pipeline:', optimized_pipeline_str)
# Store and fit the entire Pareto front as fitted models for convenience
self.pareto_front_fitted_pipelines_ = {}
for pipeline in self._pareto_front.items:
self.pareto_front_fitted_pipelines_[str(pipeline)] = self._toolbox.compile(expr=pipeline)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.pareto_front_fitted_pipelines_[str(pipeline)].fit(features, target) |
<SYSTEM_TASK:>
Use the optimized pipeline to predict the target for a feature set.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, features):
"""Use the optimized pipeline to predict the target for a feature set.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
Returns
----------
array-like: {n_samples}
Predicted target for the samples in the feature matrix
""" |
if not self.fitted_pipeline_:
raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
features = self._check_dataset(features, target=None, sample_weight=None)
return self.fitted_pipeline_.predict(features) |
<SYSTEM_TASK:>
Call fit and predict in sequence.
<END_TASK>
<USER_TASK:>
Description:
def fit_predict(self, features, target, sample_weight=None, groups=None):
"""Call fit and predict in sequence.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
target: array-like {n_samples}
List of class labels for prediction
sample_weight: array-like {n_samples}, optional
Per-sample weights. Higher weights force TPOT to put more emphasis on those points
groups: array-like, with shape {n_samples, }, optional
Group labels for the samples used when performing cross-validation.
This parameter should only be used in conjunction with sklearn's Group cross-validation
functions, such as sklearn.model_selection.GroupKFold
Returns
----------
array-like: {n_samples}
Predicted target for the provided features
""" |
self.fit(features, target, sample_weight=sample_weight, groups=groups)
return self.predict(features) |
<SYSTEM_TASK:>
Return the score on the given testing data using the user-specified scoring function.
<END_TASK>
<USER_TASK:>
Description:
def score(self, testing_features, testing_target):
"""Return the score on the given testing data using the user-specified scoring function.
Parameters
----------
testing_features: array-like {n_samples, n_features}
Feature matrix of the testing set
testing_target: array-like {n_samples}
List of class labels for prediction in the testing set
Returns
-------
accuracy_score: float
The estimated test set accuracy
""" |
if self.fitted_pipeline_ is None:
raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
testing_features, testing_target = self._check_dataset(testing_features, testing_target, sample_weight=None)
# If the scoring function is a string, we must adjust to use the sklearn
# scoring interface
score = SCORERS[self.scoring_function](
self.fitted_pipeline_,
testing_features.astype(np.float64),
testing_target.astype(np.float64)
)
return score |
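A typical end-to-end use of the fit, predict, and score methods documented above; this is a sketch with deliberately tiny search settings so it finishes quickly, and the dataset choice is arbitrary:

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from tpot import TPOTClassifier

X_train, X_test, y_train, y_test = train_test_split(
    *load_digits(return_X_y=True), random_state=42)

tpot = TPOTClassifier(generations=2, population_size=10,
                      verbosity=2, random_state=42)
tpot.fit(X_train, y_train)                # runs the optimization and fits fitted_pipeline_
print(tpot.score(X_test, y_test))         # held-out score with the configured scorer
print(tpot.predict(X_test)[:10])          # predictions from the optimized pipeline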
<SYSTEM_TASK:>
Use the optimized pipeline to estimate the class probabilities for a feature set.
<END_TASK>
<USER_TASK:>
Description:
def predict_proba(self, features):
"""Use the optimized pipeline to estimate the class probabilities for a feature set.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix of the testing set
Returns
-------
array-like: {n_samples, n_target}
The class probabilities of the input samples
""" |
if not self.fitted_pipeline_:
raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
else:
if not (hasattr(self.fitted_pipeline_, 'predict_proba')):
raise RuntimeError('The fitted pipeline does not have the predict_proba() function.')
features = self._check_dataset(features, target=None, sample_weight=None)
return self.fitted_pipeline_.predict_proba(features) |
<SYSTEM_TASK:>
Provide a string of the individual without the parameter prefixes.
<END_TASK>
<USER_TASK:>
Description:
def clean_pipeline_string(self, individual):
"""Provide a string of the individual without the parameter prefixes.
Parameters
----------
individual: individual
Individual which should be represented by a pretty string
Returns
-------
A string like str(individual), but with parameter prefixes removed.
""" |
dirty_string = str(individual)
# There are many parameter prefixes in the pipeline strings, used solely for
# making the terminal name unique, e.g. LinearSVC__.
parameter_prefixes = [(m.start(), m.end()) for m in re.finditer(r', [\w]+__', dirty_string)]
# We handle them in reverse so we do not mess up indices
pretty = dirty_string
for (start, end) in reversed(parameter_prefixes):
pretty = pretty[:start + 2] + pretty[end:]
return pretty |
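The prefix-stripping logic in isolation, applied to a representative pipeline string (the string itself is made up):

import re

dirty = "LogisticRegression(input_matrix, LogisticRegression__C=0.1, LogisticRegression__penalty=l2)"
spans = [(m.start(), m.end()) for m in re.finditer(r', [\w]+__', dirty)]
pretty = dirty
for start, end in reversed(spans):     # work backwards so earlier indices stay valid
    pretty = pretty[:start + 2] + pretty[end:]
print(pretty)  # LogisticRegression(input_matrix, C=0.1, penalty=l2)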
<SYSTEM_TASK:>
Export the optimized pipeline as Python code.
<END_TASK>
<USER_TASK:>
Description:
def export(self, output_file_name, data_file_path=''):
"""Export the optimized pipeline as Python code.
Parameters
----------
output_file_name: string
String containing the path and file name of the desired output file
data_file_path: string (default: '')
By default, the path of the input dataset in the exported code is 'PATH/TO/DATA/FILE'.
If data_file_path is a non-empty string, that path is used instead.
Returns
-------
False if it skipped writing the pipeline to file
True if the pipeline was actually written
""" |
if self._optimized_pipeline is None:
raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
to_write = export_pipeline(self._optimized_pipeline,
self.operators, self._pset,
self._imputed, self._optimized_pipeline_score,
self.random_state,
data_file_path=data_file_path)
with open(output_file_name, 'w') as output_file:
output_file.write(to_write) |
<SYSTEM_TASK:>
Impute missing values in a feature set.
<END_TASK>
<USER_TASK:>
Description:
def _impute_values(self, features):
"""Impute missing values in a feature set.
Parameters
----------
features: array-like {n_samples, n_features}
A feature matrix
Returns
-------
array-like {n_samples, n_features}
""" |
if self.verbosity > 1:
print('Imputing missing values in feature set')
if self._fitted_imputer is None:
self._fitted_imputer = Imputer(strategy="median")
self._fitted_imputer.fit(features)
return self._fitted_imputer.transform(features) |
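The Imputer class used above was removed in scikit-learn 0.22; a minimal sketch of the same median imputation with the current SimpleImputer:

import numpy as np
from sklearn.impute import SimpleImputer

features = np.array([[1.0, np.nan],
                     [3.0, 4.0],
                     [np.nan, 8.0]])
imputer = SimpleImputer(strategy="median")
print(imputer.fit_transform(features))  # NaNs replaced by the per-column medians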
<SYSTEM_TASK:>
Check if a dataset has a valid feature set and labels.
<END_TASK>
<USER_TASK:>
Description:
def _check_dataset(self, features, target, sample_weight=None):
"""Check if a dataset has a valid feature set and labels.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
target: array-like {n_samples} or None
List of class labels for prediction
sample_weight: array-like {n_samples} (optional)
List of weights indicating relative importance
Returns
-------
(features, target)
""" |
# Check sample_weight
if sample_weight is not None:
try: sample_weight = np.array(sample_weight).astype('float')
except ValueError as e:
raise ValueError('sample_weight could not be converted to float array: %s' % e)
if np.any(np.isnan(sample_weight)):
raise ValueError('sample_weight contained NaN values.')
try: check_consistent_length(sample_weight, target)
except ValueError as e:
raise ValueError('sample_weight dimensions did not match target: %s' % e)
# If features is a sparse matrix, do not apply imputation
if sparse.issparse(features):
if self.config_dict in [None, "TPOT light", "TPOT MDR"]:
raise ValueError(
'Not all operators in {} support sparse matrices. '
'Please use \"TPOT sparse\" for sparse matrices.'.format(self.config_dict)
)
elif self.config_dict != "TPOT sparse":
print(
'Warning: Since the input matrix is a sparse matrix, please make sure all the operators in the '
'customized config dictionary support sparse matrices.'
)
else:
if isinstance(features, np.ndarray):
if np.any(np.isnan(features)):
self._imputed = True
elif isinstance(features, DataFrame):
if features.isnull().values.any():
self._imputed = True
if self._imputed:
features = self._impute_values(features)
try:
if target is not None:
X, y = check_X_y(features, target, accept_sparse=True, dtype=None)
if self._imputed:
return X, y
else:
return features, target
else:
X = check_array(features, accept_sparse=True, dtype=None)
if self._imputed:
return X
else:
return features
except (AssertionError, ValueError):
raise ValueError(
'Error: Input data is not in a valid format. Please confirm '
'that the input data is scikit-learn compatible. For example, '
'the features must be a 2-D array and target labels must be a '
'1-D array.'
) |
<SYSTEM_TASK:>
Compile a DEAP pipeline into a sklearn pipeline.
<END_TASK>
<USER_TASK:>
Description:
def _compile_to_sklearn(self, expr):
"""Compile a DEAP pipeline into a sklearn pipeline.
Parameters
----------
expr: DEAP individual
The DEAP pipeline to be compiled
Returns
-------
sklearn_pipeline: sklearn.pipeline.Pipeline
""" |
sklearn_pipeline_str = generate_pipeline_code(expr_to_tree(expr, self._pset), self.operators)
sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context)
sklearn_pipeline.memory = self._memory
return sklearn_pipeline |
<SYSTEM_TASK:>
Recursively iterate through all objects in the pipeline and set a given parameter.
<END_TASK>
<USER_TASK:>
Description:
def _set_param_recursive(self, pipeline_steps, parameter, value):
"""Recursively iterate through all objects in the pipeline and set a given parameter.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
parameter: str
The parameter to assign a value for in each pipeline object
value: any
The value to assign the parameter to in each pipeline object
Returns
-------
None
""" |
for (_, obj) in pipeline_steps:
recursive_attrs = ['steps', 'transformer_list', 'estimators']
for attr in recursive_attrs:
if hasattr(obj, attr):
self._set_param_recursive(getattr(obj, attr), parameter, value)
if hasattr(obj, 'estimator'): # nested estimator
est = getattr(obj, 'estimator')
if hasattr(est, parameter):
setattr(est, parameter, value)
if hasattr(obj, parameter):
setattr(obj, parameter, value) |
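A quick standalone check of the recursive parameter-setting idea on a plain scikit-learn pipeline; this is a simplified re-implementation for illustration, not TPOT's private method:

from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline

def set_param_recursive(pipeline_steps, parameter, value):
    # Walk the (name, estimator) pairs and set the parameter wherever it exists.
    for _, obj in pipeline_steps:
        if hasattr(obj, 'steps'):                      # nested Pipeline
            set_param_recursive(obj.steps, parameter, value)
        if hasattr(obj, parameter):
            setattr(obj, parameter, value)

pipe = make_pipeline(PCA(), RandomForestClassifier())
set_param_recursive(pipe.steps, 'random_state', 42)
print(pipe.steps[0][1].random_state, pipe.steps[1][1].random_state)  # 42 42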
<SYSTEM_TASK:>
Stop optimization process once maximum minutes have elapsed.
<END_TASK>
<USER_TASK:>
Description:
def _stop_by_max_time_mins(self):
"""Stop optimization process once maximum minutes have elapsed.""" |
if self.max_time_mins:
total_mins_elapsed = (datetime.now() - self._start_datetime).total_seconds() / 60.
if total_mins_elapsed >= self.max_time_mins:
raise KeyboardInterrupt('{} minutes have elapsed. TPOT will close down.'.format(total_mins_elapsed)) |
<SYSTEM_TASK:>
Combine the stats with the operator count and CV score, and prepare them to be written to _evaluated_individuals
<END_TASK>
<USER_TASK:>
Description:
def _combine_individual_stats(self, operator_count, cv_score, individual_stats):
"""Combine the stats with operator count and cv score and preprare to be written to _evaluated_individuals
Parameters
----------
operator_count: int
number of components in the pipeline
cv_score: float
internal cross validation score
individual_stats: dictionary
dict containing statistics about the individual. currently:
'generation': generation in which the individual was evaluated
'mutation_count': number of mutation operations applied to the individual and its predecessor cumulatively
'crossover_count': number of crossover operations applied to the individual and its predecessor cumulatively
'predecessor': string representation of the individual
Returns
-------
stats: dictionary
dict containing the combined statistics:
'operator_count': number of operators in the pipeline
'internal_cv_score': internal cross validation score
and all the statistics contained in the 'individual_stats' parameter
""" |
stats = deepcopy(individual_stats) # Deepcopy, since the string reference to predecessor should be cloned
stats['operator_count'] = operator_count
stats['internal_cv_score'] = cv_score
return stats |
<SYSTEM_TASK:>
Preprocess DEAP individuals before pipeline evaluation.
<END_TASK>
<USER_TASK:>
Description:
def _preprocess_individuals(self, individuals):
"""Preprocess DEAP individuals before pipeline evaluation.
Parameters
----------
individuals: a list of DEAP individual
One individual is a list of pipeline operators and model parameters that can be
compiled by DEAP into a callable function
Returns
-------
operator_counts: dictionary
a dictionary of operator counts in individuals for evaluation
eval_individuals_str: list
a list of string of individuals for evaluation
sklearn_pipeline_list: list
a list of scikit-learn pipelines converted from DEAP individuals for evaluation
stats_dicts: dictionary
A dict where 'key' is the string representation of an individual and 'value' is a dict containing statistics about the individual
""" |
# update self._pbar.total
if not (self.max_time_mins is None) and not self._pbar.disable and self._pbar.total <= self._pbar.n:
self._pbar.total += self._lambda
# Check that we do not evaluate the same individual twice in one pass.
_, unique_individual_indices = np.unique([str(ind) for ind in individuals], return_index=True)
unique_individuals = [ind for i, ind in enumerate(individuals) if i in unique_individual_indices]
# update number of duplicate pipelines
self._update_pbar(pbar_num=len(individuals) - len(unique_individuals))
# a dictionary for storing operator counts
operator_counts = {}
stats_dicts = {}
# 2 lists of DEAP individuals' string, their sklearn pipelines for parallel computing
eval_individuals_str = []
sklearn_pipeline_list = []
for individual in unique_individuals:
# Disallow certain combinations of operators because they will take too long or take up too much RAM
# This is a fairly hacky way to prevent TPOT from getting stuck on bad pipelines and should be improved in a future release
individual_str = str(individual)
if not len(individual): # a pipeline cannot be randomly generated
self.evaluated_individuals_[individual_str] = self._combine_individual_stats(5000.,
-float('inf'),
individual.statistics)
self._update_pbar(pbar_msg='Invalid pipeline encountered. Skipping its evaluation.')
continue
sklearn_pipeline_str = generate_pipeline_code(expr_to_tree(individual, self._pset), self.operators)
if sklearn_pipeline_str.count('PolynomialFeatures') > 1:
self.evaluated_individuals_[individual_str] = self._combine_individual_stats(5000.,
-float('inf'),
individual.statistics)
self._update_pbar(pbar_msg='Invalid pipeline encountered. Skipping its evaluation.')
# Check if the individual was evaluated before
elif individual_str in self.evaluated_individuals_:
self._update_pbar(pbar_msg=('Pipeline encountered that has previously been evaluated during the '
'optimization process. Using the score from the previous evaluation.'))
else:
try:
# Transform the tree expression into an sklearn pipeline
sklearn_pipeline = self._toolbox.compile(expr=individual)
# Fix random state when the operator allows
self._set_param_recursive(sklearn_pipeline.steps, 'random_state', 42)
# Setting the seed is needed for XGBoost support because XGBoost currently stores
# both a seed and random_state, and they're not synced correctly.
# XGBoost will raise an exception if random_state != seed.
if 'XGB' in sklearn_pipeline_str:
self._set_param_recursive(sklearn_pipeline.steps, 'seed', 42)
# Count the number of pipeline operators as a measure of pipeline complexity
operator_count = self._operator_count(individual)
operator_counts[individual_str] = max(1, operator_count)
stats_dicts[individual_str] = individual.statistics
except Exception:
self.evaluated_individuals_[individual_str] = self._combine_individual_stats(5000.,
-float('inf'),
individual.statistics)
self._update_pbar()
continue
eval_individuals_str.append(individual_str)
sklearn_pipeline_list.append(sklearn_pipeline)
return operator_counts, eval_individuals_str, sklearn_pipeline_list, stats_dicts |
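The deduplication step near the top of the method, shown in isolation: np.unique with return_index keeps the index of the first occurrence of each distinct string representation (the strings below are stand-ins for str(individual)):

import numpy as np

inds = ['a(b)', 'c(d)', 'a(b)', 'e(f)']
_, unique_idx = np.unique(inds, return_index=True)
print([ind for i, ind in enumerate(inds) if i in unique_idx])  # ['a(b)', 'c(d)', 'e(f)']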
<SYSTEM_TASK:>
Update self.evaluated_individuals_ and error message during pipeline evaluation.
<END_TASK>
<USER_TASK:>
Description:
def _update_evaluated_individuals_(self, result_score_list, eval_individuals_str, operator_counts, stats_dicts):
"""Update self.evaluated_individuals_ and error message during pipeline evaluation.
Parameters
----------
result_score_list: list
A list of CV scores for evaluated pipelines
eval_individuals_str: list
A list of strings for evaluated pipelines
operator_counts: dict
A dict where 'key' is the string representation of an individual and 'value' is the number of operators in the pipeline
stats_dicts: dict
A dict where 'key' is the string representation of an individual and 'value' is a dict containing statistics about the individual
Returns
-------
None
""" |
for result_score, individual_str in zip(result_score_list, eval_individuals_str):
if type(result_score) in [float, np.float64, np.float32]:
self.evaluated_individuals_[individual_str] = self._combine_individual_stats(operator_counts[individual_str],
result_score,
stats_dicts[individual_str])
else:
raise ValueError('Scoring function does not return a float.') |
<SYSTEM_TASK:>
Update self._pbar and error message during pipeline evaluation.
<END_TASK>
<USER_TASK:>
Description:
def _update_pbar(self, pbar_num=1, pbar_msg=None):
"""Update self._pbar and error message during pipeline evaluation.
Parameters
----------
pbar_num: int
How many pipelines have been processed
pbar_msg: None or string
Error message
Returns
-------
None
""" |
if not isinstance(self._pbar, type(None)):
if self.verbosity > 2 and pbar_msg is not None:
self._pbar.write(pbar_msg, file=self._file)
if not self._pbar.disable:
self._pbar.update(pbar_num) |
<SYSTEM_TASK:>
Perform a replacement, insertion, or shrink mutation on an individual.
<END_TASK>
<USER_TASK:>
Description:
def _random_mutation_operator(self, individual, allow_shrink=True):
"""Perform a replacement, insertion, or shrink mutation on an individual.
Parameters
----------
individual: DEAP individual
A list of pipeline operators and model parameters that can be
compiled by DEAP into a callable function
allow_shrink: bool (True)
If True the `mutShrink` operator, which randomly shrinks the pipeline,
is allowed to be chosen as one of the random mutation operators.
If False, `mutShrink` will never be chosen as a mutation operator.
Returns
-------
mut_ind: DEAP individual
Returns the individual with one of the mutations applied to it
""" |
if self.tree_structure:
mutation_techniques = [
partial(gp.mutInsert, pset=self._pset),
partial(mutNodeReplacement, pset=self._pset)
]
# We can't shrink pipelines with only one primitive, so we only add it if we find more primitives.
number_of_primitives = sum([isinstance(node, deap.gp.Primitive) for node in individual])
if number_of_primitives > 1 and allow_shrink:
mutation_techniques.append(partial(gp.mutShrink))
else:
mutation_techniques = [partial(mutNodeReplacement, pset=self._pset)]
mutator = np.random.choice(mutation_techniques)
unsuccesful_mutations = 0
for _ in range(self._max_mut_loops):
# We have to clone the individual because mutator operators work in-place.
ind = self._toolbox.clone(individual)
offspring, = mutator(ind)
if str(offspring) not in self.evaluated_individuals_:
# Update statistics
# crossover_count is kept the same as for the predecessor
# mutation count is increased by 1
# predecessor is set to the string representation of the individual before mutation
# generation is set to 'INVALID' such that we can recognize that it should be updated accordingly
offspring.statistics['crossover_count'] = individual.statistics['crossover_count']
offspring.statistics['mutation_count'] = individual.statistics['mutation_count'] + 1
offspring.statistics['predecessor'] = (str(individual),)
offspring.statistics['generation'] = 'INVALID'
break
else:
unsuccesful_mutations += 1
# Sometimes you have pipelines for which every shrunk version has already been explored too.
# To still mutate the individual, one of the two other mutators should be applied instead.
if ((unsuccesful_mutations == 50) and
(type(mutator) is partial and mutator.func is gp.mutShrink)):
offspring, = self._random_mutation_operator(individual, allow_shrink=False)
return offspring, |
<SYSTEM_TASK:>
Generate an expression where each leaf might have a different depth between min_ and max_.
<END_TASK>
<USER_TASK:>
Description:
def _gen_grow_safe(self, pset, min_, max_, type_=None):
"""Generate an expression where each leaf might have a different depth between min_ and max_.
Parameters
----------
pset: PrimitiveSetTyped
Primitive set from which primitives are selected.
min_: int
Minimum height of the produced trees.
max_: int
Maximum Height of the produced trees.
type_: class
The type that the tree should return when called; when
:obj:None (default), the type of :pset: (pset.ret)
is assumed.
Returns
-------
individual: list
A grown tree with leaves at possibly different depths.
""" |
def condition(height, depth, type_):
"""Stop when the depth is equal to height or when a node should be a terminal."""
return type_ not in self.ret_types or depth == height
return self._generate(pset, min_, max_, condition, type_) |
<SYSTEM_TASK:>
Count the number of pipeline operators as a measure of pipeline complexity.
<END_TASK>
<USER_TASK:>
Description:
def _operator_count(self, individual):
"""Count the number of pipeline operators as a measure of pipeline complexity.
Parameters
----------
individual: list
A grown tree with leaves at possibly different depths
depending on the condition function.
Returns
-------
operator_count: int
How many operators in a pipeline
""" |
operator_count = 0
for i in range(len(individual)):
node = individual[i]
if type(node) is deap.gp.Primitive and node.name != 'CombineDFs':
operator_count += 1
return operator_count |
<SYSTEM_TASK:>
Update values in the list of result scores and self._pbar during pipeline evaluation.
<END_TASK>
<USER_TASK:>
Description:
def _update_val(self, val, result_score_list):
"""Update values in the list of result scores and self._pbar during pipeline evaluation.
Parameters
----------
val: float or "Timeout"
CV scores
result_score_list: list
A list of CV scores
Returns
-------
result_score_list: list
An updated list of CV scores
""" |
self._update_pbar()
if val == 'Timeout':
self._update_pbar(pbar_msg=('Skipped pipeline #{0} due to time out. '
'Continuing to the next pipeline.'.format(self._pbar.n)))
result_score_list.append(-float('inf'))
else:
result_score_list.append(val)
return result_score_list |
<SYSTEM_TASK:>
Generate a Tree as a list of lists.
<END_TASK>
<USER_TASK:>
Description:
def _generate(self, pset, min_, max_, condition, type_=None):
"""Generate a Tree as a list of lists.
The tree is built from the root to the leaves, and it stops growing when
the condition is fulfilled.
Parameters
----------
pset: PrimitiveSetTyped
Primitive set from which primitives are selected.
min_: int
Minimum height of the produced trees.
max_: int
Maximum Height of the produced trees.
condition: function
The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
type_: class
The type that the tree should return when called; when
:obj:None (default), no return type is enforced.
Returns
-------
individual: list
A grown tree with leaves at possibly different depths
depending on the condition function.
""" |
if type_ is None:
type_ = pset.ret
expr = []
height = np.random.randint(min_, max_)
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
# We've added a type_ parameter to the condition function
if condition(height, depth, type_):
try:
term = np.random.choice(pset.terminals[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError(
'The gp.generate function tried to add '
'a terminal of type {}, but there is '
'none available. {}'.format(type_, traceback)
)
if inspect.isclass(term):
term = term()
expr.append(term)
else:
try:
prim = np.random.choice(pset.primitives[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError(
'The gp.generate function tried to add '
'a primitive of type {}, but there is '
'none available. {}'.format(type_, traceback)
)
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth + 1, arg))
return expr |
<SYSTEM_TASK:>
Select categorical features and transform them using OneHotEncoder.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X):
"""Select categorical features and transform them using OneHotEncoder.
Parameters
----------
X: numpy ndarray, {n_samples, n_components}
New data, where n_samples is the number of samples and n_components is the number of components.
Returns
-------
array-like, {n_samples, n_components}
""" |
selected = auto_select_categorical_features(X, threshold=self.threshold)
X_sel, _, n_selected, _ = _X_selected(X, selected)
if n_selected == 0:
# No features selected.
raise ValueError('No categorical feature was found!')
else:
ohe = OneHotEncoder(categorical_features='all', sparse=False, minimum_fraction=self.minimum_fraction)
return ohe.fit_transform(X_sel) |
<SYSTEM_TASK:>
Select continuous features and transform them using PCA.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X):
"""Select continuous features and transform them using PCA.
Parameters
----------
X: numpy ndarray, {n_samples, n_components}
New data, where n_samples is the number of samples and n_components is the number of components.
Returns
-------
array-like, {n_samples, n_components}
""" |
selected = auto_select_categorical_features(X, threshold=self.threshold)
_, X_sel, n_selected, _ = _X_selected(X, selected)
if n_selected == 0:
# No features selected.
raise ValueError('No continuous feature was found!')
else:
pca = PCA(svd_solver=self.svd_solver, iterated_power=self.iterated_power, random_state=self.random_state)
return pca.fit_transform(X_sel) |
<SYSTEM_TASK:>
Fit the StackingEstimator meta-transformer.
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X, y=None, **fit_params):
"""Fit the StackingEstimator meta-transformer.
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
fit_params:
Other estimator-specific parameters.
Returns
-------
self: object
Returns a copy of the estimator
""" |
self.estimator.fit(X, y, **fit_params)
return self |
<SYSTEM_TASK:>
Transform data by adding two virtual features.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X, y=None):
"""Transform data by adding two virtual features.
Parameters
----------
X: numpy ndarray, {n_samples, n_components}
New data, where n_samples is the number of samples and n_components
is the number of components.
y: None
Unused
Returns
-------
X_transformed: array-like, shape (n_samples, n_features)
The transformed feature set
""" |
X = check_array(X)
n_features = X.shape[1]
X_transformed = np.copy(X)
non_zero_vector = np.count_nonzero(X_transformed, axis=1)
non_zero = np.reshape(non_zero_vector, (-1, 1))
zero_col = np.reshape(n_features - non_zero_vector, (-1, 1))
X_transformed = np.hstack((non_zero, X_transformed))
X_transformed = np.hstack((zero_col, X_transformed))
return X_transformed |
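A standalone check of the two "virtual features" added above, i.e. per-row counts of zero and non-zero entries prepended to the original matrix:

import numpy as np

X = np.array([[0, 1, 2],
              [0, 0, 3]])
non_zero = np.count_nonzero(X, axis=1).reshape(-1, 1)
zero_col = X.shape[1] - non_zero
print(np.hstack((zero_col, non_zero, X)))
# [[1 2 0 1 2]
#  [2 1 0 0 3]]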
<SYSTEM_TASK:>
Decode operator source and import operator class.
<END_TASK>
<USER_TASK:>
Description:
def source_decode(sourcecode, verbose=0):
"""Decode operator source and import operator class.
Parameters
----------
sourcecode: string
a string of operator source (e.g. 'sklearn.feature_selection.RFE')
verbose: int, optional (default: 0)
How much information TPOT communicates while it's running.
0 = none, 1 = minimal, 2 = high, 3 = all.
if verbose > 2, an ImportError will be raised during initialization
Returns
-------
import_str: string
a string of operator class source (e.g. 'sklearn.feature_selection')
op_str: string
a string of operator class (e.g. 'RFE')
op_obj: object
operator class (e.g. RFE)
""" |
tmp_path = sourcecode.split('.')
op_str = tmp_path.pop()
import_str = '.'.join(tmp_path)
try:
if sourcecode.startswith('tpot.'):
exec('from {} import {}'.format(import_str[4:], op_str))
else:
exec('from {} import {}'.format(import_str, op_str))
op_obj = eval(op_str)
except Exception as e:
if verbose > 2:
raise ImportError('Error: could not import {}.\n{}'.format(sourcecode, e))
else:
print('Warning: {} is not available and will not be used by TPOT.'.format(sourcecode))
op_obj = None
return import_str, op_str, op_obj |
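An importlib-based equivalent of the exec/eval approach above (a sketch, not TPOT's function), using the same kind of source string the docstring mentions:

import importlib

sourcecode = 'sklearn.feature_selection.RFE'
import_str, op_str = sourcecode.rsplit('.', 1)
op_obj = getattr(importlib.import_module(import_str), op_str)
print(import_str, op_str, op_obj)  # sklearn.feature_selection RFE <class '...RFE'>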
<SYSTEM_TASK:>
Recursively iterates through all objects in the pipeline and sets sample weight.
<END_TASK>
<USER_TASK:>
Description:
def set_sample_weight(pipeline_steps, sample_weight=None):
"""Recursively iterates through all objects in the pipeline and sets sample weight.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
sample_weight: array-like
List of sample weight
Returns
-------
sample_weight_dict:
A dictionary of sample_weight
""" |
sample_weight_dict = {}
if not isinstance(sample_weight, type(None)):
for (pname, obj) in pipeline_steps:
if inspect.getfullargspec(obj.fit).args.count('sample_weight'):  # getargspec was removed in Python 3.11
step_sw = pname + '__sample_weight'
sample_weight_dict[step_sw] = sample_weight
if sample_weight_dict:
return sample_weight_dict
else:
return None |
<SYSTEM_TASK:>
Ensure that the provided value is a positive integer.
<END_TASK>
<USER_TASK:>
Description:
def positive_integer(value):
"""Ensure that the provided value is a positive integer.
Parameters
----------
value: int
The number to evaluate
Returns
-------
value: int
Returns a positive integer
""" |
try:
value = int(value)
except Exception:
raise argparse.ArgumentTypeError('Invalid int value: \'{}\''.format(value))
if value < 0:
raise argparse.ArgumentTypeError('Invalid positive int value: \'{}\''.format(value))
return value |
<SYSTEM_TASK:>
Convert 'mymodule.myfunc' into the myfunc object itself so TPOT receives a scoring function.
<END_TASK>
<USER_TASK:>
Description:
def load_scoring_function(scoring_func):
"""
converts 'mymodule.myfunc' into the myfunc
object itself so TPOT receives a scoring function
""" |
if scoring_func and ("." in scoring_func):
try:
module_name, func_name = scoring_func.rsplit('.', 1)
module_path = os.getcwd()
sys.path.insert(0, module_path)
scoring_func = getattr(import_module(module_name), func_name)
sys.path.pop(0)
print('manual scoring function: {}'.format(scoring_func))
print('taken from module: {}'.format(module_name))
except Exception as e:
print('failed importing custom scoring function, error: {}'.format(str(e)))
raise ValueError(e)
return scoring_func |
<SYSTEM_TASK:>
Fit FeatureSetSelector for feature selection
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X, y=None):
"""Fit FeatureSetSelector for feature selection
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
Returns
-------
self: object
Returns a copy of the estimator
""" |
subset_df = pd.read_csv(self.subset_list, header=0, index_col=0)
if isinstance(self.sel_subset, int):
self.sel_subset_name = subset_df.index[self.sel_subset]
elif isinstance(self.sel_subset, str):
self.sel_subset_name = self.sel_subset
else: # list or tuple
self.sel_subset_name = []
for s in self.sel_subset:
if isinstance(s, int):
self.sel_subset_name.append(subset_df.index[s])
else:
self.sel_subset_name.append(s)
sel_features = subset_df.loc[self.sel_subset_name, 'Features']
if not isinstance(sel_features, str):
sel_features = ";".join(sel_features.tolist())
sel_uniq_features = set(sel_features.split(';'))
if isinstance(X, pd.DataFrame): # use columns' names
self.feature_names = list(X.columns.values)
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list]
elif isinstance(X, np.ndarray): # use index
self.feature_names = list(range(X.shape[1]))
sel_uniq_features = [int(val) for val in sel_uniq_features]
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = self.feat_list
if not len(self.feat_list):
raise ValueError('No feature is found on the subset list!')
return self |
<SYSTEM_TASK:>
Make subset after fit
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X):
"""Make subset after fit
Parameters
----------
X: numpy ndarray, {n_samples, n_features}
New data, where n_samples is the number of samples and n_features is the number of features.
Returns
-------
X_transformed: array-like, shape (n_samples, n_selected_features)
The transformed feature set.
""" |
if isinstance(X, pd.DataFrame):
X_transformed = X[self.feat_list].values
elif isinstance(X, np.ndarray):
X_transformed = X[:, self.feat_list_idx]
return X_transformed.astype(np.float64) |
<SYSTEM_TASK:>
Pick two individuals from the population which can do crossover, that is, they share a primitive.
<END_TASK>
<USER_TASK:>
Description:
def pick_two_individuals_eligible_for_crossover(population):
"""Pick two individuals from the population which can do crossover, that is, they share a primitive.
Parameters
----------
population: array of individuals
Returns
----------
tuple: (individual, individual)
Two individuals which are not the same, but share at least one primitive.
Alternatively, if no such pair exists in the population, (None, None) is returned instead.
""" |
primitives_by_ind = [set([node.name for node in ind if isinstance(node, gp.Primitive)])
for ind in population]
pop_as_str = [str(ind) for ind in population]
eligible_pairs = [(i, i+1+j) for i, ind1_prims in enumerate(primitives_by_ind)
for j, ind2_prims in enumerate(primitives_by_ind[i+1:])
if not ind1_prims.isdisjoint(ind2_prims) and
pop_as_str[i] != pop_as_str[i+1+j]]
# Pairs are eligible in both orders, this ensures that both orders are considered
eligible_pairs += [(j, i) for (i, j) in eligible_pairs]
if not eligible_pairs:
# If there are no eligible pairs, the caller should decide what to do
return None, None
pair = np.random.randint(0, len(eligible_pairs))
idx1, idx2 = eligible_pairs[pair]
return population[idx1], population[idx2] |
<SYSTEM_TASK:>
Picks a random individual from the population, and performs mutation on a copy of it.
<END_TASK>
<USER_TASK:>
Description:
def mutate_random_individual(population, toolbox):
"""Picks a random individual from the population, and performs mutation on a copy of it.
Parameters
----------
population: array of individuals
Returns
----------
individual: individual
An individual which is a mutated copy of one of the individuals in the population;
the returned individual does not have fitness.values
""" |
idx = np.random.randint(0,len(population))
ind = population[idx]
ind, = toolbox.mutate(ind)
del ind.fitness.values
return ind |
<SYSTEM_TASK:>
Return operator class instance by name.
<END_TASK>
<USER_TASK:>
Description:
def get_by_name(opname, operators):
"""Return operator class instance by name.
Parameters
----------
opname: str
Name of the sklearn class that belongs to a TPOT operator
operators: list
List of operator classes from operator library
Returns
-------
ret_op_class: class
An operator class
""" |
ret_op_classes = [op for op in operators if op.__name__ == opname]
if len(ret_op_classes) == 0:
raise TypeError('Cannot find operator {} in the operator dictionary'.format(opname))
elif len(ret_op_classes) > 1:
raise ValueError(
'Found duplicate operators {} in operator dictionary. Please check '
'your dictionary file.'.format(opname)
)
ret_op_class = ret_op_classes[0]
return ret_op_class |
<SYSTEM_TASK:>
Convert the unstructured DEAP pipeline into a tree data-structure.
<END_TASK>
<USER_TASK:>
Description:
def expr_to_tree(ind, pset):
"""Convert the unstructured DEAP pipeline into a tree data-structure.
Parameters
----------
ind: deap.creator.Individual
The pipeline that is being exported
Returns
-------
pipeline_tree: list
List of operators in the current optimized pipeline
EXAMPLE:
pipeline:
"DecisionTreeClassifier(input_matrix, 28.0)"
pipeline_tree:
['DecisionTreeClassifier', 'input_matrix', 28.0]
""" |
def prim_to_list(prim, args):
if isinstance(prim, deap.gp.Terminal):
if prim.name in pset.context:
return pset.context[prim.name]
else:
return prim.value
return [prim.name] + args
tree = []
stack = []
for node in ind:
stack.append((node, []))
while len(stack[-1][1]) == stack[-1][0].arity:
prim, args = stack.pop()
tree = prim_to_list(prim, args)
if len(stack) == 0:
break # If stack is empty, all nodes should have been seen
stack[-1][1].append(tree)
return tree |
<SYSTEM_TASK:>
Generate code specific to the construction of the sklearn Pipeline.
<END_TASK>
<USER_TASK:>
Description:
def generate_pipeline_code(pipeline_tree, operators):
"""Generate code specific to the construction of the sklearn Pipeline.
Parameters
----------
pipeline_tree: list
List of operators in the current optimized pipeline
Returns
-------
Source code for the sklearn pipeline
""" |
steps = _process_operator(pipeline_tree, operators)
pipeline_text = "make_pipeline(\n{STEPS}\n)".format(STEPS=_indent(",\n".join(steps), 4))
return pipeline_text |
<SYSTEM_TASK:>
Generate code specific to the construction of the sklearn Pipeline for export_pipeline.
<END_TASK>
<USER_TASK:>
Description:
def generate_export_pipeline_code(pipeline_tree, operators):
"""Generate code specific to the construction of the sklearn Pipeline for export_pipeline.
Parameters
----------
pipeline_tree: list
List of operators in the current optimized pipeline
Returns
-------
Source code for the sklearn pipeline
""" |
steps = _process_operator(pipeline_tree, operators)
# number of steps in a pipeline
num_step = len(steps)
if num_step > 1:
pipeline_text = "make_pipeline(\n{STEPS}\n)".format(STEPS=_indent(",\n".join(steps), 4))
# only one operator (root = True)
else:
pipeline_text = "{STEPS}".format(STEPS=_indent(",\n".join(steps), 0))
return pipeline_text |
<SYSTEM_TASK:>
Indent a multiline string by some number of spaces.
<END_TASK>
<USER_TASK:>
Description:
def _indent(text, amount):
"""Indent a multiline string by some number of spaces.
Parameters
----------
text: str
The text to be indented
amount: int
The number of spaces to indent the text
Returns
-------
indented_text
""" |
indentation = amount * ' '
return indentation + ('\n' + indentation).join(text.split('\n')) |
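The helper applied to a two-line string (its body is repeated here so the sketch runs on its own):

def _indent(text, amount):
    indentation = amount * ' '
    return indentation + ('\n' + indentation).join(text.split('\n'))

print(_indent("StandardScaler(),\nRidge()", 4))
#     StandardScaler(),
#     Ridge()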
<SYSTEM_TASK:>
Get the next value in the page.
<END_TASK>
<USER_TASK:>
Description:
def next(self):
"""Get the next value in the page.""" |
item = six.next(self._item_iter)
result = self._item_to_value(self._parent, item)
# Since we've successfully got the next value from the
# iterator, we update the number of remaining.
self._remaining -= 1
return result |
<SYSTEM_TASK:>
Verifies the parameters don't use any reserved parameter.
<END_TASK>
<USER_TASK:>
Description:
def _verify_params(self):
"""Verifies the parameters don't use any reserved parameter.
Raises:
ValueError: If a reserved parameter is used.
""" |
reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params)
if reserved_in_use:
raise ValueError("Using a reserved parameter", reserved_in_use) |
<SYSTEM_TASK:>
Getter for query parameters for the next request.
<END_TASK>
<USER_TASK:>
Description:
def _get_query_params(self):
"""Getter for query parameters for the next request.
Returns:
dict: A dictionary of query parameters.
""" |
result = {}
if self.next_page_token is not None:
result[self._PAGE_TOKEN] = self.next_page_token
if self.max_results is not None:
result[self._MAX_RESULTS] = self.max_results - self.num_results
result.update(self.extra_params)
return result |
<SYSTEM_TASK:>
Determines whether or not there are more pages with results.
<END_TASK>
<USER_TASK:>
Description:
def _has_next_page(self):
"""Determines whether or not there are more pages with results.
Returns:
bool: Whether the iterator has more pages.
""" |
if self.page_number == 0:
return True
if self.max_results is not None:
if self.num_results >= self.max_results:
return False
# Note: intentionally a falsy check instead of a None check. The RPC
# can return an empty string indicating no more pages.
return True if self.next_page_token else False |
<SYSTEM_TASK:>
Main comparison function for all Firestore types.
<END_TASK>
<USER_TASK:>
Description:
def compare(cls, left, right):
"""
Main comparison function for all Firestore types.
@return -1 if left < right, 0 if left == right, otherwise 1
""" |
# First compare the types.
leftType = TypeOrder.from_value(left).value
rightType = TypeOrder.from_value(right).value
if leftType != rightType:
if leftType < rightType:
return -1
return 1
value_type = left.WhichOneof("value_type")
if value_type == "null_value":
return 0 # nulls are all equal
elif value_type == "boolean_value":
return cls._compare_to(left.boolean_value, right.boolean_value)
elif value_type == "integer_value":
return cls.compare_numbers(left, right)
elif value_type == "double_value":
return cls.compare_numbers(left, right)
elif value_type == "timestamp_value":
return cls.compare_timestamps(left, right)
elif value_type == "string_value":
return cls._compare_to(left.string_value, right.string_value)
elif value_type == "bytes_value":
return cls.compare_blobs(left, right)
elif value_type == "reference_value":
return cls.compare_resource_paths(left, right)
elif value_type == "geo_point_value":
return cls.compare_geo_points(left, right)
elif value_type == "array_value":
return cls.compare_arrays(left, right)
elif value_type == "map_value":
return cls.compare_objects(left, right)
else:
raise ValueError("Unknown ``value_type``", str(value_type)) |
<SYSTEM_TASK:>
Run asynchronous image detection and annotation for a list of images.
<END_TASK>
<USER_TASK:>
Description:
def async_batch_annotate_images(
self,
requests,
output_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Run asynchronous image detection and annotation for a list of images.
Progress and results can be retrieved through the
``google.longrunning.Operations`` interface. ``Operation.metadata``
contains ``OperationMetadata`` (metadata). ``Operation.response``
contains ``AsyncBatchAnnotateImagesResponse`` (results).
This service will write image annotation outputs to json files in
customer GCS bucket, each json file containing
BatchAnnotateImagesResponse proto.
Example:
>>> from google.cloud import vision_v1p4beta1
>>>
>>> client = vision_v1p4beta1.ImageAnnotatorClient()
>>>
>>> # TODO: Initialize `requests`:
>>> requests = []
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.async_batch_annotate_images(requests, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
requests (list[Union[dict, ~google.cloud.vision_v1p4beta1.types.AnnotateImageRequest]]): Individual image annotation requests for this batch.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p4beta1.types.AnnotateImageRequest`
output_config (Union[dict, ~google.cloud.vision_v1p4beta1.types.OutputConfig]): Required. The desired output location and metadata (e.g. format).
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p4beta1.types.OutputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.vision_v1p4beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "async_batch_annotate_images" not in self._inner_api_calls:
self._inner_api_calls[
"async_batch_annotate_images"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.async_batch_annotate_images,
default_retry=self._method_configs["AsyncBatchAnnotateImages"].retry,
default_timeout=self._method_configs[
"AsyncBatchAnnotateImages"
].timeout,
client_info=self._client_info,
)
request = image_annotator_pb2.AsyncBatchAnnotateImagesRequest(
requests=requests, output_config=output_config
)
operation = self._inner_api_calls["async_batch_annotate_images"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
image_annotator_pb2.AsyncBatchAnnotateImagesResponse,
metadata_type=image_annotator_pb2.OperationMetadata,
) |
<SYSTEM_TASK:>
Called by IPython when this module is loaded as an IPython extension.
<END_TASK>
<USER_TASK:>
Description:
def load_ipython_extension(ipython):
"""Called by IPython when this module is loaded as an IPython extension.""" |
from google.cloud.bigquery.magics import _cell_magic
ipython.register_magic_function(
_cell_magic, magic_kind="cell", magic_name="bigquery"
) |
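From a Jupyter/IPython session the extension is loaded with the %load_ext line magic, after which the registered %%bigquery cell magic is available; the query and result variable name below are only illustrative:

# In a notebook cell:
#   %load_ext google.cloud.bigquery
#
# In a later cell, run a query and store the result in a DataFrame:
#   %%bigquery results_df
#   SELECT 17 AS answer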
<SYSTEM_TASK:>
Make a request over the Http transport to the Cloud Datastore API.
<END_TASK>
<USER_TASK:>
Description:
def _request(http, project, method, data, base_url):
"""Make a request over the Http transport to the Cloud Datastore API.
:type http: :class:`requests.Session`
:param http: HTTP object to make requests.
:type project: str
:param project: The project to make the request for.
:type method: str
:param method: The API call method name (ie, ``runQuery``,
``lookup``, etc)
:type data: str
:param data: The data to send with the API call.
Typically this is a serialized Protobuf string.
:type base_url: str
:param base_url: The base URL where the API lives.
:rtype: str
:returns: The string response content from the API call.
:raises: :class:`google.cloud.exceptions.GoogleCloudError` if the
response code is not 200 OK.
""" |
headers = {
"Content-Type": "application/x-protobuf",
"User-Agent": connection_module.DEFAULT_USER_AGENT,
connection_module.CLIENT_INFO_HEADER: _CLIENT_INFO,
}
api_url = build_api_url(project, method, base_url)
response = http.request(url=api_url, method="POST", headers=headers, data=data)
if response.status_code != 200:
error_status = status_pb2.Status.FromString(response.content)
raise exceptions.from_http_status(
response.status_code, error_status.message, errors=[error_status]
)
return response.content |
<SYSTEM_TASK:>
Make a protobuf RPC request.
<END_TASK>
<USER_TASK:>
Description:
def _rpc(http, project, method, base_url, request_pb, response_pb_cls):
"""Make a protobuf RPC request.
:type http: :class:`requests.Session`
:param http: HTTP object to make requests.
:type project: str
:param project: The project to connect to. This is
usually your project name in the cloud console.
:type method: str
:param method: The name of the method to invoke.
:type base_url: str
:param base_url: The base URL where the API lives.
:type request_pb: :class:`google.protobuf.message.Message` instance
:param request_pb: the protobuf instance representing the request.
:type response_pb_cls: A :class:`google.protobuf.message.Message`
subclass.
:param response_pb_cls: The class used to unmarshall the response
protobuf.
:rtype: :class:`google.protobuf.message.Message`
:returns: The RPC message parsed from the response.
""" |
req_data = request_pb.SerializeToString()
response = _request(http, project, method, req_data, base_url)
return response_pb_cls.FromString(response) |
<SYSTEM_TASK:>
Construct the URL for a particular API call.
<END_TASK>
<USER_TASK:>
Description:
def build_api_url(project, method, base_url):
"""Construct the URL for a particular API call.
This method is used internally to come up with the URL to use when
making RPCs to the Cloud Datastore API.
:type project: str
:param project: The project to connect to. This is
usually your project name in the cloud console.
:type method: str
:param method: The API method to call (e.g. 'runQuery', 'lookup').
:type base_url: str
:param base_url: The base URL where the API lives.
:rtype: str
:returns: The API URL created.
""" |
return API_URL_TEMPLATE.format(
api_base=base_url, api_version=API_VERSION, project=project, method=method
) |
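Filled in with illustrative values; the module-level API_URL_TEMPLATE and API_VERSION that this function relies on are reproduced here (as assumed constants) so the sketch runs standalone:

API_URL_TEMPLATE = "{api_base}/{api_version}/projects/{project}:{method}"
API_VERSION = "v1"

def build_api_url(project, method, base_url):
    # Fill the template with the base URL, API version, project, and method name.
    return API_URL_TEMPLATE.format(
        api_base=base_url, api_version=API_VERSION, project=project, method=method)

print(build_api_url("my-project", "runQuery", "https://datastore.googleapis.com"))
# https://datastore.googleapis.com/v1/projects/my-project:runQuery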
<SYSTEM_TASK:>
Perform a ``lookup`` request.
<END_TASK>
<USER_TASK:>
Description:
def lookup(self, project_id, keys, read_options=None):
"""Perform a ``lookup`` request.
:type project_id: str
:param project_id: The project to connect to. This is
usually your project name in the cloud console.
:type keys: List[.entity_pb2.Key]
:param keys: The keys to retrieve from the datastore.
:type read_options: :class:`.datastore_pb2.ReadOptions`
:param read_options: (Optional) The options for this lookup. Contains
either the transaction for the read or
``STRONG`` or ``EVENTUAL`` read consistency.
:rtype: :class:`.datastore_pb2.LookupResponse`
:returns: The returned protobuf response object.
""" |
request_pb = _datastore_pb2.LookupRequest(
project_id=project_id, read_options=read_options, keys=keys
)
return _rpc(
self.client._http,
project_id,
"lookup",
self.client._base_url,
request_pb,
_datastore_pb2.LookupResponse,
) |
<SYSTEM_TASK:>
Perform a ``runQuery`` request.
<END_TASK>
<USER_TASK:>
Description:
def run_query(
self, project_id, partition_id, read_options=None, query=None, gql_query=None
):
"""Perform a ``runQuery`` request.
:type project_id: str
:param project_id: The project to connect to. This is
usually your project name in the cloud console.
:type partition_id: :class:`.entity_pb2.PartitionId`
:param partition_id: Partition ID corresponding to an optional
namespace and project ID.
:type read_options: :class:`.datastore_pb2.ReadOptions`
:param read_options: (Optional) The options for this query. Contains
either the transaction for the read or
``STRONG`` or ``EVENTUAL`` read consistency.
:type query: :class:`.query_pb2.Query`
:param query: (Optional) The query protobuf to run. At most one of
``query`` and ``gql_query`` can be specified.
:type gql_query: :class:`.query_pb2.GqlQuery`
:param gql_query: (Optional) The GQL query to run. At most one of
``query`` and ``gql_query`` can be specified.
:rtype: :class:`.datastore_pb2.RunQueryResponse`
:returns: The returned protobuf response object.
""" |
request_pb = _datastore_pb2.RunQueryRequest(
project_id=project_id,
partition_id=partition_id,
read_options=read_options,
query=query,
gql_query=gql_query,
)
return _rpc(
self.client._http,
project_id,
"runQuery",
self.client._base_url,
request_pb,
_datastore_pb2.RunQueryResponse,
) |
<SYSTEM_TASK:>
Perform a ``beginTransaction`` request.
<END_TASK>
<USER_TASK:>
Description:
def begin_transaction(self, project_id, transaction_options=None):
"""Perform a ``beginTransaction`` request.
:type project_id: str
:param project_id: The project to connect to. This is
usually your project name in the cloud console.
:type transaction_options: ~.datastore_v1.types.TransactionOptions
:param transaction_options: (Optional) Options for a new transaction.
:rtype: :class:`.datastore_pb2.BeginTransactionResponse`
:returns: The returned protobuf response object.
""" |
request_pb = _datastore_pb2.BeginTransactionRequest()
# NOTE: ``transaction_options`` is accepted for interface compatibility but
# is not forwarded in this request.
return _rpc(
self.client._http,
project_id,
"beginTransaction",
self.client._base_url,
request_pb,
_datastore_pb2.BeginTransactionResponse,
) |
<SYSTEM_TASK:>
Perform a ``commit`` request.
<END_TASK>
<USER_TASK:>
Description:
def commit(self, project_id, mode, mutations, transaction=None):
"""Perform a ``commit`` request.
:type project_id: str
:param project_id: The project to connect to. This is
usually your project name in the cloud console.
:type mode: :class:`.gapic.datastore.v1.enums.CommitRequest.Mode`
:param mode: The type of commit to perform. Expected to be one of
``TRANSACTIONAL`` or ``NON_TRANSACTIONAL``.
:type mutations: list
:param mutations: List of :class:`.datastore_pb2.Mutation`, the
mutations to perform.
:type transaction: bytes
:param transaction: (Optional) The transaction ID returned from
:meth:`begin_transaction`. Non-transactional
commits must pass :data:`None`.
:rtype: :class:`.datastore_pb2.CommitResponse`
:returns: The returned protobuf response object.
""" |
request_pb = _datastore_pb2.CommitRequest(
project_id=project_id,
mode=mode,
transaction=transaction,
mutations=mutations,
)
return _rpc(
self.client._http,
project_id,
"commit",
self.client._base_url,
request_pb,
_datastore_pb2.CommitResponse,
) |
<SYSTEM_TASK:>
Perform a ``rollback`` request.
<END_TASK>
<USER_TASK:>
Description:
def rollback(self, project_id, transaction):
"""Perform a ``rollback`` request.
:type project_id: str
:param project_id: The project to connect to. This is
usually your project name in the cloud console.
:type transaction: bytes
:param transaction: The transaction ID to rollback.
:rtype: :class:`.datastore_pb2.RollbackResponse`
:returns: The returned protobuf response object.
""" |
request_pb = _datastore_pb2.RollbackRequest(
project_id=project_id, transaction=transaction
)
# Response is empty (i.e. no fields) but we return it anyway.
return _rpc(
self.client._http,
project_id,
"rollback",
self.client._base_url,
request_pb,
_datastore_pb2.RollbackResponse,
) |
<SYSTEM_TASK:>
Perform an ``allocateIds`` request.
<END_TASK>
<USER_TASK:>
Description:
def allocate_ids(self, project_id, keys):
"""Perform an ``allocateIds`` request.
:type project_id: str
:param project_id: The project to connect to. This is
usually your project name in the cloud console.
:type keys: List[.entity_pb2.Key]
:param keys: The keys for which the backend should allocate IDs.
:rtype: :class:`.datastore_pb2.AllocateIdsResponse`
:returns: The returned protobuf response object.
""" |
request_pb = _datastore_pb2.AllocateIdsRequest(keys=keys)
return _rpc(
self.client._http,
project_id,
"allocateIds",
self.client._base_url,
request_pb,
_datastore_pb2.AllocateIdsResponse,
) |
<SYSTEM_TASK:>
Creates a request to read rows in a table.
<END_TASK>
<USER_TASK:>
Description:
def _create_row_request(
table_name,
start_key=None,
end_key=None,
filter_=None,
limit=None,
end_inclusive=False,
app_profile_id=None,
row_set=None,
):
"""Creates a request to read rows in a table.
:type table_name: str
:param table_name: The name of the table to read from.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads the entire table.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:type end_inclusive: bool
:param end_inclusive: (Optional) Whether the ``end_key`` should be
considered inclusive. The default is False (exclusive).
:type app_profile_id: str
:param app_profile_id: (Optional) The unique name of the AppProfile.
:type row_set: :class:`row_set.RowSet`
:param row_set: (Optional) The row set containing multiple row keys and
row_ranges.
:rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
:returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``row_set`` and one of ``start_key`` or ``end_key`` are set
""" |
request_kwargs = {"table_name": table_name}
if (start_key is not None or end_key is not None) and row_set is not None:
raise ValueError("Row range and row set cannot be " "set simultaneously")
if filter_ is not None:
request_kwargs["filter"] = filter_.to_pb()
if limit is not None:
request_kwargs["rows_limit"] = limit
if app_profile_id is not None:
request_kwargs["app_profile_id"] = app_profile_id
message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)
if start_key is not None or end_key is not None:
row_set = RowSet()
row_set.add_row_range(RowRange(start_key, end_key, end_inclusive=end_inclusive))
if row_set is not None:
row_set._update_message_request(message)
return message |
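A brief sketch of how this helper might be called, reusing only names already present in this module; the table name is a placeholder.
# Read the half-open key range [b"user-000", b"user-999"), at most 100 rows.
request = _create_row_request(
    "projects/p/instances/i/tables/t",
    start_key=b"user-000",
    end_key=b"user-999",
    limit=100,
)

# Supplying both a key range and an explicit row set is rejected.
try:
    _create_row_request(
        "projects/p/instances/i/tables/t", start_key=b"user-000", row_set=RowSet()
    )
except ValueError:
    pass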
<SYSTEM_TASK:>
Creates a request to mutate rows in a table.
<END_TASK>
<USER_TASK:>
Description:
def _mutate_rows_request(table_name, rows, app_profile_id=None):
"""Creates a request to mutate rows in a table.
:type table_name: str
:param table_name: The name of the table to write to.
:type rows: list
:param rows: List or other iterable of :class:`.DirectRow` instances.
:type app_profile_id: str
:param app_profile_id: (Optional) The unique name of the AppProfile.
:rtype: :class:`data_messages_v2_pb2.MutateRowsRequest`
:returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs.
:raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is
greater than 100,000
""" |
request_pb = data_messages_v2_pb2.MutateRowsRequest(
table_name=table_name, app_profile_id=app_profile_id
)
mutations_count = 0
for row in rows:
_check_row_table_name(table_name, row)
_check_row_type(row)
mutations = row._get_mutations()
request_pb.entries.add(row_key=row.row_key, mutations=mutations)
mutations_count += len(mutations)
if mutations_count > _MAX_BULK_MUTATIONS:
raise TooManyMutationsError(
"Maximum number of mutations is %s" % (_MAX_BULK_MUTATIONS,)
)
return request_pb |
<SYSTEM_TASK:>
Checks that a row belongs to a table.
<END_TASK>
<USER_TASK:>
Description:
def _check_row_table_name(table_name, row):
"""Checks that a row belongs to a table.
:type table_name: str
:param table_name: The name of the table.
:type row: :class:`~google.cloud.bigtable.row.Row`
:param row: An instance of :class:`~google.cloud.bigtable.row.Row`
subclasses.
:raises: :exc:`~.table.TableMismatchError` if the row does not belong to
the table.
""" |
if row.table is not None and row.table.name != table_name:
raise TableMismatchError(
"Row %s is a part of %s table. Current table: %s"
% (row.row_key, row.table.name, table_name)
) |
<SYSTEM_TASK:>
Table name used in requests.
<END_TASK>
<USER_TASK:>
Description:
def name(self):
"""Table name used in requests.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_table_name]
:end-before: [END bigtable_table_name]
.. note::
This property will not change if ``table_id`` does not, but the
return value is not cached.
The table name is of the form
``"projects/../instances/../tables/{table_id}"``
:rtype: str
:returns: The table name.
""" |
project = self._instance._client.project
instance_id = self._instance.instance_id
table_client = self._instance._client.table_data_client
return table_client.table_path(
project=project, instance=instance_id, table=self.table_id
) |
<SYSTEM_TASK:>
Factory to create a row associated with this table.
<END_TASK>
<USER_TASK:>
Description:
def row(self, row_key, filter_=None, append=False):
"""Factory to create a row associated with this table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_table_row]
:end-before: [END bigtable_table_row]
.. warning::
At most one of ``filter_`` and ``append`` can be used in a
:class:`~google.cloud.bigtable.row.Row`.
:type row_key: bytes
:param row_key: The key for the row being created.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) Filter to be used for conditional mutations.
See :class:`.ConditionalRow` for more details.
:type append: bool
:param append: (Optional) Flag to determine if the row should be used
for append mutations.
:rtype: :class:`~google.cloud.bigtable.row.Row`
:returns: A row owned by this table.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``filter_`` and ``append`` are used.
""" |
if append and filter_ is not None:
raise ValueError("At most one of filter_ and append can be set")
if append:
return AppendRow(row_key, self)
elif filter_ is not None:
return ConditionalRow(row_key, self, filter_=filter_)
else:
return DirectRow(row_key, self) |
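A hedged usage sketch: ``table`` is assumed to be an existing Table and ``row_filter`` a previously built RowFilter.
direct_row = table.row(b"row-key-1")                            # DirectRow
conditional_row = table.row(b"row-key-2", filter_=row_filter)   # ConditionalRow
append_row = table.row(b"row-key-3", append=True)               # AppendRow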
<SYSTEM_TASK:>
Creates this table.
<END_TASK>
<USER_TASK:>
Description:
def create(self, initial_split_keys=[], column_families={}):
"""Creates this table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_create_table]
:end-before: [END bigtable_create_table]
.. note::
A create request returns a
:class:`._generated.table_pb2.Table` but we don't use
this response.
:type initial_split_keys: list
:param initial_split_keys: (Optional) list of row keys in bytes that
will be used to initially split the table
into several tablets.
:type column_families: dict
:param column_families: (Optional) A map of column families to create. The key is
the column_id str and the value is a
:class:`GarbageCollectionRule`
""" |
table_client = self._instance._client.table_admin_client
instance_name = self._instance.name
families = {
id: ColumnFamily(id, self, rule).to_pb()
for (id, rule) in column_families.items()
}
table = admin_messages_v2_pb2.Table(column_families=families)
split = table_admin_messages_v2_pb2.CreateTableRequest.Split
splits = [split(key=_to_bytes(key)) for key in initial_split_keys]
table_client.create_table(
parent=instance_name,
table_id=self.table_id,
table=table,
initial_splits=splits,
) |
<SYSTEM_TASK:>
Check whether the table exists.
<END_TASK>
<USER_TASK:>
Description:
def exists(self):
"""Check whether the table exists.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_check_table_exists]
:end-before: [END bigtable_check_table_exists]
:rtype: bool
:returns: True if the table exists, else False.
""" |
table_client = self._instance._client.table_admin_client
try:
table_client.get_table(name=self.name, view=VIEW_NAME_ONLY)
return True
except NotFound:
return False |
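A minimal sketch of the typical check-then-create pattern; ``table`` is assumed to be a Table that may not exist yet.
if not table.exists():
    table.create()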
<SYSTEM_TASK:>
List the column families owned by this table.
<END_TASK>
<USER_TASK:>
Description:
def list_column_families(self):
"""List the column families owned by this table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_list_column_families]
:end-before: [END bigtable_list_column_families]
:rtype: dict
:returns: Dictionary of column families attached to this table. Keys
are strings (column family names) and values are
:class:`.ColumnFamily` instances.
:raises: :class:`ValueError <exceptions.ValueError>` if the column
family name from the response does not agree with the computed
name from the column family ID.
""" |
table_client = self._instance._client.table_admin_client
table_pb = table_client.get_table(self.name)
result = {}
for column_family_id, value_pb in table_pb.column_families.items():
gc_rule = _gc_rule_from_pb(value_pb.gc_rule)
column_family = self.column_family(column_family_id, gc_rule=gc_rule)
result[column_family_id] = column_family
return result |
<SYSTEM_TASK:>
List the cluster states owned by this table.
<END_TASK>
<USER_TASK:>
Description:
def get_cluster_states(self):
"""List the cluster states owned by this table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_get_cluster_states]
:end-before: [END bigtable_get_cluster_states]
:rtype: dict
:returns: Dictionary of cluster states for this table.
Keys are cluster ids and values are
:class:`ClusterState` instances.
""" |
REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW
table_client = self._instance._client.table_admin_client
table_pb = table_client.get_table(self.name, view=REPLICATION_VIEW)
return {
cluster_id: ClusterState(value_pb.replication_state)
for cluster_id, value_pb in table_pb.cluster_states.items()
} |
<SYSTEM_TASK:>
Read a single row from this table.
<END_TASK>
<USER_TASK:>
Description:
def read_row(self, row_key, filter_=None):
"""Read a single row from this table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_read_row]
:end-before: [END bigtable_read_row]
:type row_key: bytes
:param row_key: The key of the row to read from.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
row. If unset, returns the entire row.
:rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
:returns: The contents of the row if any chunks were returned in
the response, otherwise :data:`None`.
:raises: :class:`ValueError <exceptions.ValueError>` if a commit row
chunk is never encountered.
""" |
row_set = RowSet()
row_set.add_row_key(row_key)
result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set))
row = next(result_iter, None)
if next(result_iter, None) is not None:
raise ValueError("More than one row was returned.")
return row |
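A hedged usage sketch; ``table`` is assumed to be an existing Table.
row = table.read_row(b"row-key-1")
if row is None:
    print("row not found")
else:
    # PartialRowData exposes the returned cells keyed by column family.
    print(row.cells)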
<SYSTEM_TASK:>
Mutates multiple rows in bulk.
<END_TASK>
<USER_TASK:>
Description:
def mutate_rows(self, rows, retry=DEFAULT_RETRY):
"""Mutates multiple rows in bulk.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_mutate_rows]
:end-before: [END bigtable_mutate_rows]
The method tries to update all specified rows.
If some rows are not updated, their mutations are kept and can be
applied again separately.
Mutations for rows that were updated successfully are cleared.
Optionally, a ``retry`` strategy can be specified to re-attempt
mutations on rows that return transient errors. This method will retry
until all rows succeed or until the request deadline is reached. To
specify a ``retry`` strategy of "do-nothing", a deadline of ``0.0``
can be specified.
:type rows: list
:param rows: List or other iterable of :class:`.DirectRow` instances.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry:
(Optional) Retry delay and deadline arguments. To override, the
default value :attr:`DEFAULT_RETRY` can be used and modified with
the :meth:`~google.api_core.retry.Retry.with_delay` method or the
:meth:`~google.api_core.retry.Retry.with_deadline` method.
:rtype: list
:returns: A list of response statuses (`google.rpc.status_pb2.Status`)
corresponding to success or failure of each row mutation
sent. These will be in the same order as the `rows`.
""" |
retryable_mutate_rows = _RetryableMutateRowsWorker(
self._instance._client,
self.name,
rows,
app_profile_id=self._app_profile_id,
timeout=self.mutation_timeout,
)
return retryable_mutate_rows(retry=retry) |
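A hedged usage sketch; ``table`` is assumed to be an existing Table with a column family named "cf1".
row = table.row(b"row-key-1")
row.set_cell("cf1", b"col", b"value")
statuses = table.mutate_rows([row])
for status in statuses:
    print(status.code)  # 0 indicates the mutation was applied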
<SYSTEM_TASK:>
Read a sample of row keys in the table.
<END_TASK>
<USER_TASK:>
Description:
def sample_row_keys(self):
"""Read a sample of row keys in the table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_sample_row_keys]
:end-before: [END bigtable_sample_row_keys]
The returned row keys will delimit contiguous sections of the table of
approximately equal size, which can be used to break up the data for
distributed tasks like mapreduces.
The elements in the iterator are a SampleRowKeys response and they have
the properties ``offset_bytes`` and ``row_key``. They occur in sorted
order. The table might have contents before the first row key in the
list and after the last one, but a key containing the empty string
indicates "end of table" and will be the last response given, if
present.
.. note::
Row keys in this list may not have ever been written to or read
from, and users should therefore not make any assumptions about the
row key structure that are specific to their use case.
The ``offset_bytes`` field on a response indicates the approximate
total storage space used by all rows in the table which precede
``row_key``. Buffering the contents of all rows between two subsequent
samples would require space roughly equal to the difference in their
``offset_bytes`` fields.
:rtype: :class:`~google.cloud.exceptions.GrpcRendezvous`
:returns: A cancel-able iterator. Can be consumed by calling ``next()``
or by casting to a :class:`list` and can be cancelled by
calling ``cancel()``.
""" |
data_client = self._instance._client.table_data_client
response_iterator = data_client.sample_row_keys(
self.name, app_profile_id=self._app_profile_id
)
return response_iterator |
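A minimal usage sketch; each item in the iterator carries the ``row_key`` and ``offset_bytes`` fields described above.
for sample in table.sample_row_keys():
    print(sample.row_key, sample.offset_bytes)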
<SYSTEM_TASK:>
Truncate the table
<END_TASK>
<USER_TASK:>
Description:
def truncate(self, timeout=None):
"""Truncate the table
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_truncate_table]
:end-before: [END bigtable_truncate_table]
:type timeout: float
:param timeout: (Optional) The amount of time, in seconds, to wait
for the request to complete.
:raise: google.api_core.exceptions.GoogleAPICallError: If the
request failed for any reason.
google.api_core.exceptions.RetryError: If the request failed
due to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
client = self._instance._client
table_admin_client = client.table_admin_client
if timeout:
table_admin_client.drop_row_range(
self.name, delete_all_data_from_table=True, timeout=timeout
)
else:
table_admin_client.drop_row_range(
self.name, delete_all_data_from_table=True
) |
<SYSTEM_TASK:>
Factory to create a mutation batcher associated with this instance.
<END_TASK>
<USER_TASK:>
Description:
def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES):
"""Factory to create a mutation batcher associated with this instance.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_mutations_batcher]
:end-before: [END bigtable_mutations_batcher]
:type flush_count: int
:param flush_count: (Optional) Maximum number of rows per batch. If it
reaches the max number of rows it calls finish_batch() to
mutate the current row batch. Default is FLUSH_COUNT (1000
rows).
:type max_row_bytes: int
:param max_row_bytes: (Optional) Max number of row mutations size to
flush. If it reaches the max number of row mutations size it
calls finish_batch() to mutate the current row batch.
Default is MAX_ROW_BYTES (5 MB).
""" |
return MutationsBatcher(self, flush_count, max_row_bytes) |
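A hedged usage sketch; ``table`` is assumed to be an existing Table and the batcher's ``mutate``/``flush`` methods are used as in the library's snippets.
batcher = table.mutations_batcher(flush_count=500)
row = table.row(b"row-key-1")
row.set_cell("cf1", b"col", b"value")
batcher.mutate(row)  # queued; flushed automatically once a limit is reached
batcher.flush()      # push any remaining queued mutations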
<SYSTEM_TASK:>
Mutate all the rows that are eligible for retry.
<END_TASK>
<USER_TASK:>
Description:
def _do_mutate_retryable_rows(self):
"""Mutate all the rows that are eligible for retry.
A row is eligible for retry if it has not been tried or if it resulted
in a transient error in a previous call.
:rtype: list
:return: The responses statuses, which is a list of
:class:`~google.rpc.status_pb2.Status`.
:raises: One of the following:
* :exc:`~.table._BigtableRetryableError` if any
row returned a transient error.
* :exc:`RuntimeError` if the number of responses doesn't
match the number of rows that were retried
""" |
retryable_rows = []
index_into_all_rows = []
for index, status in enumerate(self.responses_statuses):
if self._is_retryable(status):
retryable_rows.append(self.rows[index])
index_into_all_rows.append(index)
if not retryable_rows:
# All mutations are either successful or non-retryable now.
return self.responses_statuses
mutate_rows_request = _mutate_rows_request(
self.table_name, retryable_rows, app_profile_id=self.app_profile_id
)
data_client = self.client.table_data_client
inner_api_calls = data_client._inner_api_calls
if "mutate_rows" not in inner_api_calls:
default_retry = data_client._method_configs["MutateRows"].retry
if self.timeout is None:
default_timeout = data_client._method_configs["MutateRows"].timeout
else:
default_timeout = timeout.ExponentialTimeout(deadline=self.timeout)
data_client._inner_api_calls["mutate_rows"] = wrap_method(
data_client.transport.mutate_rows,
default_retry=default_retry,
default_timeout=default_timeout,
client_info=data_client._client_info,
)
responses = data_client._inner_api_calls["mutate_rows"](
mutate_rows_request, retry=None
)
num_responses = 0
num_retryable_responses = 0
for response in responses:
for entry in response.entries:
num_responses += 1
index = index_into_all_rows[entry.index]
self.responses_statuses[index] = entry.status
if self._is_retryable(entry.status):
num_retryable_responses += 1
if entry.status.code == 0:
self.rows[index].clear()
if len(retryable_rows) != num_responses:
raise RuntimeError(
"Unexpected number of responses",
num_responses,
"Expected",
len(retryable_rows),
)
if num_retryable_responses:
raise _BigtableRetryableError
return self.responses_statuses |
<SYSTEM_TASK:>
Periodically send heartbeats.
<END_TASK>
<USER_TASK:>
Description:
def heartbeat(self):
"""Periodically send heartbeats.""" |
while self._manager.is_active and not self._stop_event.is_set():
self._manager.heartbeat()
_LOGGER.debug("Sent heartbeat.")
self._stop_event.wait(timeout=self._period)
_LOGGER.info("%s exiting.", _HEARTBEAT_WORKER_NAME) |
<SYSTEM_TASK:>
Report an individual error event.
<END_TASK>
<USER_TASK:>
Description:
def report_error_event(
self,
project_name,
event,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Report an individual error event.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ReportErrorsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `event`:
>>> event = {}
>>>
>>> response = client.report_error_event(project_name, event)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
event (Union[dict, ~google.cloud.errorreporting_v1beta1.types.ReportedErrorEvent]): [Required] The error event to be reported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.ReportedErrorEvent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.ReportErrorEventResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "report_error_event" not in self._inner_api_calls:
self._inner_api_calls[
"report_error_event"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.report_error_event,
default_retry=self._method_configs["ReportErrorEvent"].retry,
default_timeout=self._method_configs["ReportErrorEvent"].timeout,
client_info=self._client_info,
)
request = report_errors_service_pb2.ReportErrorEventRequest(
project_name=project_name, event=event
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["report_error_event"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Convert a scalar value into a query parameter.
<END_TASK>
<USER_TASK:>
Description:
def scalar_to_query_parameter(value, name=None):
"""Convert a scalar value into a query parameter.
:type value: any
:param value: A scalar value to convert into a query parameter.
:type name: str
:param name: (Optional) Name of the query parameter.
:rtype: :class:`~google.cloud.bigquery.ScalarQueryParameter`
:returns:
A query parameter corresponding with the type and value of the plain
Python object.
:raises: :class:`~google.cloud.bigquery.dbapi.exceptions.ProgrammingError`
if the type cannot be determined.
""" |
parameter_type = None
if isinstance(value, bool):
parameter_type = "BOOL"
elif isinstance(value, numbers.Integral):
parameter_type = "INT64"
elif isinstance(value, numbers.Real):
parameter_type = "FLOAT64"
elif isinstance(value, decimal.Decimal):
parameter_type = "NUMERIC"
elif isinstance(value, six.text_type):
parameter_type = "STRING"
elif isinstance(value, six.binary_type):
parameter_type = "BYTES"
elif isinstance(value, datetime.datetime):
parameter_type = "DATETIME" if value.tzinfo is None else "TIMESTAMP"
elif isinstance(value, datetime.date):
parameter_type = "DATE"
elif isinstance(value, datetime.time):
parameter_type = "TIME"
else:
raise exceptions.ProgrammingError(
"encountered parameter {} with value {} of unexpected type".format(
name, value
)
)
return bigquery.ScalarQueryParameter(name, parameter_type, value) |
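To make the branch order easy to check without the BigQuery client installed, here is a standalone sketch of just the type-detection logic; str/bytes stand in for six.text_type/six.binary_type, and the ScalarQueryParameter construction is intentionally omitted.
import datetime
import decimal
import numbers

def detect_parameter_type(value):
    # Mirrors the branch order above; bool must precede Integral because
    # bool is a subclass of int, and datetime must precede date.
    if isinstance(value, bool):
        return "BOOL"
    if isinstance(value, numbers.Integral):
        return "INT64"
    if isinstance(value, numbers.Real):
        return "FLOAT64"
    if isinstance(value, decimal.Decimal):
        return "NUMERIC"
    if isinstance(value, str):
        return "STRING"
    if isinstance(value, bytes):
        return "BYTES"
    if isinstance(value, datetime.datetime):
        return "DATETIME" if value.tzinfo is None else "TIMESTAMP"
    if isinstance(value, datetime.date):
        return "DATE"
    if isinstance(value, datetime.time):
        return "TIME"
    raise ValueError("unexpected type: %r" % (value,))

assert detect_parameter_type(True) == "BOOL"
assert detect_parameter_type(3) == "INT64"
assert detect_parameter_type(decimal.Decimal("1.5")) == "NUMERIC"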
<SYSTEM_TASK:>
Converts a dictionary of parameter values into query parameters.
<END_TASK>
<USER_TASK:>
Description:
def to_query_parameters_dict(parameters):
"""Converts a dictionary of parameter values into query parameters.
:type parameters: Mapping[str, Any]
:param parameters: Dictionary of query parameter values.
:rtype: List[google.cloud.bigquery.query._AbstractQueryParameter]
:returns: A list of named query parameters.
""" |
return [
scalar_to_query_parameter(value, name=name)
for name, value in six.iteritems(parameters)
] |
<SYSTEM_TASK:>
Converts DB-API parameter values into query parameters.
<END_TASK>
<USER_TASK:>
Description:
def to_query_parameters(parameters):
"""Converts DB-API parameter values into query parameters.
:type parameters: Mapping[str, Any] or Sequence[Any]
:param parameters: A dictionary or sequence of query parameter values.
:rtype: List[google.cloud.bigquery.query._AbstractQueryParameter]
:returns: A list of query parameters.
""" |
if parameters is None:
return []
if isinstance(parameters, collections_abc.Mapping):
return to_query_parameters_dict(parameters)
return to_query_parameters_list(parameters) |
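A short usage sketch, assuming the google-cloud-bigquery package that defines these helpers is installed.
named = to_query_parameters({"min_age": 21, "state": "CA"})  # named parameters
positional = to_query_parameters([21, "CA"])                 # positional parameters
empty = to_query_parameters(None)                            # -> []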
<SYSTEM_TASK:>
Refresh an operation using a gRPC client.
<END_TASK>
<USER_TASK:>
Description:
def _refresh_grpc(operations_stub, operation_name):
"""Refresh an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
Returns:
google.longrunning.operations_pb2.Operation: The operation.
""" |
request_pb = operations_pb2.GetOperationRequest(name=operation_name)
return operations_stub.GetOperation(request_pb) |
<SYSTEM_TASK:>
Cancel an operation using a gRPC client.
<END_TASK>
<USER_TASK:>
Description:
def _cancel_grpc(operations_stub, operation_name):
"""Cancel an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
""" |
request_pb = operations_pb2.CancelOperationRequest(name=operation_name)
operations_stub.CancelOperation(request_pb) |
<SYSTEM_TASK:>
Create an operation future using a gRPC client.
<END_TASK>
<USER_TASK:>
Description:
def from_grpc(operation, operations_stub, result_type, **kwargs):
"""Create an operation future using a gRPC client.
This interacts with the long-running operations `service`_ (specific
to a given API) via gRPC.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The operations stub.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
""" |
refresh = functools.partial(_refresh_grpc, operations_stub, operation.name)
cancel = functools.partial(_cancel_grpc, operations_stub, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs) |
<SYSTEM_TASK:>
Create an operation future from a gapic client.
<END_TASK>
<USER_TASK:>
Description:
def from_gapic(operation, operations_client, result_type, **kwargs):
"""Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
""" |
refresh = functools.partial(operations_client.get_operation, operation.name)
cancel = functools.partial(operations_client.cancel_operation, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs) |
<SYSTEM_TASK:>
Set the result or exception from the operation if it is complete.
<END_TASK>
<USER_TASK:>
Description:
def _set_result_from_operation(self):
"""Set the result or exception from the operation if it is complete.""" |
# This must be done in a lock to prevent the polling thread
# and main thread from both executing the completion logic
# at the same time.
with self._completion_lock:
# If the operation isn't complete or if the result has already been
# set, do not call set_result/set_exception again.
# Note: self._result_set is set to True in set_result and
# set_exception, in case those methods are invoked directly.
if not self._operation.done or self._result_set:
return
if self._operation.HasField("response"):
response = protobuf_helpers.from_any_pb(
self._result_type, self._operation.response
)
self.set_result(response)
elif self._operation.HasField("error"):
exception = exceptions.GoogleAPICallError(
self._operation.error.message,
errors=(self._operation.error,),
response=self._operation,
)
self.set_exception(exception)
else:
exception = exceptions.GoogleAPICallError(
"Unexpected state: Long-running operation had neither "
"response nor error set."
)
self.set_exception(exception) |
<SYSTEM_TASK:>
Refresh the operation and update the result if needed.
<END_TASK>
<USER_TASK:>
Description:
def _refresh_and_update(self):
"""Refresh the operation and update the result if needed.""" |
# If the currently cached operation is done, no need to make another
# RPC as it will not change once done.
if not self._operation.done:
self._operation = self._refresh()
self._set_result_from_operation() |
<SYSTEM_TASK:>
True if the operation was cancelled.
<END_TASK>
<USER_TASK:>
Description:
def cancelled(self):
"""True if the operation was cancelled.""" |
self._refresh_and_update()
return (
self._operation.HasField("error")
and self._operation.error.code == code_pb2.CANCELLED
) |
<SYSTEM_TASK:>
Remove a role from the entity.
<END_TASK>
<USER_TASK:>
Description:
def revoke(self, role):
"""Remove a role from the entity.
:type role: str
:param role: The role to remove from the entity.
""" |
if role in self.roles:
self.roles.remove(role) |
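A hedged usage sketch; ``acl`` is assumed to be a loaded bucket or blob ACL from google-cloud-storage.
entity = acl.user("alice@example.com")
entity.grant("READER")
entity.revoke("OWNER")
acl.save()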
<SYSTEM_TASK:>
Ensures predefined is in list of predefined json values
<END_TASK>
<USER_TASK:>
Description:
def validate_predefined(cls, predefined):
"""Ensures predefined is in list of predefined json values
:type predefined: str
:param predefined: name of a predefined acl
:rtype: str
:returns: The validated JSON name of the predefined acl.
:raises: :exc:`ValueError` if ``predefined`` is not a valid acl
""" |
predefined = cls.PREDEFINED_XML_ACLS.get(predefined, predefined)
if predefined and predefined not in cls.PREDEFINED_JSON_ACLS:
raise ValueError("Invalid predefined ACL: %s" % (predefined,))
return predefined |
<SYSTEM_TASK:>
Build an _ACLEntity object from a dictionary of data.
<END_TASK>
<USER_TASK:>
Description:
def entity_from_dict(self, entity_dict):
"""Build an _ACLEntity object from a dictionary of data.
An entity is a mutable object that represents a list of roles
belonging to either a user or group or the special types for all
users and all authenticated users.
:type entity_dict: dict
:param entity_dict: Dictionary full of data from an ACL lookup.
:rtype: :class:`_ACLEntity`
:returns: An Entity constructed from the dictionary.
""" |
entity = entity_dict["entity"]
role = entity_dict["role"]
if entity == "allUsers":
entity = self.all()
elif entity == "allAuthenticatedUsers":
entity = self.all_authenticated()
elif "-" in entity:
entity_type, identifier = entity.split("-", 1)
entity = self.entity(entity_type=entity_type, identifier=identifier)
if not isinstance(entity, _ACLEntity):
raise ValueError("Invalid dictionary: %s" % entity_dict)
entity.grant(role)
return entity |
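A minimal illustration of the dictionary shape this method expects; ``acl`` is assumed to be an ACL instance.
entity = acl.entity_from_dict({"entity": "user-alice@example.com", "role": "READER"})
print(entity.roles)  # {'READER'}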
<SYSTEM_TASK:>
Gets an entity object from the ACL.
<END_TASK>
<USER_TASK:>
Description:
def get_entity(self, entity, default=None):
"""Gets an entity object from the ACL.
:type entity: :class:`_ACLEntity` or string
:param entity: The entity to get lookup in the ACL.
:type default: anything
:param default: This value will be returned if the entity
doesn't exist.
:rtype: :class:`_ACLEntity`
:returns: The corresponding entity or the value provided
to ``default``.
""" |
self._ensure_loaded()
return self.entities.get(str(entity), default) |
<SYSTEM_TASK:>
Add an entity to the ACL.
<END_TASK>
<USER_TASK:>
Description:
def add_entity(self, entity):
"""Add an entity to the ACL.
:type entity: :class:`_ACLEntity`
:param entity: The entity to add to this ACL.
""" |
self._ensure_loaded()
self.entities[str(entity)] = entity |
<SYSTEM_TASK:>
Factory method for creating an Entity.
<END_TASK>
<USER_TASK:>
Description:
def entity(self, entity_type, identifier=None):
"""Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(ie, ``user``, ``group``, etc)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity.
""" |
entity = _ACLEntity(entity_type=entity_type, identifier=identifier)
if self.has_entity(entity):
entity = self.get_entity(entity)
else:
self.add_entity(entity)
return entity |
<SYSTEM_TASK:>
Reload the ACL data from Cloud Storage.
<END_TASK>
<USER_TASK:>
Description:
def reload(self, client=None):
"""Reload the ACL data from Cloud Storage.
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
""" |
path = self.reload_path
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
self.entities.clear()
found = client._connection.api_request(
method="GET", path=path, query_params=query_params
)
self.loaded = True
for entry in found.get("items", ()):
self.add_entity(self.entity_from_dict(entry)) |
<SYSTEM_TASK:>
Save this ACL for the current bucket.
<END_TASK>
<USER_TASK:>
Description:
def save(self, acl=None, client=None):
"""Save this ACL for the current bucket.
If :attr:`user_project` is set, bills the API request to that project.
:type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
:param acl: The ACL object to save. If left blank, this will save
current entries.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
""" |
if acl is None:
acl = self
save_to_backend = acl.loaded
else:
save_to_backend = True
if save_to_backend:
self._save(acl, None, client) |
<SYSTEM_TASK:>
Save this ACL for the current bucket using a predefined ACL.
<END_TASK>
<USER_TASK:>
Description:
def save_predefined(self, predefined, client=None):
"""Save this ACL for the current bucket using a predefined ACL.
If :attr:`user_project` is set, bills the API request to that project.
:type predefined: str
:param predefined: An identifier for a predefined ACL. Must be one
of the keys in :attr:`PREDEFINED_JSON_ACLS`
or :attr:`PREDEFINED_XML_ACLS` (which will be
aliased to the corresponding JSON name).
If passed, `acl` must be None.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
""" |
predefined = self.validate_predefined(predefined)
self._save(None, predefined, client) |
<SYSTEM_TASK:>
Return a fully-qualified incident string.
<END_TASK>
<USER_TASK:>
Description:
def incident_path(cls, project, incident):
"""Return a fully-qualified incident string.""" |
return google.api_core.path_template.expand(
"projects/{project}/incidents/{incident}",
project=project,
incident=incident,
) |
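A standalone sketch of what the expansion produces, using only google.api_core.path_template (installed with google-api-core).
from google.api_core import path_template

print(path_template.expand(
    "projects/{project}/incidents/{incident}",
    project="my-project",
    incident="incident-123",
))
# -> projects/my-project/incidents/incident-123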
<SYSTEM_TASK:>
Return a fully-qualified annotation string.
<END_TASK>
<USER_TASK:>
Description:
def annotation_path(cls, project, incident, annotation):
"""Return a fully-qualified annotation string.""" |
return google.api_core.path_template.expand(
"projects/{project}/incidents/{incident}/annotations/{annotation}",
project=project,
incident=incident,
annotation=annotation,
) |
<SYSTEM_TASK:>
Return a fully-qualified artifact string.
<END_TASK>
<USER_TASK:>
Description:
def artifact_path(cls, project, incident, artifact):
"""Return a fully-qualified artifact string.""" |
return google.api_core.path_template.expand(
"projects/{project}/incidents/{incident}/artifacts/{artifact}",
project=project,
incident=incident,
artifact=artifact,
) |
<SYSTEM_TASK:>
Return a fully-qualified role_assignment string.
<END_TASK>
<USER_TASK:>
Description:
def role_assignment_path(cls, project, incident, role_assignment):
"""Return a fully-qualified role_assignment string.""" |
return google.api_core.path_template.expand(
"projects/{project}/incidents/{incident}/roleAssignments/{role_assignment}",
project=project,
incident=incident,
role_assignment=role_assignment,
) |
<SYSTEM_TASK:>
Return a fully-qualified subscription string.
<END_TASK>
<USER_TASK:>
Description:
def subscription_path(cls, project, incident, subscription):
"""Return a fully-qualified subscription string.""" |
return google.api_core.path_template.expand(
"projects/{project}/incidents/{incident}/subscriptions/{subscription}",
project=project,
incident=incident,
subscription=subscription,
) |