<SYSTEM_TASK:>
Get dictionary representation of gradient arrays.
<END_TASK>
<USER_TASK:>
Description:
def grad_dict(self):
"""Get dictionary representation of gradient arrays.
Returns
-------
grad_dict : dict of str to NDArray
The dictionary that maps names of arguments to gradient arrays.
"""
if self._grad_dict is None:
self._grad_dict = Executor._get_dict(
self._symbol.list_arguments(), self.grad_arrays)
return self._grad_dict
<SYSTEM_TASK:>
Get dictionary representation of auxiliary states arrays.
<END_TASK>
<USER_TASK:>
Description:
def aux_dict(self):
"""Get dictionary representation of auxiliary states arrays.
Returns
-------
aux_dict : dict of str to NDArray
The dictionary that maps names of auxiliary states to NDArrays.
Raises
------
ValueError : if there are duplicated names in the auxiliary states.
""" |
if self._aux_dict is None:
self._aux_dict = Executor._get_dict(
self._symbol.list_auxiliary_states(), self.aux_arrays)
return self._aux_dict
<SYSTEM_TASK:>
Get dictionary representation of output arrays.
<END_TASK>
<USER_TASK:>
Description:
def output_dict(self):
"""Get dictionary representation of output arrays.
Returns
-------
output_dict : dict of str to NDArray
The dictionary that maps output names to NDArrays.
Raises
------
ValueError : if there are duplicated names in the outputs.
""" |
if self._output_dict is None:
self._output_dict = Executor._get_dict(
self._symbol.list_outputs(), self.outputs)
return self._output_dict
<SYSTEM_TASK:>
Copy parameters from arg_params, aux_params into executor's internal array.
<END_TASK>
<USER_TASK:>
Description:
def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False):
"""Copy parameters from arg_params, aux_params into executor's internal array.
Parameters
----------
arg_params : dict of str to NDArray
Parameters, dict of name to NDArray of arguments.
aux_params : dict of str to NDArray, optional
Parameters, dict of name to NDArray of auxiliary states.
allow_extra_params : boolean, optional
Whether to allow extra parameters that are not needed by the symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that are not needed by the executor.
Raises
------
ValueError
If there are additional parameters in the dict but ``allow_extra_params=False``.
Examples
--------
>>> # set parameters with existing model checkpoint
>>> model_prefix = 'mx_mlp'
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
>>> texec.copy_params_from(arg_params, aux_params)
""" |
for name, array in arg_params.items():
if name in self.arg_dict:
dst = self.arg_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Find name \"%s\" that is not in the arguments' % name)
if aux_params is None:
return
for name, array in aux_params.items():
if name in self.aux_dict:
dst = self.aux_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Found name "%s" that is not in the auxiliary states' % name)
<SYSTEM_TASK:>
Helper function to split params dictionary into args and aux params
<END_TASK>
<USER_TASK:>
Description:
def split_params(sym, params):
"""Helper function to split params dictionary into args and aux params
Parameters
----------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Returns
-------
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
""" |
arg_params = {}
aux_params = {}
for args in sym.list_arguments():
if args in params:
arg_params.update({args: nd.array(params[args])})
for aux in sym.list_auxiliary_states():
if aux in params:
aux_params.update({aux: nd.array(params[aux])})
return arg_params, aux_params
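A minimal usage sketch (hedged: `sym` and `params` are assumed to be a loaded mxnet Symbol and a flat name-to-array dict; the names are illustrative):
# hypothetical usage: partition a flat weight dict by its role in the graph
arg_params, aux_params = split_params(sym, params)
assert set(arg_params).issubset(set(sym.list_arguments()))
assert set(aux_params).issubset(set(sym.list_auxiliary_states()))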
<SYSTEM_TASK:>
Infer output shapes and return dictionary of output name to shape
<END_TASK>
<USER_TASK:>
Description:
def get_outputs(sym, params, in_shape, in_label):
""" Infer output shapes and return dictionary of output name to shape
:param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on
:param dict of (str, nd.NDArray) params: dict mapping parameter names to NDArrays
:param list of tuple(int, ...) in_shape: list of all input shapes
:param in_label: name of label typically used in loss that may be left in graph. This name is
removed from list of inputs required by symbol
:return: dictionary of output name to shape
:rtype: dict of (str, tuple(int, ...))
""" |
# remove any input listed in params from sym.list_inputs() and bind them to the input shapes provided
# by user. Also remove in_label, which is the name of the label symbol that may have been used
# as the label for loss during training.
inputs = {n: tuple(s) for n, s in zip([n for n in sym.list_inputs() if n not in params and n != in_label],
in_shape)}
# Add params and their shape to list of inputs
inputs.update({n: v.shape for n, v in params.items() if n in sym.list_inputs()})
# Provide input data as well as input params to infer_shape()
_, out_shapes, _ = sym.infer_shape(**inputs)
out_names = list()
for name in sym.list_outputs():
if name.endswith('_output'):
out_names.append(name[:-len('_output')])
else:
logging.info("output '%s' does not end with '_output'", name)
out_names.append(name)
assert len(out_shapes) == len(out_names)
# bind output shapes with output names
graph_outputs = {n: s for n, s in zip(out_names, out_shapes)}
return graph_outputs
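A usage sketch under the same assumptions (a loaded `sym` and `params`; the input shape and label name are illustrative):
# hypothetical sketch: infer output shapes for a single-input image model
graph_outputs = get_outputs(sym, params, in_shape=[(1, 3, 224, 224)], in_label='softmax_label')
# e.g. {'softmax': (1, 1000)} for a 1000-class classifier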
<SYSTEM_TASK:>
Convert weights to numpy
<END_TASK>
<USER_TASK:>
Description:
def convert_weights_to_numpy(weights_dict):
"""Convert weights to numpy""" |
return {k.replace("arg:", "").replace("aux:", ""): v.asnumpy()
for k, v in weights_dict.items()}
<SYSTEM_TASK:>
Compute learning rate and refactor scheduler
<END_TASK>
<USER_TASK:>
Description:
def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio,
num_example, batch_size, begin_epoch):
"""
Compute learning rate and refactor scheduler
Parameters:
---------
learning_rate : float
original learning rate
lr_refactor_step : comma separated str
epochs to change learning rate
lr_refactor_ratio : float
lr *= ratio at certain steps
num_example : int
number of training images, used to estimate the iterations given epochs
batch_size : int
training batch size
begin_epoch : int
starting epoch
Returns:
---------
(learning_rate, mx.lr_scheduler) as tuple
""" |
assert lr_refactor_ratio > 0
iter_refactor = [int(r) for r in lr_refactor_step.split(',') if r.strip()]
if lr_refactor_ratio >= 1:
return (learning_rate, None)
else:
lr = learning_rate
epoch_size = num_example // batch_size
for s in iter_refactor:
if begin_epoch >= s:
lr *= lr_refactor_ratio
if lr != learning_rate:
logging.getLogger().info("Adjusted learning rate to {} for epoch {}".format(lr, begin_epoch))
steps = [epoch_size * (x - begin_epoch) for x in iter_refactor if x > begin_epoch]
if not steps:
return (lr, None)
lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=lr_refactor_ratio)
return (lr, lr_scheduler)
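A worked example (a sketch; the numbers are illustrative): with num_example=32000 and batch_size=32, epoch_size is 1000 iterations. Resuming at begin_epoch=100 with steps '80,160' and ratio 0.1, the epoch-80 step has already passed, so the returned rate is 0.004 * 0.1 = 0.0004 and the scheduler keeps a single step at (160 - 100) * 1000 = 60000 iterations.
lr, scheduler = get_lr_scheduler(0.004, '80,160', 0.1,
num_example=32000, batch_size=32, begin_epoch=100)
# lr == 0.0004; scheduler is a MultiFactorScheduler with step=[60000], factor=0.1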
<SYSTEM_TASK:>
This is a set of 50 images representative of ImageNet images.
<END_TASK>
<USER_TASK:>
Description:
def imagenet50(display=False, resolution=224):
""" This is a set of 50 images representative of ImageNet images.
This dataset was collected by randomly finding a working ImageNet link and then pasting the
original ImageNet image into Google image search restricted to images licensed for reuse. A
similar image (now with rights to reuse) was downloaded as a rough replacement for the original
ImageNet image. The point is to have a random sample of ImageNet for use as a background
distribution for explaining models trained on ImageNet data.
Note that because the images are only rough replacements the labels might no longer be correct.
""" |
prefix = github_data_url + "imagenet50_"
X = np.load(cache(prefix + "%sx%s.npy" % (resolution, resolution))).astype(np.float32)
y = np.loadtxt(cache(prefix + "labels.csv"))
return X, y
<SYSTEM_TASK:>
Return the Boston housing data in a nice package.
<END_TASK>
<USER_TASK:>
Description:
def boston(display=False):
""" Return the boston housing data in a nice package. """ |
d = sklearn.datasets.load_boston()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target
<SYSTEM_TASK:>
Return the classic IMDB sentiment analysis training data in a nice package.
<END_TASK>
<USER_TASK:>
Description:
def imdb(display=False):
""" Return the clssic IMDB sentiment analysis training data in a nice package.
Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015
""" |
with open(cache(github_data_url + "imdb_train.txt")) as f:
data = f.readlines()
y = np.ones(25000, dtype=bool)
y[:12500] = 0
return data, y
<SYSTEM_TASK:>
Predict total number of non-violent crimes per 100K population.
<END_TASK>
<USER_TASK:>
Description:
def communitiesandcrime(display=False):
""" Predict total number of non-violent crimes per 100K popuation.
This dataset is from the classic UCI Machine Learning repository:
https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized
""" |
raw_data = pd.read_csv(
cache(github_data_url + "CommViolPredUnnormalizedData.txt"),
na_values="?"
)
# find the indices where the total violent crimes are known
valid_inds = np.where(np.invert(np.isnan(raw_data.iloc[:,-2])))[0]
y = np.array(raw_data.iloc[valid_inds,-2], dtype=float)
# extract the predictive features and remove columns with missing values
X = raw_data.iloc[valid_inds,5:-18]
valid_cols = np.where(np.isnan(X.values).sum(0) == 0)[0]
X = X.iloc[:,valid_cols]
return X, y
<SYSTEM_TASK:>
Return the classic iris data in a nice package.
<END_TASK>
<USER_TASK:>
Description:
def iris(display=False):
""" Return the classic iris data in a nice package. """ |
d = sklearn.datasets.load_iris()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
if display:
return df, [d.target_names[v] for v in d.target] # pylint: disable=E1101
else:
return df, d.target
<SYSTEM_TASK:>
Return the Adult census data in a nice package.
<END_TASK>
<USER_TASK:>
Description:
def adult(display=False):
""" Return the Adult census data in a nice package. """ |
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_data = pd.read_csv(
cache(github_data_url + "adult.data"),
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
data = raw_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
data["Target"] = data["Target"] == " >50K"
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
data[k] = np.array([rcode[v.strip()] for v in data[k]])
else:
data[k] = data[k].cat.codes
if display:
return raw_data.drop(["Education", "Target", "fnlwgt"], axis=1), data["Target"].values
else:
return data.drop(["Target", "fnlwgt"], axis=1), data["Target"].values
<SYSTEM_TASK:>
A nicely packaged version of NHANES I data with survival times as labels.
<END_TASK>
<USER_TASK:>
Description:
def nhanesi(display=False):
""" A nicely packaged version of NHANES I data with surivival times as labels.
""" |
X = pd.read_csv(cache(github_data_url + "NHANESI_subset_X.csv"))
y = pd.read_csv(cache(github_data_url + "NHANESI_subset_y.csv"))["y"]
if display:
X_display = X.copy()
X_display["Sex"] = ["Male" if v == 1 else "Female" for v in X["Sex"]]
return X_display, np.array(y)
else:
return X, np.array(y)
<SYSTEM_TASK:>
A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
<END_TASK>
<USER_TASK:>
Description:
def cric(display=False):
""" A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
""" |
X = pd.read_csv(cache(github_data_url + "CRIC_time_4yearESRD_X.csv"))
y = np.loadtxt(cache(github_data_url + "CRIC_time_4yearESRD_y.csv"))
if display:
X_display = X.copy()
return X_display, y
else:
return X, y
<SYSTEM_TASK:>
Correlated Groups 60
<END_TASK>
<USER_TASK:>
Description:
def corrgroups60(display=False):
""" Correlated Groups 60
A simulated dataset with tight correlations among distinct groups of features.
""" |
# save the global random state and set a constant seed
old_state = np.random.get_state()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
# set one coefficient from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
# build a correlation matrix with groups of 3 tightly correlated features
C = np.eye(M)
for i in range(0,30,3):
C[i,i+1] = C[i+1,i] = 0.99
C[i,i+2] = C[i+2,i] = 0.99
C[i+1,i+2] = C[i+2,i+1] = 0.99
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X_centered = X_start - X_start.mean(0)
Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0]
W = np.linalg.cholesky(np.linalg.inv(Sigma)).T
X_white = np.matmul(X_centered, W.T)
assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6 # ensure this decorrelates the data
# create the final data
X_final = np.matmul(X_white, np.linalg.cholesky(C).T)
X = X_final
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random state
np.random.set_state(old_state)
return pd.DataFrame(X), y
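Because the whitening step makes the sample covariance of X_white exactly the identity, the sample correlation of the returned data matches C to numerical precision. A quick sanity-check sketch:
X, y = corrgroups60()
corr = np.corrcoef(X.values.T)
assert abs(corr[0, 1] - 0.99) < 1e-4  # features 0 and 1 share a group
assert abs(corr[0, 3]) < 1e-4         # features 0 and 3 do not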
<SYSTEM_TASK:>
A simulated dataset with 60 independent features and a linear target.
<END_TASK>
<USER_TASK:>
Description:
def independentlinear60(display=False):
""" A simulated dataset with tight correlations among distinct groups of features.
""" |
# save the global random state and set a constant seed
old_state = np.random.get_state()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
# set one coefficient from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
f = lambda X: np.matmul(X, beta)
# center the data
X_start = np.random.randn(N, M)
X = X_start - X_start.mean(0)
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random state
np.random.set_state(old_state)
return pd.DataFrame(X), y
<SYSTEM_TASK:>
Ranking datasets from lightgbm repository.
<END_TASK>
<USER_TASK:>
Description:
def rank():
""" Ranking datasets from lightgbm repository.
""" |
rank_data_url = 'https://raw.githubusercontent.com/Microsoft/LightGBM/master/examples/lambdarank/'
x_train, y_train = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.train'))
x_test, y_test = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.test'))
q_train = np.loadtxt(cache(rank_data_url + 'rank.train.query'))
q_test = np.loadtxt(cache(rank_data_url + 'rank.test.query'))
return x_train, y_train, x_test, y_test, q_train, q_test
<SYSTEM_TASK:>
An approximation of holdout that only retrains the model once.
<END_TASK>
<USER_TASK:>
Description:
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
""" An approximation of holdout that only retraines the model once.
This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
efficient that the holdout method because it masks the most important features in every sample
and then retrains the model once, instead of retraining the model for every test sample like
the holdout metric.
""" |
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# mask nmask top features for each explanation
X_train_tmp = X_train.copy()
X_train_mean = X_train.mean(0)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
for i in range(len(y_train)):
if nmask_train[i] > 0:
ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
X_test_tmp = X_test.copy()
for i in range(len(y_test)):
if nmask_test[i] > 0:
ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
# train the model with all the given features masked
model_masked = model_generator()
model_masked.fit(X_train_tmp, y_train)
yp_test_masked = model_masked.predict(X_test_tmp)
return metric(y_test, yp_test_masked)
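A self-contained usage sketch (hedged: Ridge and r2_score are stand-ins for model_generator and metric, the attributions are synthetic, and this assumes the module helpers to_array and const_rand behave as defined in this file):
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
X_tr, X_te = rng.randn(200, 10), rng.randn(50, 10)
y_tr, y_te = X_tr[:, 0] + 0.1 * rng.randn(200), X_te[:, 0]
attr_tr, attr_te = np.abs(X_tr), np.abs(X_te)          # synthetic attributions
nmask_tr, nmask_te = np.full(200, 5), np.full(50, 5)   # mask 5 features per sample
score = batch_remove_retrain(nmask_tr, nmask_te, X_tr, y_tr, X_te, y_te,
attr_tr, attr_te, Ridge, r2_score)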
<SYSTEM_TASK:>
The model is retrained for each test sample with the non-important features set to a constant.
<END_TASK>
<USER_TASK:>
Description:
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is retrained for each test sample with the non-important features set to a constant.
If you want to know how important a set of features is you can ask how the model would be
different if only those features had existed. To determine this we can mask the other features
across the entire training and test datasets, then retrain the model. If we compare the
output of this retrained model to the original model we can see the effect produced by only
knowing the important features. Since for individualized explanation methods each test sample
has a different set of most important features we need to retrain the model for every test sample
to get the change in model performance when a specified fraction of the most important features
are retained.
""" |
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
# see if we match the last cached call
global _keep_cache
args = (X_train, y_train, X_test, y_test, model_generator, metric)
cache_match = False
if "args" in _keep_cache:
if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test):
cache_match = True
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# this is the model we will retrain many times
model_masked = model_generator()
# keep nkeep top features and re-train the model for each test explanation
X_train_tmp = np.zeros(X_train.shape)
X_test_tmp = np.zeros(X_test.shape)
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
last_nkeep = _keep_cache.get("nkeep", None)
last_yp_masked_test = _keep_cache.get("yp_masked_test", None)
for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"):
if cache_match and last_nkeep[i] == nkeep[i]:
yp_masked_test[i] = last_yp_masked_test[i]
elif nkeep[i] == attr_test.shape[1]:
yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
else:
# mask out the most important features for this test instance
X_train_tmp[:] = X_train
X_test_tmp[:] = X_test
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
# retrain the model and make a prediction
model_masked.fit(X_train_tmp, y_train)
yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
# save our results so the next call to us can be faster when there is redundancy
_keep_cache["nkeep"] = nkeep
_keep_cache["yp_masked_test"] = yp_masked_test
_keep_cache["attr_test"] = attr_test
_keep_cache["args"] = args
return metric(y_test, yp_masked_test)
<SYSTEM_TASK:>
The model is re-evaluated for each test sample with the non-important features set to their mean.
<END_TASK>
<USER_TASK:>
Description:
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to their mean.
""" |
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
<SYSTEM_TASK:>
The model is re-evaluated for each test sample with the non-important features set to an imputed value.
<END_TASK>
<USER_TASK:>
Description:
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
be significantly bigger than X_train.shape[1].
""" |
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
C = np.cov(X_train.T)
C += np.eye(C.shape[0]) * 1e-6
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
observe_inds = ordering[:nkeep[i]]
impute_inds = ordering[nkeep[i]:]
# impute missing data assuming it follows a multivariate normal distribution
Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
Cio = C[impute_inds,:][:,observe_inds]
impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
X_test_tmp[i, impute_inds] = impute
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
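For reference, the impute line above is the conditional mean of a multivariate normal, a standard identity (with o indexing the observed/kept features and i the imputed ones):
\mu_{i \mid o} = \mu_i + C_{io} C_{oo}^{-1} (x_o - \mu_o)
which is exactly mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds]).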
<SYSTEM_TASK:>
The model is re-evaluated for each test sample with the non-important features resampled from background values.
<END_TASK>
<USER_TASK:>
Description:
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to resample background values.
""" | # why broken? overwriting?
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# how many samples to take
nsamples = 100
# keep nkeep top features for each test explanation
N,M = X_test.shape
X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
tie_breaking_noise = const_rand(M) * 1e-6
inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
for i in range(N):
if nkeep[i] < M:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
return metric(y_test, yp_masked_test)
<SYSTEM_TASK:>
How well do the features plus a constant base rate sum up to the model output.
<END_TASK>
<USER_TASK:>
Description:
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
""" The how well do the features plus a constant base rate sum up to the model output.
""" |
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# compare the model output to the sum of the attribution values
yp_test = trained_model.predict(X_test)
return metric(yp_test, strip_list(attr_test).sum(1))
<SYSTEM_TASK:>
Generate a random array with a fixed seed.
<END_TASK>
<USER_TASK:>
Description:
def const_rand(size, seed=23980):
""" Generate a random array with a fixed seed.
""" |
old_state = np.random.get_state()
np.random.seed(seed)
out = np.random.rand(size)
np.random.set_state(old_state)
return out
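A quick check sketch: the fixed seed makes repeated calls deterministic, and (with the state save/restore above) the surrounding global random stream is left untouched.
a = const_rand(5)
b = const_rand(5)
assert np.array_equal(a, b)  # same seed, same output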
<SYSTEM_TASK:>
Shuffle an array in-place with a fixed seed.
<END_TASK>
<USER_TASK:>
Description:
def const_shuffle(arr, seed=23980):
""" Shuffle an array in-place with a fixed seed.
""" |
old_state = np.random.get_state()
np.random.seed(seed)
np.random.shuffle(arr)
np.random.set_state(old_state)
<SYSTEM_TASK:>
A leaf ordering is under-defined; this picks the ordering that keeps nearby samples similar.
<END_TASK>
<USER_TASK:>
Description:
def hclust_ordering(X, metric="sqeuclidean"):
""" A leaf ordering is under-defined, this picks the ordering that keeps nearby samples similar.
""" |
# compute a hierarchical clustering
D = sp.spatial.distance.pdist(X, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
# merge clusters, rotating them to make the end points match as best we can
sets = [[i] for i in range(X.shape[0])]
for i in range(cluster_matrix.shape[0]):
s1 = sets[int(cluster_matrix[i,0])]
s2 = sets[int(cluster_matrix[i,1])]
# compute distances between the end points of the lists
d_s1_s2 = pdist(np.vstack([X[s1[-1],:], X[s2[0],:]]), metric)[0]
d_s2_s1 = pdist(np.vstack([X[s1[0],:], X[s2[-1],:]]), metric)[0]
d_s1r_s2 = pdist(np.vstack([X[s1[0],:], X[s2[0],:]]), metric)[0]
d_s1_s2r = pdist(np.vstack([X[s1[-1],:], X[s2[-1],:]]), metric)[0]
# concatenate the lists in the way that minimizes the difference between
# the samples at the junction
best = min(d_s1_s2, d_s2_s1, d_s1r_s2, d_s1_s2r)
if best == d_s1_s2:
sets.append(s1 + s2)
elif best == d_s2_s1:
sets.append(s2 + s1)
elif best == d_s1r_s2:
sets.append(list(reversed(s1)) + s2)
else:
sets.append(s1 + list(reversed(s2)))
return sets[-1]
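A small usage sketch with scalar points (hedged: the exact ordering depends on scipy's complete-linkage clustering, but adjacent samples in the result should be close in value):
pts = np.array([[0.0], [5.0], [1.0], [4.0], [2.0], [3.0]])
order = hclust_ordering(pts)
# pts[order] should change roughly monotonically, e.g. 0, 1, 2, 3, 4, 5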
<SYSTEM_TASK:>
Order other features by how much interaction they seem to have with the feature at the given index.
<END_TASK>
<USER_TASK:>
Description:
def approximate_interactions(index, shap_values, X, feature_names=None):
""" Order other features by how much interaction they seem to have with the feature at the given index.
This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction
index values for SHAP see the interaction_contribs option implemented in XGBoost.
""" |
# convert from DataFrames if we got any
if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = X.columns
X = X.values
index = convert_name(index, shap_values, feature_names)
if X.shape[0] > 10000:
a = np.arange(X.shape[0])
np.random.shuffle(a)
inds = a[:10000]
else:
inds = np.arange(X.shape[0])
x = X[inds, index]
srt = np.argsort(x)
shap_ref = shap_values[inds, index]
shap_ref = shap_ref[srt]
inc = max(min(int(len(x) / 10.0), 50), 1)
interactions = []
for i in range(X.shape[1]):
val_other = X[inds, i][srt].astype(float)
v = 0.0
if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
for j in range(0, len(x), inc):
if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
val_v = v
val_other = np.isnan(X[inds, i][srt].astype(float))
v = 0.0
if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
for j in range(0, len(x), inc):
if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
nan_v = v
interactions.append(max(val_v, nan_v))
return np.argsort(-np.abs(interactions))
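A usage sketch (hedged: shap_vals is assumed to be an (n_samples, n_features) attribution matrix computed elsewhere for X):
# rank candidate interaction partners for feature 0
inds = approximate_interactions(0, shap_vals, X)
strongest = inds[0]  # feature that appears to interact most with feature 0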
<SYSTEM_TASK:>
Converts human agreement differences to numerical scores for coloring.
<END_TASK>
<USER_TASK:>
Description:
def _human_score_map(human_consensus, methods_attrs):
""" Converts human agreement differences to numerical scores for coloring.
""" |
v = 1 - min(np.sum(np.abs(methods_attrs - human_consensus)) / (np.abs(human_consensus).sum() + 1), 1.0)
return v
<SYSTEM_TASK:>
Draw the bars and separators.
<END_TASK>
<USER_TASK:>
Description:
def draw_bars(out_value, features, feature_type, width_separators, width_bar):
"""Draw the bars and separators.""" |
rectangle_list = []
separator_list = []
pre_val = out_value
for index, features in enumerate(features):
if feature_type == 'positive':
left_bound = float(features[0])
right_bound = pre_val
pre_val = left_bound
separator_indent = np.abs(width_separators)
separator_pos = left_bound
colors = ['#FF0D57', '#FFC3D5']
else:
left_bound = pre_val
right_bound = float(features[0])
pre_val = right_bound
separator_indent = - np.abs(width_separators)
separator_pos = right_bound
colors = ['#1E88E5', '#D1E6FA']
# Create rectangle
if index == 0:
if feature_type == 'positive':
points_rectangle = [[left_bound, 0],
[right_bound, 0],
[right_bound, width_bar],
[left_bound, width_bar],
[left_bound + separator_indent, (width_bar / 2)]
]
else:
points_rectangle = [[right_bound, 0],
[left_bound, 0],
[left_bound, width_bar],
[right_bound, width_bar],
[right_bound + separator_indent, (width_bar / 2)]
]
else:
points_rectangle = [[left_bound, 0],
[right_bound, 0],
[right_bound + separator_indent * 0.90, (width_bar / 2)],
[right_bound, width_bar],
[left_bound, width_bar],
[left_bound + separator_indent * 0.90, (width_bar / 2)]]
line = plt.Polygon(points_rectangle, closed=True, fill=True,
facecolor=colors[0], linewidth=0)
rectangle_list += [line]
# Create separator
points_separator = [[separator_pos, 0],
[separator_pos + separator_indent, (width_bar / 2)],
[separator_pos, width_bar]]
line = plt.Polygon(points_separator, closed=None, fill=None,
edgecolor=colors[1], lw=3)
separator_list += [line]
return rectangle_list, separator_list
<SYSTEM_TASK:>
Draw additive plot.
<END_TASK>
<USER_TASK:>
Description:
def draw_additive_plot(data, figsize, show, text_rotation=0):
"""Draw additive plot.""" |
# Turn off interactive plot
if not show:
plt.ioff()
# Format data
neg_features, total_neg, pos_features, total_pos = format_data(data)
# Compute overall metrics
base_value = data['baseValue']
out_value = data['outValue']
offset_text = (np.abs(total_neg) + np.abs(total_pos)) * 0.04
# Define plots
fig, ax = plt.subplots(figsize=figsize)
# Compute axis limit
update_axis_limits(ax, total_pos, pos_features, total_neg,
neg_features, base_value)
# Define width of bar
width_bar = 0.1
width_separators = (ax.get_xlim()[1] - ax.get_xlim()[0]) / 200
# Create bar for negative shap values
rectangle_list, separator_list = draw_bars(out_value, neg_features, 'negative',
width_separators, width_bar)
for i in rectangle_list:
ax.add_patch(i)
for i in separator_list:
ax.add_patch(i)
# Create bar for positive shap values
rectangle_list, separator_list = draw_bars(out_value, pos_features, 'positive',
width_separators, width_bar)
for i in rectangle_list:
ax.add_patch(i)
for i in separator_list:
ax.add_patch(i)
# Add labels
total_effect = np.abs(total_neg) + total_pos
fig, ax = draw_labels(fig, ax, out_value, neg_features, 'negative',
offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation)
fig, ax = draw_labels(fig, ax, out_value, pos_features, 'positive',
offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation)
# higher lower legend
draw_higher_lower_element(out_value, offset_text)
# Add label for base value
draw_base_element(base_value, ax)
# Add output label
out_names = data['outNames'][0]
draw_output_element(out_names, out_value, ax)
if show:
plt.show()
else:
return plt.gcf()
<SYSTEM_TASK:>
Fails gracefully when various install steps don't work.
<END_TASK>
<USER_TASK:>
Description:
def try_run_setup(**kwargs):
""" Fails gracefully when various install steps don't work.
""" |
try:
run_setup(**kwargs)
except Exception as e:
print(str(e))
if "xgboost" in str(e).lower():
kwargs["test_xgboost"] = False
print("Couldn't install XGBoost for testing!")
try_run_setup(**kwargs)
elif "lightgbm" in str(e).lower():
kwargs["test_lightgbm"] = False
print("Couldn't install LightGBM for testing!")
try_run_setup(**kwargs)
elif kwargs["with_binary"]:
kwargs["with_binary"] = False
print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
try_run_setup(**kwargs)
else:
print("ERROR: Failed to build!") |
<SYSTEM_TASK:>
The backward hook which computes the deeplift
<END_TASK>
<USER_TASK:>
Description:
def deeplift_grad(module, grad_input, grad_output):
"""The backward hook which computes the deeplift
gradient for an nn.Module
""" |
# first, get the module type
module_type = module.__class__.__name__
# then check whether we have a handler for this module type
if module_type in op_handler:
if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:
return op_handler[module_type](module, grad_input, grad_output)
else:
print('Warning: unrecognized nn.Module: {}'.format(module_type))
return grad_input
<SYSTEM_TASK:>
The forward hook used to save interim tensors, detached
<END_TASK>
<USER_TASK:>
Description:
def add_interim_values(module, input, output):
"""The forward hook used to save interim tensors, detached
from the graph. Used to calculate the multipliers
""" |
try:
del module.x
except AttributeError:
pass
try:
del module.y
except AttributeError:
pass
module_type = module.__class__.__name__
if module_type in op_handler:
func_name = op_handler[module_type].__name__
# First, check for cases where we don't need to save the x and y tensors
if func_name == 'passthrough':
pass
else:
# check only the 0th input varies
for i in range(len(input)):
if i != 0 and type(output) is tuple:
assert input[i] == output[i], "Only the 0th input may vary!"
# if a new method is added, it must be added here too. This ensures tensors
# are only saved if necessary
if func_name in ['maxpool', 'nonlinear_1d']:
# only save tensors if necessary
if type(input) is tuple:
setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
else:
setattr(module, 'x', torch.nn.Parameter(input.detach()))
if type(output) is tuple:
setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
else:
setattr(module, 'y', torch.nn.Parameter(output.detach()))
if module_type in failure_case_modules:
input[0].register_hook(deeplift_tensor_grad)
<SYSTEM_TASK:>
A forward hook which saves the tensor - attached to its graph.
<END_TASK>
<USER_TASK:>
Description:
def get_target_input(module, input, output):
"""A forward hook which saves the tensor - attached to its graph.
Used if we want to explain the interim outputs of a model
""" |
try:
del module.target_input
except AttributeError:
pass
setattr(module, 'target_input', input)
<SYSTEM_TASK:>
Add handles to all non-container layers in the model.
<END_TASK>
<USER_TASK:>
Description:
def add_handles(self, model, forward_handle, backward_handle):
"""
Add handles to all non-container layers in the model.
Recursively descends into container layers.
"""
handles_list = []
for child in model.children():
if 'nn.modules.container' in str(type(child)):
handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
else:
handles_list.append(child.register_forward_hook(forward_handle))
handles_list.append(child.register_backward_hook(backward_handle))
return handles_list
<SYSTEM_TASK:>
Removes the x and y attributes which were added by the forward handles
<END_TASK>
<USER_TASK:>
Description:
def remove_attributes(self, model):
"""
Removes the x and y attributes which were added by the forward handles
Recursively searches for non-container layers
""" |
for child in model.children():
if 'nn.modules.container' in str(type(child)):
self.remove_attributes(child)
else:
try:
del child.x
except AttributeError:
pass
try:
del child.y
except AttributeError:
pass
<SYSTEM_TASK:>
This gets a JSON dump of an XGBoost model while ensuring the feature names are their indexes.
<END_TASK>
<USER_TASK:>
Description:
def get_xgboost_json(model):
""" This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes.
""" |
fnames = model.feature_names
model.feature_names = None
json_trees = model.get_dump(with_stats=True, dump_format="json")
model.feature_names = fnames
# this fixes a bug where XGBoost can return invalid JSON
json_trees = [t.replace(": inf,", ": 1000000000000.0,") for t in json_trees]
json_trees = [t.replace(": -inf,", ": -1000000000000.0,") for t in json_trees]
return json_trees
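A usage sketch (hedged: X and y are illustrative training data; the model is a plain xgboost.Booster):
import json
import xgboost
bst = xgboost.train({"max_depth": 2}, xgboost.DMatrix(X, label=y), num_boost_round=3)
trees = [json.loads(t) for t in get_xgboost_json(bst)]
# with feature_names temporarily cleared, nodes reference features as f0, f1, ...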
<SYSTEM_TASK:>
This computes the expected value conditioned on the given label value.
<END_TASK>
<USER_TASK:>
Description:
def __dynamic_expected_value(self, y):
""" This computes the expected value conditioned on the given label value.
""" |
return self.model.predict(self.data, np.ones(self.data.shape[0]) * y, output=self.model_output).mean(0) |
<SYSTEM_TASK:>
Return the values for the model applied to X.
<END_TASK>
<USER_TASK:>
Description:
def shap_values(self, X, nsamples=200, ranked_outputs=None, output_rank_order="max", rseed=None):
""" Return the values for the model applied to X.
Parameters
----------
X : list,
if framework == 'tensorflow': numpy.array, or pandas.DataFrame
if framework == 'pytorch': torch.tensor
A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to
explain the model's output.
ranked_outputs : None or int
If ranked_outputs is None then we explain all the outputs in a multi-output model. If
ranked_outputs is a positive integer then we only explain that many of the top model
outputs (where "top" is determined by output_rank_order). Note that this causes a pair
of values to be returned (shap_values, indexes), where shap_values is a list of numpy arrays for each of
the output ranks, and indexes is a matrix that tells for each sample which output indexes
were chosen as "top".
output_rank_order : "max", "min", "max_abs", or "custom"
How to order the model outputs when using ranked_outputs, either by maximum, minimum, or
maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes.
rseed : None or int
Seeding the randomness in shap value computation (background example choice,
interpolation between current and background example, smoothing).
Returns
-------
For a model with a single output this returns a tensor of SHAP values with the same shape
as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of
which are the same shape as X. If ranked_outputs is None then this list of tensors matches
the number of model outputs. If ranked_outputs is a positive integer a pair is returned
(shap_values, indexes), where shap_values is a list of tensors with a length of
ranked_outputs, and indexes is a matrix that tells for each sample which output indexes
were chosen as "top".
""" |
return self.explainer.shap_values(X, nsamples, ranked_outputs, output_rank_order, rseed) |
<SYSTEM_TASK:>
Follows a set of ops assuming their value is False and find blocked Switch paths.
<END_TASK>
<USER_TASK:>
Description:
def tensors_blocked_by_false(ops):
""" Follows a set of ops assuming their value is False and find blocked Switch paths.
This is used to prune away parts of the model graph that are only used during the training
phase (like dropout, batch norm, etc.).
""" |
blocked = []
def recurse(op):
if op.type == "Switch":
blocked.append(op.outputs[1]) # the true path is blocked since we assume the ops we trace are False
else:
for out in op.outputs:
for c in out.consumers():
recurse(c)
for op in ops:
recurse(op)
return blocked
<SYSTEM_TASK:>
Get the SHAP value computation graph for a given model output.
<END_TASK>
<USER_TASK:>
Description:
def phi_symbolic(self, i):
""" Get the SHAP value computation graph for a given model output.
""" |
if self.phi_symbolics[i] is None:
# replace the gradients for all the non-linear activations
# we do this by hacking our way into the registry (TODO: find a public API for this if it exists)
reg = tf_ops._gradient_registry._registry
for n in op_handlers:
if n in reg:
self.orig_grads[n] = reg[n]["type"]
if op_handlers[n] is not passthrough:
reg[n]["type"] = self.custom_grad
elif n in self.used_types:
raise Exception(n + " was used in the model but is not in the gradient registry!")
# In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped;
# unfortunately that includes the index of embedding layers, so we disable that check here
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable
tf_gradients_impl._IsBackpropagatable = lambda tensor: True
# define the computation graph for the attribution values using a custom gradient-like computation
try:
out = self.model_output[:,i] if self.multi_output else self.model_output
self.phi_symbolics[i] = tf.gradients(out, self.model_inputs)
finally:
# reinstate the backpropagatable check
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable
# restore the original gradient definitions
for n in op_handlers:
if n in reg:
reg[n]["type"] = self.orig_grads[n]
return self.phi_symbolics[i]
<SYSTEM_TASK:>
Runs the model while also setting the learning phase flags to False.
<END_TASK>
<USER_TASK:>
Description:
def run(self, out, model_inputs, X):
""" Runs the model while also setting the learning phase flags to False.
""" |
feed_dict = dict(zip(model_inputs, X))
for t in self.learning_phase_flags:
feed_dict[t] = False
return self.session.run(out, feed_dict)
<SYSTEM_TASK:>
Passes a gradient op creation request to the correct handler.
<END_TASK>
<USER_TASK:>
Description:
def custom_grad(self, op, *grads):
""" Passes a gradient op creation request to the correct handler.
""" |
return op_handlers[op.type](self, op, *grads) |
<SYSTEM_TASK:>
Use ssh to run the experiments on remote machines in parallel.
<END_TASK>
<USER_TASK:>
Description:
def run_remote_experiments(experiments, thread_hosts, rate_limit=10):
""" Use ssh to run the experiments on remote machines in parallel.
Parameters
----------
experiments : iterable
Output of shap.benchmark.experiments(...).
thread_hosts : list of strings
Each host has the format "host_name:path_to_python_binary" and can appear multiple times
in the list (one for each parallel execution you want on that machine).
rate_limit : int
How many ssh connections we make per minute to each host (to avoid throttling issues).
""" |
global ssh_conn_per_min_limit
ssh_conn_per_min_limit = rate_limit
# first we kill any remaining workers from previous runs
# note we don't check_call because pkill kills our ssh call as well
thread_hosts = copy.copy(thread_hosts)
random.shuffle(thread_hosts)
for host in set(thread_hosts):
hostname,_ = host.split(":")
try:
subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15)
except subprocess.TimeoutExpired:
print("Failed to connect to", hostname, "after 15 seconds! Exiting.")
return
experiments = copy.copy(list(experiments))
random.shuffle(experiments) # this way all the hard experiments don't get put on one machine
global nexperiments, total_sent, total_done, total_failed, host_records
nexperiments = len(experiments)
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
q = Queue()
for host in thread_hosts:
worker = Thread(target=__thread_worker, args=(q, host))
worker.daemon = True
worker.start()
for experiment in experiments:
q.put(experiment)
q.join()
<SYSTEM_TASK:>
Summarize a dataset with k mean samples weighted by the number of data points they
<END_TASK>
<USER_TASK:>
Description:
def kmeans(X, k, round_values=True):
""" Summarize a dataset with k mean samples weighted by the number of data points they
each represent.
Parameters
----------
X : numpy.array or pandas.DataFrame
Matrix of data samples to summarize (# samples x # features)
k : int
Number of means to use for approximation.
round_values : bool
For all i, round the ith dimension of each mean sample to match the nearest value
from X[:,i]. This ensures discrete features always get a valid value.
Returns
-------
DenseData object.
""" |
group_names = [str(i) for i in range(X.shape[1])]
if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
group_names = X.columns
X = X.values
kmeans = KMeans(n_clusters=k, random_state=0).fit(X)
if round_values:
for i in range(k):
for j in range(X.shape[1]):
ind = np.argmin(np.abs(X[:,j] - kmeans.cluster_centers_[i,j]))
kmeans.cluster_centers_[i,j] = X[ind,j]
return DenseData(kmeans.cluster_centers_, group_names, None, 1.0*np.bincount(kmeans.labels_))
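A usage sketch (hedged: model.predict and X stand for any model function and background matrix; passing the summary to a KernelExplainer is the typical use of this helper):
background = kmeans(X, 10)  # 10 weighted means summarizing X
# explainer = shap.KernelExplainer(model.predict, background)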
<SYSTEM_TASK:>
Use the SHAP values as an embedding which we project to 2D for visualization.
<END_TASK>
<USER_TASK:>
Description:
def embedding_plot(ind, shap_values, feature_names=None, method="pca", alpha=1.0, show=True):
""" Use the SHAP values as an embedding which we project to 2D for visualization.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to use to color the embedding.
If this is a string it is either the name of the feature, or it can have the
form "rank(int)" to specify the feature with that rank (ordered by mean absolute
SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values,
which is the model's output (minus its expected value).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
feature_names : None or list
The names of the features in the shap_values array.
method : "pca" or numpy.array
How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D
PCA projection of shap_values is used. If a numpy array then it should be
(# samples x 2) and represent the embedding of those samples.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to show the
density of the data points when using a large dataset.
"""
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
ind = convert_name(ind, shap_values, feature_names)
if ind == "sum()":
cvals = shap_values.sum(1)
fname = "sum(SHAP values)"
else:
cvals = shap_values[:,ind]
fname = feature_names[ind]
# see if we need to compute the embedding
if type(method) == str and method == "pca":
pca = sklearn.decomposition.PCA(2)
embedding_values = pca.fit_transform(shap_values)
elif hasattr(method, "shape") and method.shape[1] == 2:
embedding_values = method
else:
print("Unsupported embedding method:", method)
return
pl.scatter(
embedding_values[:,0], embedding_values[:,1], c=cvals,
cmap=colors.red_blue, alpha=alpha, linewidth=0
)
pl.axis("off")
#pl.title(feature_names[ind])
cb = pl.colorbar()
cb.set_label("SHAP value for\n"+fname, size=13)
cb.outline.set_visible(False)
pl.gcf().set_size_inches(7.5, 5)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 10)
cb.set_alpha(1)
if show:
pl.show()
<SYSTEM_TASK:>
Local Accuracy
<END_TASK>
<USER_TASK:>
Description:
def local_accuracy(X, y, model_generator, method_name):
""" Local Accuracy
transform = "identity"
sort_order = 2
""" |
def score_map(true, pred):
""" Converts local accuracy from % of standard deviation to numerical scores for coloring.
"""
v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
if v < 1e-6:
return 1.0
elif v < 0.01:
return 0.9
elif v < 0.05:
return 0.75
elif v < 0.1:
return 0.6
elif v < 0.2:
return 0.4
elif v < 0.3:
return 0.3
elif v < 0.5:
return 0.2
elif v < 0.7:
return 0.1
else:
return 0.0
def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state):
return measures.local_accuracy(
X_train, y_train, X_test, y_test, attr_function(X_test),
model_generator, score_map, trained_model
)
return None, __score_method(X, y, None, model_generator, score_function, method_name)
<SYSTEM_TASK:>
Test an explanation method.
<END_TASK>
<USER_TASK:>
Description:
def __score_method(X, y, fcounts, model_generator, score_function, method_name, nreps=10, test_size=100, cache_dir="/tmp"):
""" Test an explanation method.
""" |
old_seed = np.random.seed()
np.random.seed(3293)
# average the method scores over several train/test splits
method_reps = []
data_hash = hashlib.sha256(__toarray(X).flatten()).hexdigest() + hashlib.sha256(__toarray(y)).hexdigest()
for i in range(nreps):
X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=test_size, random_state=i)
# define the model we are going to explain, caching so we only build it once
model_id = "model_cache__v" + "__".join([__version__, data_hash, model_generator.__name__])
cache_file = os.path.join(cache_dir, model_id + ".pickle")
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
model = pickle.load(f)
else:
model = model_generator()
model.fit(X_train, y_train)
with open(cache_file, "wb") as f:
pickle.dump(model, f)
attr_key = "_".join([model_generator.__name__, method_name, str(test_size), str(nreps), str(i), data_hash])
def score(attr_function):
def cached_attr_function(X_inner):
if attr_key not in _attribution_cache:
_attribution_cache[attr_key] = attr_function(X_inner)
return _attribution_cache[attr_key]
#cached_attr_function = lambda X: __check_cache(attr_function, X)
if fcounts is None:
return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model, i)
else:
scores = []
for f in fcounts:
scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model, i))
return np.array(scores)
# evaluate the method (only building the attribution function if we need to)
if attr_key not in _attribution_cache:
method_reps.append(score(getattr(methods, method_name)(model, X_train)))
else:
method_reps.append(score(None))
np.random.set_state(old_state)
return np.array(method_reps).mean(0)
<SYSTEM_TASK:>
Uses block matrix inversion identities to quickly estimate transforms.
<END_TASK>
<USER_TASK:>
Description:
def _estimate_transforms(self, nsamples):
""" Uses block matrix inversion identities to quickly estimate transforms.
After a bit of matrix math we can isolate a transform matrix (# features x # features)
that is independent of any sample we are explaining. It is the result of averaging over
all feature permutations, but we just use a fixed number of samples to estimate the value.
TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could
happen through a recursive method that uses the same block matrix inversion as below.
""" |
M = len(self.coef)
mean_transform = np.zeros((M,M))
x_transform = np.zeros((M,M))
inds = np.arange(M, dtype=int)
for _ in tqdm(range(nsamples), "Estimating transforms"):
np.random.shuffle(inds)
cov_inv_SiSi = np.zeros((0,0))
cov_Si = np.zeros((M,0))
for j in range(M):
i = inds[j]
# use the last Si as the new S
cov_S = cov_Si
cov_inv_SS = cov_inv_SiSi
# get the new cov_Si
cov_Si = self.cov[:,inds[:j+1]]
# compute the new cov_inv_SiSi from cov_inv_SS
d = cov_Si[i,:-1].T
t = np.matmul(cov_inv_SS, d)
Z = self.cov[i, i]
u = Z - np.matmul(t.T, d)
cov_inv_SiSi = np.zeros((j+1, j+1))
if j > 0:
cov_inv_SiSi[:-1, :-1] = cov_inv_SS + np.outer(t, t) / u
cov_inv_SiSi[:-1, -1] = cov_inv_SiSi[-1,:-1] = -t / u
cov_inv_SiSi[-1, -1] = 1 / u
# + coef @ (Q(bar(Sui)) - Q(bar(S)))
mean_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
coef_R_Si = np.matmul(self.coef[inds[j+1:]], np.matmul(cov_Si, cov_inv_SiSi)[inds[j+1:]])
mean_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
coef_R_S = np.matmul(self.coef[inds[j:]], np.matmul(cov_S, cov_inv_SS)[inds[j:]])
mean_transform[i, inds[:j]] -= coef_R_S
# - coef @ (Q(Sui) - Q(S))
x_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
x_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
x_transform[i, inds[:j]] -= coef_R_S
mean_transform /= nsamples
x_transform /= nsamples
return mean_transform, x_transform
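For clarity, the incremental update of cov_inv_SiSi inside the loop is the standard bordered block matrix inverse. With d = C_{S,i}, Z = C_{ii}, t = C_{SS}^{-1} d and u = Z - t^\top d:
\begin{pmatrix} C_{SS} & d \\ d^\top & Z \end{pmatrix}^{-1} = \begin{pmatrix} C_{SS}^{-1} + t t^\top / u & -t/u \\ -t^\top/u & 1/u \end{pmatrix}
which is exactly how the j-th iteration fills the (j+1) x (j+1) inverse from the previous j x j one.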
<SYSTEM_TASK:>
Return approximate SHAP values for the model applied to the data given by X.
<END_TASK>
<USER_TASK:>
Description:
def shap_values(self, X, ranked_outputs=None, output_rank_order='max'):
""" Return approximate SHAP values for the model applied to the data given by X.
Parameters
----------
X : list,
if framework == 'tensorflow': numpy.array, or pandas.DataFrame
if framework == 'pytorch': torch.tensor
A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to
explain the model's output.
ranked_outputs : None or int
If ranked_outputs is None then we explain all the outputs in a multi-output model. If
ranked_outputs is a positive integer then we only explain that many of the top model
outputs (where "top" is determined by output_rank_order). Note that this causes a pair
of values to be returned (shap_values, indexes), where shap_values is a list of numpy
arrays for each of the output ranks, and indexes is a matrix that indicates for each sample
which output indexes were chosen as "top".
output_rank_order : "max", "min", or "max_abs"
How to order the model outputs when using ranked_outputs, either by maximum, minimum, or
maximum absolute value.
Returns
-------
For a model with a single output this returns a tensor of SHAP values with the same shape
as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of
which are the same shape as X. If ranked_outputs is None then this list of tensors matches
the number of model outputs. If ranked_outputs is a positive integer a pair is returned
(shap_values, indexes), where shap_values is a list of tensors with a length of
ranked_outputs, and indexes is a matrix that indicates for each sample which output indexes
were chosen as "top".
""" |
return self.explainer.shap_values(X, ranked_outputs, output_rank_order) |
<SYSTEM_TASK:>
Returns a dummy agent class for when PyTorch etc. is not installed.
<END_TASK>
<USER_TASK:>
Description:
def _agent_import_failed(trace):
"""Returns dummy agent class for if PyTorch etc. is not installed.""" |
class _AgentImportFailed(Trainer):
_name = "AgentImportFailed"
_default_config = with_common_config({})
def _setup(self, config):
raise ImportError(trace)
return _AgentImportFailed
<SYSTEM_TASK:>
Executes training.
<END_TASK>
<USER_TASK:>
Description:
def run(run_or_experiment,
name=None,
stop=None,
config=None,
resources_per_trial=None,
num_samples=1,
local_dir=None,
upload_dir=None,
trial_name_creator=None,
loggers=None,
sync_function=None,
checkpoint_freq=0,
checkpoint_at_end=False,
export_formats=None,
max_failures=3,
restore=None,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True):
"""Executes training.
Args:
run_or_experiment (function|class|str|Experiment): If
function|class|str, this is the algorithm or model to train.
This may refer to the name of a built-in algorithm
(e.g. RLlib's DQN or PPO), a user-defined trainable
function or class, or the string identifier of a
trainable function or class registered in the tune registry.
If Experiment, then Tune will execute training based on
Experiment.spec.
name (str): Name of experiment.
stop (dict): The stopping criteria. The keys may be any field in
the return result of 'train()', whichever is reached first.
Defaults to empty dict.
config (dict): Algorithm-specific configuration for Tune variant
generation (e.g. env, hyperparams). Defaults to empty dict.
Custom search algorithms may ignore this.
resources_per_trial (dict): Machine resources to allocate per trial,
e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be
assigned unless you specify them here. Defaults to 1 CPU and 0
GPUs in ``Trainable.default_resource_request()``.
num_samples (int): Number of times to sample from the
hyperparameter space. Defaults to 1. If `grid_search` is
provided as an argument, the grid will be repeated
`num_samples` of times.
local_dir (str): Local dir to save training results to.
Defaults to ``~/ray_results``.
upload_dir (str): Optional URI to sync training results
to (e.g. ``s3://bucket``).
trial_name_creator (func): Optional function for generating
the trial string representation.
loggers (list): List of logger creators to be used with
each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.
See `ray/tune/logger.py`.
sync_function (func|str): Function for syncing the local_dir to
upload_dir. If string, then it must be a string template for
syncer to run. If not provided, the sync command defaults
to standard S3 or gsutil sync commands.
checkpoint_freq (int): How many training iterations between
checkpoints. A value of 0 (default) disables checkpointing.
checkpoint_at_end (bool): Whether to checkpoint at the end of the
experiment regardless of the checkpoint_freq. Default is False.
export_formats (list): List of formats to export at the end of
the experiment. Default is None.
max_failures (int): Try to recover a trial from its last
checkpoint at least this many times. Only applies if
checkpointing is enabled. Setting to -1 will lead to infinite
recovery retries. Defaults to 3.
restore (str): Path to checkpoint. Only makes sense to set if
running 1 trial. Defaults to None.
search_alg (SearchAlgorithm): Search Algorithm. Defaults to
BasicVariantGenerator.
scheduler (TrialScheduler): Scheduler for executing
the experiment. Choose among FIFO (default), MedianStopping,
AsyncHyperBand, and HyperBand.
with_server (bool): Starts a background Tune server. Needed for
using the Client API.
server_port (int): Port number for launching TuneServer.
verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,
1 = only status updates, 2 = status and trial results.
resume (bool|"prompt"): If checkpoint exists, the experiment will
resume from there. If resume is "prompt", Tune will prompt if
checkpoint detected.
queue_trials (bool): Whether to queue trials when the cluster does
not currently have enough resources to launch one. This should
be set to True when running on an autoscaling cluster to enable
automatic scale-up.
reuse_actors (bool): Whether to reuse actors between different trials
when possible. This can drastically speed up experiments that start
and stop actors often (e.g., PBT in time-multiplexing mode). This
requires trials to have the same resource requirements.
trial_executor (TrialExecutor): Manage the execution of trials.
raise_on_failed_trial (bool): Raise TuneError if there exists failed
trial (of ERROR state) when the experiments complete.
Returns:
List of Trial objects.
Raises:
TuneError if any trials failed and `raise_on_failed_trial` is True.
Examples:
>>> tune.run(mytrainable, scheduler=PopulationBasedTraining())
>>> tune.run(mytrainable, num_samples=5, reuse_actors=True)
>>> tune.run(
"PG",
num_samples=5,
config={
"env": "CartPole-v0",
"lr": tune.sample_from(lambda _: np.random.rand())
}
)
""" |
experiment = run_or_experiment
if not isinstance(run_or_experiment, Experiment):
experiment = Experiment(
name, run_or_experiment, stop, config, resources_per_trial,
num_samples, local_dir, upload_dir, trial_name_creator, loggers,
sync_function, checkpoint_freq, checkpoint_at_end, export_formats,
max_failures, restore)
else:
logger.debug("Ignoring some parameters passed into tune.run.")
checkpoint_dir = _find_checkpoint_dir(experiment)
should_restore = _prompt_restore(checkpoint_dir, resume)
runner = None
if should_restore:
try:
runner = TrialRunner.restore(checkpoint_dir, search_alg, scheduler,
trial_executor)
except Exception:
logger.exception("Runner restore failed. Restarting experiment.")
else:
logger.info("Starting a new experiment.")
if not runner:
scheduler = scheduler or FIFOScheduler()
search_alg = search_alg or BasicVariantGenerator()
search_alg.add_configurations([experiment])
runner = TrialRunner(
search_alg,
scheduler=scheduler,
metadata_checkpoint_dir=checkpoint_dir,
launch_web_server=with_server,
server_port=server_port,
verbose=bool(verbose > 1),
queue_trials=queue_trials,
reuse_actors=reuse_actors,
trial_executor=trial_executor)
if verbose:
print(runner.debug_string(max_debug=99999))
last_debug = 0
while not runner.is_finished():
runner.step()
if time.time() - last_debug > DEBUG_PRINT_INTERVAL:
if verbose:
print(runner.debug_string())
last_debug = time.time()
if verbose:
print(runner.debug_string(max_debug=99999))
wait_for_log_sync()
errored_trials = []
for trial in runner.get_trials():
if trial.status != Trial.TERMINATED:
errored_trials += [trial]
if errored_trials:
if raise_on_failed_trial:
raise TuneError("Trials did not complete", errored_trials)
else:
logger.error("Trials did not complete: %s", errored_trials)
return runner.get_trials() |
<SYSTEM_TASK:>
Runs and blocks until all trials finish.
<END_TASK>
<USER_TASK:>
Description:
def run_experiments(experiments,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True):
"""Runs and blocks until all trials finish.
Examples:
>>> experiment_spec = Experiment("experiment", my_func)
>>> run_experiments(experiments=experiment_spec)
>>> experiment_spec = {"experiment": {"run": my_func}}
>>> run_experiments(experiments=experiment_spec)
>>> run_experiments(
>>> experiments=experiment_spec,
>>> scheduler=MedianStoppingRule(...))
>>> run_experiments(
>>> experiments=experiment_spec,
>>> search_alg=SearchAlgorithm(),
>>> scheduler=MedianStoppingRule(...))
Returns:
List of Trial objects, holding data for each executed trial.
""" |
# It is important to do this here because it converts the experiments
# into a standard schema and performs the implicit registration.
experiments = convert_to_experiment_list(experiments)
trials = []
for exp in experiments:
trials += run(
exp,
search_alg=search_alg,
scheduler=scheduler,
with_server=with_server,
server_port=server_port,
verbose=verbose,
resume=resume,
queue_trials=queue_trials,
reuse_actors=reuse_actors,
trial_executor=trial_executor,
raise_on_failed_trial=raise_on_failed_trial)
return trials |
<SYSTEM_TASK:>
Flushes remaining output records in the output queues to plasma.
<END_TASK>
<USER_TASK:>
Description:
def _flush(self, close=False):
"""Flushes remaining output records in the output queues to plasma.
None is used as a special type of record, propagated from sources
to sinks to signal the end of data in a stream.
Args:
close (bool): A flag denoting whether the channel should be
also marked as 'closed' (True) or not (False) after flushing.
""" |
for channel in self.forward_channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_key_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.round_robin_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes() |
<SYSTEM_TASK:>
Returns an appropriate preprocessor class for the given space.
<END_TASK>
<USER_TASK:>
Description:
def get_preprocessor(space):
"""Returns an appropriate preprocessor class for the given space.""" |
legacy_patch_shapes(space)
obs_shape = space.shape
if isinstance(space, gym.spaces.Discrete):
preprocessor = OneHotPreprocessor
elif obs_shape == ATARI_OBS_SHAPE:
preprocessor = GenericPixelPreprocessor
elif obs_shape == ATARI_RAM_OBS_SHAPE:
preprocessor = AtariRamPreprocessor
elif isinstance(space, gym.spaces.Tuple):
preprocessor = TupleFlatteningPreprocessor
elif isinstance(space, gym.spaces.Dict):
preprocessor = DictFlatteningPreprocessor
else:
preprocessor = NoPreprocessor
return preprocessor |
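A small sketch of the dispatch above, assuming gym is installed and that ATARI_OBS_SHAPE is (210, 160, 3) as in RLlib:

import gym

assert get_preprocessor(gym.spaces.Discrete(4)) is OneHotPreprocessor
assert get_preprocessor(gym.spaces.Box(0.0, 1.0, shape=(10,))) is NoPreprocessor
assert get_preprocessor(
    gym.spaces.Box(0, 255, shape=(210, 160, 3))) is GenericPixelPreprocessor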
<SYSTEM_TASK:>
Assigns shapes to spaces that don't have shapes.
<END_TASK>
<USER_TASK:>
Description:
def legacy_patch_shapes(space):
"""Assigns shapes to spaces that don't have shapes.
This is only needed for older gym versions that don't set shapes properly
for Tuple and Discrete spaces.
""" |
if not hasattr(space, "shape"):
if isinstance(space, gym.spaces.Discrete):
space.shape = ()
elif isinstance(space, gym.spaces.Tuple):
shapes = []
for s in space.spaces:
shape = legacy_patch_shapes(s)
shapes.append(shape)
space.shape = tuple(shapes)
return space.shape |
<SYSTEM_TASK:>
Get a new batch from the internal ring buffer.
<END_TASK>
<USER_TASK:>
Description:
def get(self):
"""Get a new batch from the internal ring buffer.
Returns:
buf: Data item saved from inqueue.
released: True if the item is now removed from the ring buffer.
""" |
if self.ttl[self.idx] <= 0:
self.buffers[self.idx] = self.inqueue.get(timeout=300.0)
self.ttl[self.idx] = self.cur_max_ttl
if self.cur_max_ttl < self.max_ttl:
self.cur_max_ttl += 1
buf = self.buffers[self.idx]
self.ttl[self.idx] -= 1
released = self.ttl[self.idx] <= 0
if released:
self.buffers[self.idx] = None
self.idx = (self.idx + 1) % len(self.buffers)
return buf, released |
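To make the TTL semantics concrete, here is a self-contained toy that mirrors the loop above with a plain list standing in for the input queue (illustrative only; the real class also warms cur_max_ttl up from 1 toward max_ttl):

inqueue = ["batch_a", "batch_b"]
buffers, ttl, idx, max_ttl = [None, None], [0, 0], 0, 2
served = []
for _ in range(4):
    if ttl[idx] <= 0:               # slot expired: pull a fresh item
        buffers[idx] = inqueue.pop(0)
        ttl[idx] = max_ttl
    served.append(buffers[idx])
    ttl[idx] -= 1
    if ttl[idx] <= 0:               # released: advance to the next slot
        buffers[idx] = None
        idx = (idx + 1) % len(buffers)
print(served)  # ['batch_a', 'batch_a', 'batch_b', 'batch_b']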
<SYSTEM_TASK:>
Runs one logical iteration of training.
<END_TASK>
<USER_TASK:>
Description:
def train(self):
"""Runs one logical iteration of training.
Subclasses should override ``_train()`` instead to return results.
This class automatically fills the following fields in the result:
`done` (bool): training is terminated. Filled only if not provided.
`time_this_iter_s` (float): Time in seconds this iteration
took to run. This may be overridden in order to replace the
system-computed time difference.
`time_total_s` (float): Accumulated time in seconds for this
entire experiment.
`experiment_id` (str): Unique string identifier
for this experiment. This id is preserved
across checkpoint / restore calls.
`training_iteration` (int): The index of this
training iteration, e.g. call to train().
`pid` (str): The pid of the training process.
`date` (str): A formatted date of when the result was processed.
`timestamp` (str): A UNIX timestamp of when the result
was processed.
`hostname` (str): Hostname of the machine hosting the training
process.
`node_ip` (str): Node ip of the machine hosting the training
process.
Returns:
A dict that describes training progress.
""" |
start = time.time()
result = self._train()
assert isinstance(result, dict), "_train() needs to return a dict."
# We do not modify internal state nor update this result if duplicate.
if RESULT_DUPLICATE in result:
return result
result = result.copy()
self._iteration += 1
self._iterations_since_restore += 1
if result.get(TIME_THIS_ITER_S) is not None:
time_this_iter = result[TIME_THIS_ITER_S]
else:
time_this_iter = time.time() - start
self._time_total += time_this_iter
self._time_since_restore += time_this_iter
result.setdefault(DONE, False)
# self._timesteps_total should only be tracked if increments provided
if result.get(TIMESTEPS_THIS_ITER) is not None:
if self._timesteps_total is None:
self._timesteps_total = 0
self._timesteps_total += result[TIMESTEPS_THIS_ITER]
self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
# self._episodes_total should only be tracked if increments provided
if result.get(EPISODES_THIS_ITER) is not None:
if self._episodes_total is None:
self._episodes_total = 0
self._episodes_total += result[EPISODES_THIS_ITER]
# self._timesteps_total should not override user-provided total
result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
result.setdefault(EPISODES_TOTAL, self._episodes_total)
result.setdefault(TRAINING_ITERATION, self._iteration)
# Provides auto-filled neg_mean_loss for avoiding regressions
if result.get("mean_loss"):
result.setdefault("neg_mean_loss", -result["mean_loss"])
now = datetime.today()
result.update(
experiment_id=self._experiment_id,
date=now.strftime("%Y-%m-%d_%H-%M-%S"),
timestamp=int(time.mktime(now.timetuple())),
time_this_iter_s=time_this_iter,
time_total_s=self._time_total,
pid=os.getpid(),
hostname=os.uname()[1],
node_ip=self._local_ip,
config=self.config,
time_since_restore=self._time_since_restore,
timesteps_since_restore=self._timesteps_since_restore,
iterations_since_restore=self._iterations_since_restore)
self._log_result(result)
return result |
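As a minimal sketch of the contract above (assuming ray.tune is installed), a subclass returns only the metrics it computed and train() fills in the bookkeeping fields:

from ray.tune import Trainable

class ToyTrainable(Trainable):
    def _setup(self, config):
        self.lr = config.get("lr", 0.01)
        self.score = 0.0

    def _train(self):
        self.score += self.lr  # stand-in for one real training iteration
        # Returning "mean_loss" also gets "neg_mean_loss" auto-filled.
        return {"mean_loss": 1.0 / (1.0 + self.score)}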
<SYSTEM_TASK:>
Saves the current model state to a checkpoint.
<END_TASK>
<USER_TASK:>
Description:
def save(self, checkpoint_dir=None):
"""Saves the current model state to a checkpoint.
Subclasses should override ``_save()`` instead to save state.
This method dumps additional metadata alongside the saved path.
Args:
checkpoint_dir (str): Optional dir to place the checkpoint.
Returns:
Checkpoint path that may be passed to restore().
""" |
checkpoint_dir = os.path.join(checkpoint_dir or self.logdir,
"checkpoint_{}".format(self._iteration))
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint = self._save(checkpoint_dir)
saved_as_dict = False
if isinstance(checkpoint, string_types):
if (not checkpoint.startswith(checkpoint_dir)
or checkpoint == checkpoint_dir):
raise ValueError(
"The returned checkpoint path must be within the "
"given checkpoint dir {}: {}".format(
checkpoint_dir, checkpoint))
if not os.path.exists(checkpoint):
raise ValueError(
"The returned checkpoint path does not exist: {}".format(
checkpoint))
checkpoint_path = checkpoint
elif isinstance(checkpoint, dict):
saved_as_dict = True
checkpoint_path = os.path.join(checkpoint_dir, "checkpoint")
with open(checkpoint_path, "wb") as f:
pickle.dump(checkpoint, f)
else:
raise ValueError(
"`_save` must return a dict or string type: {}".format(
str(type(checkpoint))))
with open(checkpoint_path + ".tune_metadata", "wb") as f:
pickle.dump({
"experiment_id": self._experiment_id,
"iteration": self._iteration,
"timesteps_total": self._timesteps_total,
"time_total": self._time_total,
"episodes_total": self._episodes_total,
"saved_as_dict": saved_as_dict
}, f)
return checkpoint_path |
<SYSTEM_TASK:>
Saves the current model state to a Python object.
<END_TASK>
<USER_TASK:>
Description:
def save_to_object(self):
"""Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
""" |
tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir)
checkpoint_prefix = self.save(tmpdir)
data = {}
base_dir = os.path.dirname(checkpoint_prefix)
for path in os.listdir(base_dir):
path = os.path.join(base_dir, path)
if path.startswith(checkpoint_prefix):
with open(path, "rb") as f:
data[os.path.basename(path)] = f.read()
out = io.BytesIO()
data_dict = pickle.dumps({
"checkpoint_name": os.path.basename(checkpoint_prefix),
"data": data,
})
if len(data_dict) > 10e6: # getting pretty large
logger.info("Checkpoint size is {} bytes".format(len(data_dict)))
out.write(data_dict)
shutil.rmtree(tmpdir)
return out.getvalue() |
<SYSTEM_TASK:>
Restores training state from a checkpoint object.
<END_TASK>
<USER_TASK:>
Description:
def restore_from_object(self, obj):
"""Restores training state from a checkpoint object.
These checkpoints are returned from calls to save_to_object().
""" |
info = pickle.loads(obj)
data = info["data"]
tmpdir = tempfile.mkdtemp("restore_from_object", dir=self.logdir)
checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"])
for file_name, file_contents in data.items():
with open(os.path.join(tmpdir, file_name), "wb") as f:
f.write(file_contents)
self.restore(checkpoint_path)
shutil.rmtree(tmpdir) |
<SYSTEM_TASK:>
Exports model based on export_formats.
<END_TASK>
<USER_TASK:>
Description:
def export_model(self, export_formats, export_dir=None):
"""Exports model based on export_formats.
Subclasses should override _export_model() to actually
export model to local directory.
Args:
export_formats (list): List of formats that should be exported.
export_dir (str): Optional dir to place the exported model.
Defaults to self.logdir.
Return:
A dict that maps ExportFormats to successfully exported models.
""" |
export_dir = export_dir or self.logdir
return self._export_model(export_formats, export_dir) |
<SYSTEM_TASK:>
Dump a whole json record into the given file.
<END_TASK>
<USER_TASK:>
Description:
def dump_json(json_info, json_file, overwrite=True):
"""Dump a whole json record into the given file.
Overwrites the file if the overwrite flag is set.
Args:
json_info (dict): Information dict to be dumped.
json_file (str): File path to be dumped to.
overwrite (bool): Whether to overwrite the file.
""" |
if overwrite:
mode = "w"
else:
# "w+" would also truncate the file; append preserves existing content.
mode = "a"
try:
with open(json_file, mode) as f:
f.write(json.dumps(json_info))
except BaseException as e:
logging.error(str(e))  # e.message is Python 2 only
<SYSTEM_TASK:>
Parse a whole json record from the given file.
<END_TASK>
<USER_TASK:>
Description:
def parse_json(json_file):
"""Parse a whole json record from the given file.
Returns None if the json file does not exist or an exception occurs.
Args:
json_file (str): File path to be parsed.
Returns:
A dict of json info.
""" |
if not os.path.exists(json_file):
return None
try:
with open(json_file, "r") as f:
info_str = f.readlines()
info_str = "".join(info_str)
json_info = json.loads(info_str)
return unicode2str(json_info)
except BaseException as e:
logging.error(str(e))  # e.message is Python 2 only
return None |
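A quick round-trip sketch using the two helpers above (the path is illustrative):

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "state.json")
dump_json({"step": 3}, path)
info = parse_json(path)
# Under Python 2 this yields {"step": 3}; note that unicode2str (below)
# calls str.encode on Python 3 and therefore produces bytes keys there.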
<SYSTEM_TASK:>
Parse multiple json records from the given file.
<END_TASK>
<USER_TASK:>
Description:
def parse_multiple_json(json_file, offset=None):
"""Parse multiple json records from the given file.
Seeks to the offset as the start point before parsing
if offset is set. Returns an empty list if the json file does
not exist or an exception occurs.
Args:
json_file (str): File path to be parsed.
offset (int): Initial seek position of the file.
Returns:
A list of json info dicts.
The new offset after parsing.
""" |
json_info_list = []
if offset is None:
offset = 0  # ensure the in-loop `offset += len(line)` below is valid
if not os.path.exists(json_file):
return json_info_list, offset  # match the (list, offset) return below
try:
with open(json_file, "r") as f:
if offset:
f.seek(offset)
for line in f:
if line[-1] != "\n":
# Incomplete line
break
json_info = json.loads(line)
json_info_list.append(json_info)
offset += len(line)
except BaseException as e:
logging.error(str(e))  # e.message is Python 2 only
return json_info_list, offset |
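An incremental-parsing sketch for newline-delimited JSON with a resumable offset (path illustrative):

import os
import tempfile

log_path = os.path.join(tempfile.mkdtemp(), "results.json")
with open(log_path, "w") as f:
    f.write('{"iter": 1}\n{"iter": 2}\n')
records, new_offset = parse_multiple_json(log_path, offset=0)
# records holds both dicts; passing new_offset to a later call resumes
# right after the last complete line.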
<SYSTEM_TASK:>
Convert the unicode element of the content to str recursively.
<END_TASK>
<USER_TASK:>
Description:
def unicode2str(content):
"""Convert the unicode element of the content to str recursively.""" |
if isinstance(content, dict):
result = {}
for key in content.keys():
result[unicode2str(key)] = unicode2str(content[key])
return result
elif isinstance(content, list):
return [unicode2str(element) for element in content]
elif isinstance(content, int) or isinstance(content, float):
return content
else:
return content.encode("utf-8") |
<SYSTEM_TASK:>
Computes the loss of the network.
<END_TASK>
<USER_TASK:>
Description:
def loss(self, xs, ys):
"""Computes the loss of the network.""" |
return float(
self.sess.run(
self.cross_entropy, feed_dict={
self.x: xs,
self.y_: ys
})) |
<SYSTEM_TASK:>
Computes the gradients of the network.
<END_TASK>
<USER_TASK:>
Description:
def grad(self, xs, ys):
"""Computes the gradients of the network.""" |
return self.sess.run(
self.cross_entropy_grads, feed_dict={
self.x: xs,
self.y_: ys
}) |
<SYSTEM_TASK:>
Creates the queue and preprocessing operations for the dataset.
<END_TASK>
<USER_TASK:>
Description:
def build_data(data_path, size, dataset):
"""Creates the queue and preprocessing operations for the dataset.
Args:
data_path: Filename for cifar10 data.
size: The number of images in the dataset.
dataset: The dataset we are using.
Returns:
A tensor pair (images, labels) drawn from a one-shot Dataset iterator.
""" |
image_size = 32
if dataset == "cifar10":
label_bytes = 1
label_offset = 0
elif dataset == "cifar100":
label_bytes = 1
label_offset = 1
depth = 3
image_bytes = image_size * image_size * depth
record_bytes = label_bytes + label_offset + image_bytes
def load_transform(value):
# Convert these examples to dense labels and processed images.
record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])
label = tf.cast(tf.slice(record, [label_offset], [label_bytes]),
tf.int32)
# Convert from string to [depth * height * width] to
# [depth, height, width].
depth_major = tf.reshape(
tf.slice(record, [label_bytes], [image_bytes]),
[depth, image_size, image_size])
# Convert from [depth, height, width] to [height, width, depth].
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
return (image, label)
# Read examples from files in the filename queue.
data_files = tf.gfile.Glob(data_path)
data = tf.contrib.data.FixedLengthRecordDataset(data_files,
record_bytes=record_bytes)
data = data.map(load_transform)
data = data.batch(size)
iterator = data.make_one_shot_iterator()
return iterator.get_next() |
<SYSTEM_TASK:>
Build CIFAR image and labels.
<END_TASK>
<USER_TASK:>
Description:
def build_input(data, batch_size, dataset, train):
"""Build CIFAR image and labels.
Args:
data: A tuple of (images, labels) as produced by build_data.
batch_size: Input batch size.
dataset: Either "cifar10" or "cifar100".
train: True if we are training and False if we are testing.
Returns:
images: Batches of images of size
[batch_size, image_size, image_size, 3].
labels: Batches of labels of size [batch_size, num_classes].
Raises:
ValueError: When the specified dataset is not supported.
""" |
image_size = 32
depth = 3
num_classes = 10 if dataset == "cifar10" else 100
images, labels = data
num_samples = images.shape[0] - images.shape[0] % batch_size
dataset = tf.contrib.data.Dataset.from_tensor_slices(
(images[:num_samples], labels[:num_samples]))
def map_train(image, label):
image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4,
image_size + 4)
image = tf.random_crop(image, [image_size, image_size, 3])
image = tf.image.random_flip_left_right(image)
image = tf.image.per_image_standardization(image)
return (image, label)
def map_test(image, label):
image = tf.image.resize_image_with_crop_or_pad(image, image_size,
image_size)
image = tf.image.per_image_standardization(image)
return (image, label)
dataset = dataset.map(map_train if train else map_test)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat()
if train:
dataset = dataset.shuffle(buffer_size=16 * batch_size)
images, labels = dataset.make_one_shot_iterator().get_next()
images = tf.reshape(images, [batch_size, image_size, image_size, depth])
labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
labels = tf.sparse_to_dense(
tf.concat([indices, labels], 1),
[batch_size, num_classes], 1.0, 0.0)
assert len(images.get_shape()) == 4
assert images.get_shape()[0] == batch_size
assert images.get_shape()[-1] == 3
assert len(labels.get_shape()) == 2
assert labels.get_shape()[0] == batch_size
assert labels.get_shape()[1] == num_classes
if not train:
tf.summary.image("images", images)
return images, labels |
<SYSTEM_TASK:>
Uploads and runs a script on the specified cluster.
<END_TASK>
<USER_TASK:>
Description:
def submit(cluster_config_file, docker, screen, tmux, stop, start,
cluster_name, port_forward, script, script_args):
"""Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
""" |
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
if start:
create_or_update_cluster(cluster_config_file, None, None, False, False,
True, cluster_name)
target = os.path.join("~", os.path.basename(script))
rsync(cluster_config_file, script, target, cluster_name, down=False)
cmd = " ".join(["python", target] + list(script_args))
exec_cluster(cluster_config_file, cmd, docker, screen, tmux, stop, False,
cluster_name, port_forward) |
<SYSTEM_TASK:>
Build a whole graph for the model.
<END_TASK>
<USER_TASK:>
Description:
def build_graph(self):
"""Build a whole graph for the model.""" |
self.global_step = tf.Variable(0, trainable=False)
self._build_model()
if self.mode == "train":
self._build_train_op()
else:
# Additional initialization for the test network.
self.variables = ray.experimental.tf_utils.TensorFlowVariables(
self.cost)
self.summaries = tf.summary.merge_all() |
<SYSTEM_TASK:>
Forward pass of the multi-agent controller.
<END_TASK>
<USER_TASK:>
Description:
def _mac(model, obs, h):
"""Forward pass of the multi-agent controller.
Arguments:
model: TorchModel class
obs: Tensor of shape [B, n_agents, obs_size]
h: List of tensors of shape [B, n_agents, h_size]
Returns:
q_vals: Tensor of shape [B, n_agents, n_actions]
h: Tensor of shape [B, n_agents, h_size]
""" |
B, n_agents = obs.size(0), obs.size(1)
obs_flat = obs.reshape([B * n_agents, -1])
h_flat = [s.reshape([B * n_agents, -1]) for s in h]
q_flat, _, _, h_flat = model.forward({"obs": obs_flat}, h_flat)
return q_flat.reshape(
[B, n_agents, -1]), [s.reshape([B, n_agents, -1]) for s in h_flat] |
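A shape-contract sketch for _mac using a hypothetical stand-in model (the real model is an RLlib TorchModel); assumes PyTorch is installed:

import torch
import torch.nn as nn

class DummyModel(nn.Module):
    # Hypothetical model honoring the (out, _, _, state) return convention.
    def __init__(self, obs_size=4, n_actions=3):
        super(DummyModel, self).__init__()
        self.fc = nn.Linear(obs_size, n_actions)

    def forward(self, input_dict, hidden_state):
        return self.fc(input_dict["obs"]), None, None, hidden_state

model = DummyModel()
obs = torch.zeros(2, 5, 4)   # [B=2, n_agents=5, obs_size=4]
h = [torch.zeros(2, 5, 8)]   # one recurrent state with h_size=8
q_vals, h_out = _mac(model, obs, h)
assert q_vals.shape == (2, 5, 3) and h_out[0].shape == (2, 5, 8)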
<SYSTEM_TASK:>
Forward pass of the loss.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, rewards, actions, terminated, mask, obs, action_mask):
"""Forward pass of the loss.
Arguments:
rewards: Tensor of shape [B, T-1, n_agents]
actions: Tensor of shape [B, T-1, n_agents]
terminated: Tensor of shape [B, T-1, n_agents]
mask: Tensor of shape [B, T-1, n_agents]
obs: Tensor of shape [B, T, n_agents, obs_size]
action_mask: Tensor of shape [B, T, n_agents, n_actions]
""" |
B, T = obs.size(0), obs.size(1)
# Calculate estimated Q-Values
mac_out = []
h = [s.expand([B, self.n_agents, -1]) for s in self.model.state_init()]
for t in range(T):
q, h = _mac(self.model, obs[:, t], h)
mac_out.append(q)
mac_out = th.stack(mac_out, dim=1) # Concat over time
# Pick the Q-Values for the actions taken -> [B * n_agents, T-1]
chosen_action_qvals = th.gather(
mac_out[:, :-1], dim=3, index=actions.unsqueeze(3)).squeeze(3)
# Calculate the Q-Values necessary for the target
target_mac_out = []
target_h = [
s.expand([B, self.n_agents, -1])
for s in self.target_model.state_init()
]
for t in range(T):
target_q, target_h = _mac(self.target_model, obs[:, t], target_h)
target_mac_out.append(target_q)
# We don't need the first timestep's Q-Value estimate for targets
target_mac_out = th.stack(
target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[action_mask[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out[action_mask == 0] = -9999999
cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1]
target_max_qvals = th.gather(target_mac_out, 3,
cur_max_actions).squeeze(3)
else:
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if self.mixer is not None:
# TODO(ekl) add support for handling global state? This is just
# treating the stacked agent obs as the state.
chosen_action_qvals = self.mixer(chosen_action_qvals, obs[:, :-1])
target_max_qvals = self.target_mixer(target_max_qvals, obs[:, 1:])
# Calculate 1-step Q-Learning targets
targets = rewards + self.gamma * (1 - terminated) * target_max_qvals
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
loss = (masked_td_error**2).sum() / mask.sum()
return loss, mask, masked_td_error, chosen_action_qvals, targets |
<SYSTEM_TASK:>
Get a named actor which was previously created.
<END_TASK>
<USER_TASK:>
Description:
def get_actor(name):
"""Get a named actor which was previously created.
If the actor doesn't exist, an exception will be raised.
Args:
name: The name of the named actor.
Returns:
The ActorHandle object corresponding to the name.
""" |
actor_name = _calculate_key(name)
pickled_state = _internal_kv_get(actor_name)
if pickled_state is None:
raise ValueError("The actor with name={} doesn't exist".format(name))
handle = pickle.loads(pickled_state)
return handle |
<SYSTEM_TASK:>
Register a named actor under a string key.
<END_TASK>
<USER_TASK:>
Description:
def register_actor(name, actor_handle):
"""Register a named actor under a string key.
Args:
name: The name of the named actor.
actor_handle: The actor object to be associated with this name
""" |
if not isinstance(name, str):
raise TypeError("The name argument must be a string.")
if not isinstance(actor_handle, ray.actor.ActorHandle):
raise TypeError("The actor_handle argument must be an ActorHandle "
"object.")
actor_name = _calculate_key(name)
pickled_state = pickle.dumps(actor_handle)
# Add the actor to Redis if it does not already exist.
already_exists = _internal_kv_put(actor_name, pickled_state)
if already_exists:
# If the registration fails, then erase the new actor handle that
# was added when pickling the actor handle.
actor_handle._ray_new_actor_handles.pop()
raise ValueError(
"Error: the actor with name={} already exists".format(name)) |
<SYSTEM_TASK:>
Make sure all items of config are in schema
<END_TASK>
<USER_TASK:>
Description:
def check_extraneous(config, schema):
"""Make sure all items of config are in schema""" |
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
for k in config:
if k not in schema:
raise ValueError("Unexpected config key `{}` not in {}".format(
k, list(schema.keys())))
v, kreq = schema[k]
if v is None:
continue
elif isinstance(v, type):
if not isinstance(config[k], v):
if v is str and isinstance(config[k], string_types):
continue
raise ValueError(
"Config key `{}` has wrong type {}, expected {}".format(
k,
type(config[k]).__name__, v.__name__))
else:
check_extraneous(config[k], v) |
<SYSTEM_TASK:>
Required Dicts indicate that no extra fields can be introduced.
<END_TASK>
<USER_TASK:>
Description:
def validate_config(config, schema=CLUSTER_CONFIG_SCHEMA):
"""Required Dicts indicate that no extra fields can be introduced.""" |
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
check_required(config, schema)
check_extraneous(config, schema) |
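A minimal sketch of the (value, required) tuple convention these validators consume; toy_schema is illustrative and assumes check_required enforces the kreq flags:

toy_schema = {
    "cluster_name": (str, True),                   # required string
    "max_workers": (int, False),                   # optional int
    "auth": ({"ssh_user": (str, True)}, False),    # nested schema dict
}
validate_config(
    {"cluster_name": "demo", "auth": {"ssh_user": "ubuntu"}}, toy_schema)
# validate_config({"cluster_name": "demo", "port": 22}, toy_schema)
# would raise ValueError: "Unexpected config key `port` not in [...]".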
<SYSTEM_TASK:>
Update the settings according to the keyword arguments.
<END_TASK>
<USER_TASK:>
Description:
def update(self, **kwargs):
"""Update the settings according to the keyword arguments.
Args:
kwargs: The keyword arguments to set corresponding fields.
""" |
for arg in kwargs:
if hasattr(self, arg):
setattr(self, arg, kwargs[arg])
else:
raise ValueError("Invalid RayParams parameter in"
" update: %s" % arg)
self._check_usage() |
<SYSTEM_TASK:>
Update the settings when the target fields are None.
<END_TASK>
<USER_TASK:>
Description:
def update_if_absent(self, **kwargs):
"""Update the settings when the target fields are None.
Args:
kwargs: The keyword arguments to set corresponding fields.
""" |
for arg in kwargs:
if hasattr(self, arg):
if getattr(self, arg) is None:
setattr(self, arg, kwargs[arg])
else:
raise ValueError("Invalid RayParams parameter in"
" update_if_absent: %s" % arg)
self._check_usage() |
<SYSTEM_TASK:>
Deterministically compute an actor handle ID.
<END_TASK>
<USER_TASK:>
Description:
def compute_actor_handle_id(actor_handle_id, num_forks):
"""Deterministically compute an actor handle ID.
A new actor handle ID is generated when it is forked from another actor
handle. The new handle ID is computed as hash(old_handle_id || num_forks).
Args:
actor_handle_id (common.ObjectID): The original actor handle ID.
num_forks: The number of times the original actor handle has been
forked so far.
Returns:
An ID for the new actor handle.
""" |
assert isinstance(actor_handle_id, ActorHandleID)
handle_id_hash = hashlib.sha1()
handle_id_hash.update(actor_handle_id.binary())
handle_id_hash.update(str(num_forks).encode("ascii"))
handle_id = handle_id_hash.digest()
return ActorHandleID(handle_id) |
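The derivation above is plain SHA-1 over (old_handle_id || str(num_forks)); a self-contained hashlib illustration:

import hashlib

old_id = b"\x00" * 20             # stand-in for a 20-byte handle ID
h = hashlib.sha1()
h.update(old_id)
h.update(str(3).encode("ascii"))  # the third fork
new_id_bytes = h.digest()         # identical inputs -> identical ID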
<SYSTEM_TASK:>
Deterministically compute an actor handle ID in the non-forked case.
<END_TASK>
<USER_TASK:>
Description:
def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id):
"""Deterministically compute an actor handle ID in the non-forked case.
This code path is used whenever an actor handle is pickled and unpickled
(for example, if a remote function closes over an actor handle). Then,
whenever the actor handle is used, a new actor handle ID will be generated
on the fly as a deterministic function of the actor ID, the previous actor
handle ID and the current task ID.
TODO(rkn): It may be possible to cause problems by closing over multiple
actor handles in a remote function, which then get unpickled and give rise
to the same actor handle IDs.
Args:
actor_handle_id: The original actor handle ID.
current_task_id: The ID of the task that is unpickling the handle.
Returns:
An ID for the new actor handle.
""" |
assert isinstance(actor_handle_id, ActorHandleID)
assert isinstance(current_task_id, TaskID)
handle_id_hash = hashlib.sha1()
handle_id_hash.update(actor_handle_id.binary())
handle_id_hash.update(current_task_id.binary())
handle_id = handle_id_hash.digest()
return ActorHandleID(handle_id) |
<SYSTEM_TASK:>
Annotate an actor method.
<END_TASK>
<USER_TASK:>
Description:
def method(*args, **kwargs):
"""Annotate an actor method.
.. code-block:: python
@ray.remote
class Foo(object):
@ray.method(num_return_vals=2)
def bar(self):
return 1, 2
f = Foo.remote()
_, _ = f.bar.remote()
Args:
num_return_vals: The number of object IDs that should be returned by
invocations of this actor method.
""" |
assert len(args) == 0
assert len(kwargs) == 1
assert "num_return_vals" in kwargs
num_return_vals = kwargs["num_return_vals"]
def annotate_method(method):
method.__ray_num_return_vals__ = num_return_vals
return method
return annotate_method |
<SYSTEM_TASK:>
Intentionally exit the current actor.
<END_TASK>
<USER_TASK:>
Description:
def exit_actor():
"""Intentionally exit the current actor.
This function is used to disconnect an actor and exit the worker.
Raises:
Exception: An exception is raised if this is a driver or this
worker is not an actor.
""" |
worker = ray.worker.global_worker
if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil():
# Disconnect the worker from the raylet. The point of
# this is so that when the worker kills itself below, the
# raylet won't push an error message to the driver.
worker.raylet_client.disconnect()
ray.disconnect()
# Disconnect global state from GCS.
ray.global_state.disconnect()
sys.exit(0)
assert False, "This process should have terminated."
else:
raise Exception("exit_actor called on a non-actor worker.") |
<SYSTEM_TASK:>
Get the available checkpoints for the given actor ID, return a list
<END_TASK>
<USER_TASK:>
Description:
def get_checkpoints_for_actor(actor_id):
"""Get the available checkpoints for the given actor ID, return a list
sorted by checkpoint timestamp in descending order.
""" |
checkpoint_info = ray.worker.global_state.actor_checkpoint_info(actor_id)
if checkpoint_info is None:
return []
checkpoints = [
Checkpoint(checkpoint_id, timestamp) for checkpoint_id, timestamp in
zip(checkpoint_info["CheckpointIds"], checkpoint_info["Timestamps"])
]
return sorted(
checkpoints,
key=lambda checkpoint: checkpoint.timestamp,
reverse=True,
) |
<SYSTEM_TASK:>
Method execution stub for an actor handle.
<END_TASK>
<USER_TASK:>
Description:
def _actor_method_call(self,
method_name,
args=None,
kwargs=None,
num_return_vals=None):
"""Method execution stub for an actor handle.
This is the function that executes when
`actor.method_name.remote(*args, **kwargs)` is called. Instead of
executing locally, the method is packaged as a task and scheduled
to the remote actor instance.
Args:
method_name: The name of the actor method to execute.
args: A list of arguments for the actor method.
kwargs: A dictionary of keyword arguments for the actor method.
num_return_vals (int): The number of return values for the method.
Returns:
object_ids: A list of object IDs returned by the remote actor
method.
""" |
worker = ray.worker.get_global_worker()
worker.check_connected()
function_signature = self._ray_method_signatures[method_name]
if args is None:
args = []
if kwargs is None:
kwargs = {}
args = signature.extend_args(function_signature, args, kwargs)
# Execute functions locally if Ray is run in LOCAL_MODE
# Copy args to prevent the function from mutating them.
if worker.mode == ray.LOCAL_MODE:
return getattr(worker.actors[self._ray_actor_id],
method_name)(*copy.deepcopy(args))
function_descriptor = FunctionDescriptor(
self._ray_module_name, method_name, self._ray_class_name)
with self._ray_actor_lock:
object_ids = worker.submit_task(
function_descriptor,
args,
actor_id=self._ray_actor_id,
actor_handle_id=self._ray_actor_handle_id,
actor_counter=self._ray_actor_counter,
actor_creation_dummy_object_id=(
self._ray_actor_creation_dummy_object_id),
execution_dependencies=[self._ray_actor_cursor],
new_actor_handles=self._ray_new_actor_handles,
# We add one for the dummy return ID.
num_return_vals=num_return_vals + 1,
resources={"CPU": self._ray_actor_method_cpus},
placement_resources={},
driver_id=self._ray_actor_driver_id,
)
# Update the actor counter and cursor to reflect the most recent
# invocation.
self._ray_actor_counter += 1
# The last object returned is the dummy object that should be
# passed in to the next actor method. Do not return it to the user.
self._ray_actor_cursor = object_ids.pop()
# We have notified the backend of the new actor handles to expect
# since the last task was submitted, so clear the list.
self._ray_new_actor_handles = []
if len(object_ids) == 1:
object_ids = object_ids[0]
elif len(object_ids) == 0:
object_ids = None
return object_ids |
<SYSTEM_TASK:>
Run a single step of SGD.
<END_TASK>
<USER_TASK:>
Description:
def optimize(self, sess, batch_index):
"""Run a single step of SGD.
Runs a SGD step over a slice of the preloaded batch with size given by
self._loaded_per_device_batch_size and offset given by the batch_index
argument.
Updates shared model weights based on the averaged per-device
gradients.
Args:
sess: TensorFlow session.
batch_index: Offset into the preloaded data. This value must be
between `0` and `tuples_per_device`. The amount of data to
process is at most `max_per_device_batch_size`.
Returns:
The outputs of extra_ops evaluated over the batch.
""" |
feed_dict = {
self._batch_index: batch_index,
self._per_device_batch_size: self._loaded_per_device_batch_size,
self._max_seq_len: self._loaded_max_seq_len,
}
for tower in self._towers:
feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict())
fetches = {"train": self._train_op}
for tower in self._towers:
fetches.update(tower.loss_graph.extra_compute_grad_fetches())
return sess.run(fetches, feed_dict=feed_dict) |
<SYSTEM_TASK:>
Perform selection action to candidates.
<END_TASK>
<USER_TASK:>
Description:
def _selection(candidate):
"""Perform selection action to candidates.
For example, new gene = sample_1 + the 5th bit of sample2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
>>> new_gene = _selection([gene1, gene2])
>>> # new_gene could be gene1 overwritten with the
>>> # 2nd parameter of gene2
>>> # in which case:
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene2[1]
>>> # new_gene[2] = gene1[2]
Returns:
New gene (encoding)
""" |
sample_index1 = np.random.choice(len(candidate))
sample_index2 = np.random.choice(len(candidate))
sample_1 = candidate[sample_index1]
sample_2 = candidate[sample_index2]
select_index = np.random.choice(len(sample_1))
logger.info(
LOGGING_PREFIX + "Perform selection from %sth to %sth at index=%s",
sample_index2, sample_index1, select_index)
next_gen = []
for i in range(len(sample_1)):
if i == select_index:  # equality, not identity: np.random.choice returns a numpy int
next_gen.append(sample_2[i])
else:
next_gen.append(sample_1[i])
return next_gen |
<SYSTEM_TASK:>
Perform crossover action to candidates.
<END_TASK>
<USER_TASK:>
Description:
def _crossover(candidate):
"""Perform crossover action to candidates.
For example, new gene = 60% sample_1 + 40% sample_2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
>>> new_gene = _crossover([gene1, gene2])
>>> # new_gene could be the first [n=1] parameters of
>>> # gene1 + the rest of gene2
>>> # in which case:
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene2[1]
>>> # new_gene[2] = gene2[2]
Returns:
New gene (encoding)
""" |
sample_index1 = np.random.choice(len(candidate))
sample_index2 = np.random.choice(len(candidate))
sample_1 = candidate[sample_index1]
sample_2 = candidate[sample_index2]
cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7))
logger.info(
LOGGING_PREFIX +
"Perform crossover between %sth and %sth at index=%s",
sample_index1, sample_index2, cross_index)
next_gen = []
for i in range(len(sample_1)):
if i > cross_index:
next_gen.append(sample_2[i])
else:
next_gen.append(sample_1[i])
return next_gen |
<SYSTEM_TASK:>
Perform mutation action to candidates.
<END_TASK>
<USER_TASK:>
Description:
def _mutation(candidate, rate=0.1):
"""Perform mutation action to candidates.
For example, randomly change 10% of the original sample.
Args:
candidate: List of candidate genes (encodings).
rate: Percentage of mutation bits
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> new_gene = _mutation([gene1])
>>> # new_gene could be the gene1 with the 3rd parameter changed
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene1[1]
>>> # new_gene[2] = [0, 1] != gene1[2]
Returns:
New gene (encoding)
""" |
sample_index = np.random.choice(len(candidate))
sample = candidate[sample_index]
idx_list = []
for i in range(int(max(len(sample) * rate, 1))):
idx = np.random.choice(len(sample))
idx_list.append(idx)
field = sample[idx] # one-hot encoding
field[np.argmax(field)] = 0
bit = np.random.choice(field.shape[0])
field[bit] = 1
logger.info(LOGGING_PREFIX + "Perform mutation on %sth at index=%s",
sample_index, str(idx_list))
return sample |
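A self-contained illustration of the three operators on one-hot genes (assumes the module-level logger and LOGGING_PREFIX used by these helpers are configured):

import numpy as np

# Genes are sequences of one-hot fields; two toy parents:
gene1 = [np.array([0, 0, 1]), np.array([0, 1]), np.array([1, 0])]
gene2 = [np.array([0, 1, 0]), np.array([1, 0]), np.array([0, 1])]
population = [gene1, gene2]

child_a = _selection(population)  # one field swapped in from another gene
child_b = _crossover(population)  # prefix of one parent, suffix of another
mutant = _mutation(population)    # ~10% of a gene's fields re-rolled in place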
<SYSTEM_TASK:>
Start one iteration of training and save remote id.
<END_TASK>
<USER_TASK:>
Description:
def _train(self, trial):
"""Start one iteration of training and save remote id.""" |
assert trial.status == Trial.RUNNING, trial.status
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._running[remote] = trial |
<SYSTEM_TASK:>
Starts trial and restores last result if trial was paused.
<END_TASK>
<USER_TASK:>
Description:
def _start_trial(self, trial, checkpoint=None):
"""Starts trial and restores last result if trial was paused.
Raises:
RuntimeError if restoring from the checkpoint fails.
""" |
prior_status = trial.status
self.set_status(trial, Trial.RUNNING)
trial.runner = self._setup_runner(
trial,
reuse_allowed=checkpoint is not None
or trial._checkpoint.value is not None)
if not self.restore(trial, checkpoint):
if trial.status == Trial.ERROR:
raise RuntimeError(
"Restore from checkpoint failed for Trial {}.".format(
str(trial)))
previous_run = self._find_item(self._paused, trial)
if (prior_status == Trial.PAUSED and previous_run):
# If Trial was in flight when paused, self._paused stores result.
self._paused.pop(previous_run[0])
self._running[previous_run[0]] = trial
else:
self._train(trial) |
<SYSTEM_TASK:>
Stops this trial.
<END_TASK>
<USER_TASK:>
Description:
def _stop_trial(self, trial, error=False, error_msg=None,
stop_logger=True):
"""Stops this trial.
Stops this trial, releasing all allocated resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger.
""" |
if stop_logger:
trial.close_logger()
if error:
self.set_status(trial, Trial.ERROR)
else:
self.set_status(trial, Trial.TERMINATED)
try:
trial.write_error_log(error_msg)
if hasattr(trial, "runner") and trial.runner:
if (not error and self._reuse_actors
and self._cached_actor is None):
logger.debug("Reusing actor for {}".format(trial.runner))
self._cached_actor = trial.runner
else:
logger.info(
"Destroying actor for trial {}. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(trial))
trial.runner.stop.remote()
trial.runner.__ray_terminate__.remote()
except Exception:
logger.exception("Error stopping runner for Trial %s", str(trial))
self.set_status(trial, Trial.ERROR)
finally:
trial.runner = None |
<SYSTEM_TASK:>
Starts the trial.
<END_TASK>
<USER_TASK:>
Description:
def start_trial(self, trial, checkpoint=None):
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial.
""" |
self._commit_resources(trial.resources)
try:
self._start_trial(trial, checkpoint)
except Exception as e:
logger.exception("Error starting runner for Trial %s", str(trial))
error_msg = traceback.format_exc()
time.sleep(2)
self._stop_trial(trial, error=True, error_msg=error_msg)
if isinstance(e, AbortTrialExecution):
return # don't retry fatal Tune errors
try:
# This forces the trial to not start from checkpoint.
trial.clear_checkpoint()
logger.info(
"Trying to start runner for Trial %s without checkpoint.",
str(trial))
self._start_trial(trial)
except Exception:
logger.exception(
"Error starting runner for Trial %s, aborting!",
str(trial))
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg) |
<SYSTEM_TASK:>
Fetches one result of the running trials.
<END_TASK>
<USER_TASK:>
Description:
def fetch_result(self, trial):
"""Fetches one result of the running trials.
Returns:
Result of the most recent trial training run.""" |
trial_future = self._find_item(self._running, trial)
if not trial_future:
raise ValueError("Trial was not running.")
self._running.pop(trial_future[0])
with warn_if_slow("fetch_result"):
result = ray.get(trial_future[0])
# For local mode
if isinstance(result, _LocalWrapper):
result = result.unwrap()
return result |
<SYSTEM_TASK:>
Returns whether this runner has at least the specified resources.
<END_TASK>
<USER_TASK:>
Description:
def has_resources(self, resources):
"""Returns whether this runner has at least the specified resources.
This refreshes the Ray cluster resources if the time since last update
has exceeded self._refresh_period. This also assumes that the
cluster is not resizing very frequently.
""" |
if time.time() - self._last_resource_refresh > self._refresh_period:
self._update_avail_resources()
currently_available = Resources.subtract(self._avail_resources,
self._committed_resources)
have_space = (
resources.cpu_total() <= currently_available.cpu
and resources.gpu_total() <= currently_available.gpu and all(
resources.get_res_total(res) <= currently_available.get(res)
for res in resources.custom_resources))
if have_space:
return True
can_overcommit = self._queue_trials
if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \
(resources.gpu_total() > 0 and currently_available.gpu <= 0) or \
any((resources.get_res_total(res_name) > 0
and currently_available.get(res_name) <= 0)
for res_name in resources.custom_resources):
can_overcommit = False # requested resource is already saturated
if can_overcommit:
logger.warning(
"Allowing trial to start even though the "
"cluster does not have enough free resources. Trial actors "
"may appear to hang until enough resources are added to the "
"cluster (e.g., via autoscaling). You can disable this "
"behavior by specifying `queue_trials=False` in "
"ray.tune.run().")
return True
return False |
<SYSTEM_TASK:>
Returns a string describing the total resources available.
<END_TASK>
<USER_TASK:>
Description:
def resource_string(self):
"""Returns a string describing the total resources available.""" |
if self._resources_initialized:
res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
self._avail_resources.gpu)
if self._avail_resources.custom_resources:
custom = ", ".join(
"{} {}".format(
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources)
res_str += " ({})".format(custom)
return res_str
else:
return "? CPUs, ? GPUs" |
<SYSTEM_TASK:>
Saves the trial's state to a checkpoint.
<END_TASK>
<USER_TASK:>
Description:
def save(self, trial, storage=Checkpoint.DISK):
"""Saves the trial's state to a checkpoint.""" |
trial._checkpoint.storage = storage
trial._checkpoint.last_result = trial.last_result
if storage == Checkpoint.MEMORY:
trial._checkpoint.value = trial.runner.save_to_object.remote()
else:
# Keeps only highest performing checkpoints if enabled
if trial.keep_checkpoints_num:
try:
last_attr_val = trial.last_result[
trial.checkpoint_score_attr]
if (trial.compare_checkpoints(last_attr_val)
and not math.isnan(last_attr_val)):
trial.best_checkpoint_attr_value = last_attr_val
self._checkpoint_and_erase(trial)
except KeyError:
logger.warning(
"Result dict has no key: {}. keep"
"_checkpoints_num flag will not work".format(
trial.checkpoint_score_attr))
else:
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(
trial.runner.save.remote())
return trial._checkpoint.value |