text_prompt (string, 100–17.7k chars, nullable) | code_prompt (string, 7–9.86k chars, nullable)
---|---|
<SYSTEM_TASK:>
Construct the inputs to the attack graph to be used by generate_np.
<END_TASK>
<USER_TASK:>
Description:
def construct_variables(self, kwargs):
"""
Construct the inputs to the attack graph to be used by generate_np.
:param kwargs: Keyword arguments to generate_np.
:return:
Structural arguments
Feedable arguments
Output of `arg_type` describing feedable arguments
A unique key
""" |
if isinstance(self.feedable_kwargs, dict):
warnings.warn("Using a dict for `feedable_kwargs is deprecated."
"Switch to using a tuple."
"It is not longer necessary to specify the types "
"of the arguments---we build a different graph "
"for each received type."
"Using a dict may become an error on or after "
"2019-04-18.")
feedable_names = tuple(sorted(self.feedable_kwargs.keys()))
else:
feedable_names = self.feedable_kwargs
if not isinstance(feedable_names, tuple):
raise TypeError("Attack.feedable_kwargs should be a tuple, but "
"for subclass " + str(type(self)) + " it is "
+ str(self.feedable_kwargs) + " of type "
+ str(type(self.feedable_kwargs)))
# the set of arguments that are structural properties of the attack
# if these arguments are different, we must construct a new graph
fixed = dict(
(k, v) for k, v in kwargs.items() if k in self.structural_kwargs)
# the set of arguments that are passed as placeholders to the graph
# on each call, and can change without constructing a new graph
feedable = {k: v for k, v in kwargs.items() if k in feedable_names}
for k in feedable:
if isinstance(feedable[k], (float, int)):
feedable[k] = np.array(feedable[k])
for key in kwargs:
if key not in fixed and key not in feedable:
raise ValueError(str(type(self)) + ": Undeclared argument: " + key)
feed_arg_type = arg_type(feedable_names, feedable)
if not all(isinstance(value, collections.Hashable)
for value in fixed.values()):
# we have received a fixed value that isn't hashable
# this means we can't cache this graph for later use,
# and it will have to be discarded later
hash_key = None
else:
# create a unique key for this set of fixed parameters
hash_key = tuple(sorted(fixed.items())) + tuple([feed_arg_type])
return fixed, feedable, feed_arg_type, hash_key |
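The four return values are meant to be consumed by a generate_np-style caller. Below is a minimal hedged sketch of that pattern; the cache attribute `self.graphs`, the helper `construct_graph`, and `self.sess` are assumptions for illustration, not taken from the source.
def generate_np_sketch(self, x_val, **kwargs):
    # Sketch only: shows how the outputs of construct_variables are typically used.
    fixed, feedable, _, hash_key = self.construct_variables(kwargs)
    if hash_key not in self.graphs:
        # A new combination of structural arguments requires building a new graph.
        self.construct_graph(fixed, feedable, x_val, hash_key)
    x, new_kwargs, x_adv = self.graphs[hash_key]
    feed_dict = {x: x_val}
    for name in feedable:
        # Feedable arguments only change placeholder values, not the graph.
        feed_dict[new_kwargs[name]] = feedable[name]
    return self.sess.run(x_adv, feed_dict)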
<SYSTEM_TASK:>
Creates the symbolic graph of an adversarial example given the name of
<END_TASK>
<USER_TASK:>
Description:
def create_adv_by_name(model, x, attack_type, sess, dataset, y=None, **kwargs):
"""
Creates the symbolic graph of an adversarial example given the name of
an attack. Simplifies creating the symbolic graph of an attack by defining
dataset-specific parameters.
Dataset-specific default parameters are used unless a different value is
given in kwargs.
:param model: an object of Model class
:param x: Symbolic input to the attack.
:param attack_type: A string that is the name of an attack.
:param sess: Tensorflow session.
:param dataset: The name of the dataset as a string to use for default
params.
:param y: (optional) a symbolic variable for the labels.
:param kwargs: (optional) additional parameters to be passed to the attack.
""" |
# TODO: black box attacks
attack_names = {'FGSM': FastGradientMethod,
'MadryEtAl': MadryEtAl,
'MadryEtAl_y': MadryEtAl,
'MadryEtAl_multigpu': MadryEtAlMultiGPU,
'MadryEtAl_y_multigpu': MadryEtAlMultiGPU
}
if attack_type not in attack_names:
raise Exception('Attack %s not defined.' % attack_type)
attack_params_shared = {
'mnist': {'eps': .3, 'eps_iter': 0.01, 'clip_min': 0., 'clip_max': 1.,
'nb_iter': 40},
'cifar10': {'eps': 8./255, 'eps_iter': 0.01, 'clip_min': 0.,
'clip_max': 1., 'nb_iter': 20}
}
with tf.variable_scope(attack_type):
attack_class = attack_names[attack_type]
attack = attack_class(model, sess=sess)
# Extract feedable and structural keyword arguments from kwargs
fd_kwargs = list(attack.feedable_kwargs) + list(attack.structural_kwargs)
params = attack_params_shared[dataset].copy()
params.update({k: v for k, v in kwargs.items() if v is not None})
params = {k: v for k, v in params.items() if k in fd_kwargs}
if '_y' in attack_type:
params['y'] = y
logging.info(params)
adv_x = attack.generate(x, **params)
return adv_x |
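A hedged usage sketch for create_adv_by_name; the `model`, `sess`, placeholder shapes, and the `get_logits` call below are illustrative assumptions.
# Illustrative only: build a MadryEtAl (PGD) graph for MNIST-shaped inputs.
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
adv_x = create_adv_by_name(model, x, 'MadryEtAl_y', sess, dataset='mnist',
                           y=y, nb_iter=10)  # nb_iter overrides the default of 40
preds_adv = model.get_logits(adv_x)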
<SYSTEM_TASK:>
Log values to standard output and Tensorflow summary.
<END_TASK>
<USER_TASK:>
Description:
def log_value(self, tag, val, desc=''):
"""
Log values to standard output and Tensorflow summary.
:param tag: summary tag.
:param val: (required float or numpy array) value to be logged.
:param desc: (optional) additional description to be printed.
""" |
logging.info('%s (%s): %.4f' % (desc, tag, val))
self.summary.value.add(tag=tag, simple_value=val) |
<SYSTEM_TASK:>
Evaluate the accuracy of the model on adversarial examples
<END_TASK>
<USER_TASK:>
Description:
def eval_advs(self, x, y, preds_adv, X_test, Y_test, att_type):
"""
Evaluate the accuracy of the model on adversarial examples
:param x: symbolic input to model.
:param y: symbolic variable for the label.
:param preds_adv: symbolic variable for the prediction on an
adversarial example.
:param X_test: NumPy array of test set inputs.
:param Y_test: NumPy array of test set labels.
:param att_type: name of the attack.
""" |
end = (len(X_test) // self.batch_size) * self.batch_size
if self.hparams.fast_tests:
end = 10*self.batch_size
acc = model_eval(self.sess, x, y, preds_adv, X_test[:end],
Y_test[:end], args=self.eval_params)
self.log_value('test_accuracy_%s' % att_type, acc,
'Test accuracy on adversarial examples')
return acc |
<SYSTEM_TASK:>
Run the evaluation on multiple attacks.
<END_TASK>
<USER_TASK:>
Description:
def eval_multi(self, inc_epoch=True):
"""
Run the evaluation on multiple attacks.
""" |
sess = self.sess
preds = self.preds
x = self.x_pre
y = self.y
X_train = self.X_train
Y_train = self.Y_train
X_test = self.X_test
Y_test = self.Y_test
writer = self.writer
self.summary = tf.Summary()
report = {}
# Evaluate on train set
subsample_factor = 100
X_train_subsampled = X_train[::subsample_factor]
Y_train_subsampled = Y_train[::subsample_factor]
acc_train = model_eval(sess, x, y, preds, X_train_subsampled,
Y_train_subsampled, args=self.eval_params)
self.log_value('train_accuracy_subsampled', acc_train,
'Clean accuracy, subsampled train')
report['train'] = acc_train
# Evaluate on the test set
acc = model_eval(sess, x, y, preds, X_test, Y_test,
args=self.eval_params)
self.log_value('test_accuracy_natural', acc,
'Clean accuracy, natural test')
report['test'] = acc
# Evaluate against adversarial attacks
if self.epoch % self.hparams.eval_iters == 0:
for att_type in self.attack_type_test:
_, preds_adv = self.attacks[att_type]
acc = self.eval_advs(x, y, preds_adv, X_test, Y_test, att_type)
report[att_type] = acc
if self.writer:
writer.add_summary(self.summary, self.epoch)
# Add examples of adversarial examples to the summary
if self.writer and self.epoch % 20 == 0 and self.sum_op is not None:
sm_val = self.sess.run(self.sum_op,
feed_dict={x: X_test[:self.batch_size],
y: Y_test[:self.batch_size]})
if self.writer:
writer.add_summary(sm_val)
self.epoch += 1 if inc_epoch else 0
return report |
<SYSTEM_TASK:>
Wraps a callable `f` in a function that warns that the function is deprecated.
<END_TASK>
<USER_TASK:>
Description:
def _wrap(f):
"""
Wraps a callable `f` in a function that warns that the function is deprecated.
""" |
def wrapper(*args, **kwargs):
"""
Issues a deprecation warning and passes through the arguments.
"""
warnings.warn(str(f) + " is deprecated. Switch to calling the equivalent function in tensorflow. "
" This function was originally needed as a compatibility layer for old versions of tensorflow, "
" but support for those versions has now been dropped.")
return f(*args, **kwargs)
return wrapper |
<SYSTEM_TASK:>
Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle
<END_TASK>
<USER_TASK:>
Description:
def softmax_cross_entropy_with_logits(sentinel=None,
labels=None,
logits=None,
dim=-1):
"""
Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle
deprecated warning
""" |
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
name = "softmax_cross_entropy_with_logits"
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)"
% name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
try:
f = tf.nn.softmax_cross_entropy_with_logits_v2
except AttributeError:
raise RuntimeError("This version of TensorFlow is no longer supported. See cleverhans/README.md")
labels = tf.stop_gradient(labels)
loss = f(labels=labels, logits=logits, dim=dim)
return loss |
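For reference, a minimal call that satisfies the named-argument requirement enforced by the sentinel check (values are arbitrary toy numbers):
labels = tf.one_hot([2, 0], depth=3)
logits = tf.constant([[0.1, 0.2, 3.0], [2.0, 0.5, 0.3]])
loss = tf.reduce_mean(
    softmax_cross_entropy_with_logits(labels=labels, logits=logits))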
<SYSTEM_TASK:>
Enforces size of perturbation on images, and computes hashes for all images.
<END_TASK>
<USER_TASK:>
Description:
def enforce_epsilon_and_compute_hash(dataset_batch_dir, adv_dir, output_dir,
epsilon):
"""Enforces size of perturbation on images, and compute hashes for all images.
Args:
dataset_batch_dir: directory with the images of specific dataset batch
adv_dir: directory with generated adversarial images
output_dir: directory where to copy result
epsilon: size of perturbation
Returns:
dictionary mapping image ID to hash.
""" |
dataset_images = [f for f in os.listdir(dataset_batch_dir)
if f.endswith('.png')]
image_hashes = {}
resize_warning = False
for img_name in dataset_images:
if not os.path.exists(os.path.join(adv_dir, img_name)):
logging.warning('Image %s not found in the output', img_name)
continue
image = np.array(
Image.open(os.path.join(dataset_batch_dir, img_name)).convert('RGB'))
image = image.astype('int32')
image_max_clip = np.clip(image + epsilon, 0, 255).astype('uint8')
image_min_clip = np.clip(image - epsilon, 0, 255).astype('uint8')
# load and resize adversarial image if needed
adv_image = Image.open(os.path.join(adv_dir, img_name)).convert('RGB')
# Image.size is reversed compared to np.array.shape
if adv_image.size[::-1] != image.shape[:2]:
resize_warning = True
adv_image = adv_image.resize((image.shape[1], image.shape[0]),
Image.BICUBIC)
adv_image = np.array(adv_image)
clipped_adv_image = np.clip(adv_image,
image_min_clip,
image_max_clip)
Image.fromarray(clipped_adv_image).save(os.path.join(output_dir, img_name))
# compute hash
image_hashes[img_name[:-4]] = hashlib.sha1(
clipped_adv_image.view(np.uint8)).hexdigest()
if resize_warning:
logging.warning('One or more adversarial images had incorrect size')
return image_hashes |
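The per-pixel clipping above can be illustrated on its own. This standalone NumPy sketch (with made-up values) shows that the clipped result always stays within epsilon of the original image and inside the valid [0, 255] range:
import numpy as np

image = np.random.randint(0, 256, size=(299, 299, 3)).astype('int32')
adv_image = np.random.randint(0, 256, size=(299, 299, 3)).astype('int32')
epsilon = 16

image_max_clip = np.clip(image + epsilon, 0, 255).astype('uint8')
image_min_clip = np.clip(image - epsilon, 0, 255).astype('uint8')
clipped = np.clip(adv_image, image_min_clip, image_max_clip).astype('uint8')
# Each pixel is at most epsilon away from the original image.
assert np.all(np.abs(clipped.astype('int32') - image) <= epsilon)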
<SYSTEM_TASK:>
Downloads dataset, organize it by batches and rename images.
<END_TASK>
<USER_TASK:>
Description:
def download_dataset(storage_client, image_batches, target_dir,
local_dataset_copy=None):
"""Downloads dataset, organize it by batches and rename images.
Args:
storage_client: instance of the CompetitionStorageClient
image_batches: subclass of ImageBatchesBase with data about images
target_dir: target directory, should exist and be empty
local_dataset_copy: directory with local dataset copy, if local copy is
available then images will be taken from there instead of Cloud Storage
Data in the target directory will be organized into subdirectories by batches,
thus path to each image will be "target_dir/BATCH_ID/IMAGE_ID.png"
where BATCH_ID - ID of the batch (key of image_batches.data),
IMAGE_ID - ID of the image (key of image_batches.data[batch_id]['images'])
""" |
for batch_id, batch_value in iteritems(image_batches.data):
batch_dir = os.path.join(target_dir, batch_id)
os.mkdir(batch_dir)
for image_id, image_val in iteritems(batch_value['images']):
dst_filename = os.path.join(batch_dir, image_id + '.png')
# try to use local copy first
if local_dataset_copy:
local_filename = os.path.join(local_dataset_copy,
os.path.basename(image_val['image_path']))
if os.path.exists(local_filename):
shutil.copyfile(local_filename, dst_filename)
continue
# download image from cloud
cloud_path = ('gs://' + storage_client.bucket_name
+ '/' + image_val['image_path'])
if not os.path.exists(dst_filename):
subprocess.call(['gsutil', 'cp', cloud_path, dst_filename]) |
<SYSTEM_TASK:>
Saves file with target class for given dataset batch.
<END_TASK>
<USER_TASK:>
Description:
def save_target_classes_for_batch(self,
filename,
image_batches,
batch_id):
"""Saves file with target class for given dataset batch.
Args:
filename: output filename
image_batches: instance of ImageBatchesBase with dataset batches
batch_id: dataset batch ID
""" |
images = image_batches.data[batch_id]['images']
with open(filename, 'w') as f:
for image_id, image_val in iteritems(images):
target_class = self.get_target_class(image_val['dataset_image_id'])
f.write('{0}.png,{1}\n'.format(image_id, target_class)) |
<SYSTEM_TASK:>
Function for min eigen vector using tf's full eigen decomposition.
<END_TASK>
<USER_TASK:>
Description:
def tf_min_eig_vec(self):
"""Function for min eigen vector using tf's full eigen decomposition.""" |
# Full eigen decomposition requires the explicit psd matrix M
_, matrix_m = self.dual_object.get_full_psd_matrix()
[eig_vals, eig_vectors] = tf.self_adjoint_eig(matrix_m)
index = tf.argmin(eig_vals)
return tf.reshape(
eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1]) |
<SYSTEM_TASK:>
Function that returns smoothed version of min eigen vector.
<END_TASK>
<USER_TASK:>
Description:
def tf_smooth_eig_vec(self):
"""Function that returns smoothed version of min eigen vector.""" |
_, matrix_m = self.dual_object.get_full_psd_matrix()
# Easier to think in terms of max so negating the matrix
[eig_vals, eig_vectors] = tf.self_adjoint_eig(-matrix_m)
exp_eig_vals = tf.exp(tf.divide(eig_vals, self.smooth_placeholder))
scaling_factor = tf.reduce_sum(exp_eig_vals)
# Multiplying each eig vector by exponential of corresponding eig value
# Scaling factor normalizes the vector to be unit norm
eig_vec_smooth = tf.divide(
tf.matmul(eig_vectors, tf.diag(tf.sqrt(exp_eig_vals))),
tf.sqrt(scaling_factor))
return tf.reshape(
tf.reduce_sum(eig_vec_smooth, axis=1),
shape=[eig_vec_smooth.shape[0].value, 1]) |
<SYSTEM_TASK:>
Computes the min eigen value and corresponding vector of matrix M.
<END_TASK>
<USER_TASK:>
Description:
def get_min_eig_vec_proxy(self, use_tf_eig=False):
"""Computes the min eigen value and corresponding vector of matrix M.
Args:
use_tf_eig: Whether to use tf's default full eigen decomposition
Returns:
eig_vec: estimated eigen vector corresponding to the minimum eigen value of M
""" |
if use_tf_eig:
# If smoothness parameter is too small, essentially no smoothing
# Just output the eigen vector corresponding to min
return tf.cond(self.smooth_placeholder < 1E-8,
self.tf_min_eig_vec,
self.tf_smooth_eig_vec)
# Using autograph to automatically handle
# the control flow of minimum_eigen_vector
min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)
def _vector_prod_fn(x):
return self.dual_object.get_psd_product(x)
estimated_eigen_vector = min_eigen_tf(
x=self.eig_init_vec_placeholder,
num_steps=self.eig_num_iter_placeholder,
learning_rate=self.params['eig_learning_rate'],
vector_prod_fn=_vector_prod_fn)
return estimated_eigen_vector |
<SYSTEM_TASK:>
Computes scipy estimate of min eigenvalue for matrix M.
<END_TASK>
<USER_TASK:>
Description:
def get_scipy_eig_vec(self):
"""Computes scipy estimate of min eigenvalue for matrix M.
Returns:
eig_vec: estimated eigen vector corresponding to the minimum eigen value
eig_val: estimated minimum eigen value of matrix M
""" |
if not self.params['has_conv']:
matrix_m = self.sess.run(self.dual_object.matrix_m)
min_eig_vec_val, estimated_eigen_vector = eigs(matrix_m, k=1, which='SR',
tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
else:
dim = self.dual_object.matrix_m_dimension
input_vector = tf.placeholder(tf.float32, shape=(dim, 1))
output_vector = self.dual_object.get_psd_product(input_vector)
def np_vector_prod_fn(np_vector):
np_vector = np.reshape(np_vector, [-1, 1])
output_np_vector = self.sess.run(output_vector, feed_dict={input_vector:np_vector})
return output_np_vector
linear_operator = LinearOperator((dim, dim), matvec=np_vector_prod_fn)
# Use scipy's ARPACK (eigs) to estimate the eigenvalue with smallest real part
min_eig_vec_val, estimated_eigen_vector = eigs(linear_operator,
k=1, which='SR', tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val |
<SYSTEM_TASK:>
Run one step of gradient descent for optimization.
<END_TASK>
<USER_TASK:>
Description:
def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val,
penalty_val, learning_rate_val):
"""Run one step of gradient descent for optimization.
Args:
eig_init_vec_val: Start value for eigen value computations
eig_num_iter_val: Number of iterations to run for eigen computations
smooth_val: Value of smoothness parameter
penalty_val: Value of penalty for the current step
learning_rate_val: Value of learning rate
Returns:
found_cert: True if a negative certificate is found, False otherwise
""" |
# Running step
step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val,
self.eig_num_iter_placeholder: eig_num_iter_val,
self.smooth_placeholder: smooth_val,
self.penalty_placeholder: penalty_val,
self.learning_rate: learning_rate_val}
if self.params['eig_type'] == 'SCIPY':
current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec()
step_feed_dict.update({
self.eig_vec_estimate: current_eig_vector
})
elif self.params['eig_type'] == 'LZS':
step_feed_dict.update({
self.dual_object.m_min_vec_ph: self.dual_object.m_min_vec_estimate
})
self.sess.run(self.train_step, feed_dict=step_feed_dict)
[
_, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate
] = self.sess.run([
self.proj_step,
self.eig_vec_estimate,
self.eig_val_estimate
], feed_dict=step_feed_dict)
if self.current_step % self.params['print_stats_steps'] == 0:
[self.current_total_objective, self.current_unconstrained_objective,
self.dual_object.m_min_vec_estimate,
self.current_eig_val_estimate,
self.current_nu] = self.sess.run(
[self.total_objective,
self.dual_object.unconstrained_objective,
self.eig_vec_estimate,
self.eig_val_estimate,
self.dual_object.nu], feed_dict=step_feed_dict)
stats = {
'total_objective':
float(self.current_total_objective),
'unconstrained_objective':
float(self.current_unconstrained_objective),
'min_eig_val_estimate':
float(self.current_eig_val_estimate)
}
tf.logging.info('Current inner step: %d, optimization stats: %s',
self.current_step, stats)
if self.params['stats_folder'] is not None:
stats = json.dumps(stats)
filename = os.path.join(self.params['stats_folder'],
str(self.current_step) + '.json')
with tf.gfile.Open(filename, 'w') as file_f:
file_f.write(stats)
# Project onto feasible set of dual variables
if self.current_step % self.params['projection_steps'] == 0 and self.current_unconstrained_objective < 0:
nu = self.sess.run(self.dual_object.nu)
dual_feed_dict = {
self.dual_object.h_min_vec_ph: self.dual_object.h_min_vec_estimate
}
_, min_eig_val_h_lz = self.dual_object.get_lanczos_eig(compute_m=False, feed_dict=dual_feed_dict)
projected_dual_feed_dict = {
self.dual_object.projected_dual.nu: nu,
self.dual_object.projected_dual.min_eig_val_h: min_eig_val_h_lz
}
if self.dual_object.projected_dual.compute_certificate(self.current_step, projected_dual_feed_dict):
return True
return False |
<SYSTEM_TASK:>
Run the optimization, call run_one_step with suitable placeholders.
<END_TASK>
<USER_TASK:>
Description:
def run_optimization(self):
"""Run the optimization, call run_one_step with suitable placeholders.
Returns:
True if certificate is found
False otherwise
""" |
penalty_val = self.params['init_penalty']
# Don't use smoothing initially - very inaccurate for large dimension
self.smooth_on = False
smooth_val = 0
learning_rate_val = self.params['init_learning_rate']
self.current_outer_step = 1
while self.current_outer_step <= self.params['outer_num_steps']:
tf.logging.info('Running outer step %d with penalty %f',
self.current_outer_step, penalty_val)
# Running inner loop of optimization with current_smooth_val,
# current_penalty as smoothness parameters and penalty respectively
self.current_step = 0
# Run first step with random eig initialization and large number of steps
found_cert = self.run_one_step(
self.dual_object.m_min_vec_estimate,
self.params['large_eig_num_steps'], smooth_val, penalty_val, learning_rate_val)
if found_cert:
return True
while self.current_step < self.params['inner_num_steps']:
self.current_step = self.current_step + 1
found_cert = self.run_one_step(self.dual_object.m_min_vec_estimate,
self.params['small_eig_num_steps'],
smooth_val, penalty_val,
learning_rate_val)
if found_cert:
return True
# Update penalty only if it looks like the current objective is being optimized
if self.current_total_objective < UPDATE_PARAM_CONSTANT:
penalty_val = penalty_val * self.params['beta']
learning_rate_val = learning_rate_val*self.params['learning_rate_decay']
else:
# To get more accurate gradient estimate
self.params['small_eig_num_steps'] = (
1.5 * self.params['small_eig_num_steps'])
# If eigen values seem small enough, turn on smoothing
# useful only when performing full eigen decomposition
if np.abs(self.current_eig_val_estimate) < 0.01:
smooth_val = self.params['smoothness_parameter']
self.current_outer_step = self.current_outer_step + 1
return False |
<SYSTEM_TASK:>
PyTorch implementation of the clip_eta in utils_tf.
<END_TASK>
<USER_TASK:>
Description:
def clip_eta(eta, ord, eps):
"""
PyTorch implementation of the clip_eta in utils_tf.
:param eta: Tensor
:param ord: np.inf, 1, or 2
:param eps: float
""" |
if ord not in [np.inf, 1, 2]:
raise ValueError('ord must be np.inf, 1, or 2.')
avoid_zero_div = torch.tensor(1e-12, dtype=eta.dtype, device=eta.device)
reduc_ind = list(range(1, len(eta.size())))
if ord == np.inf:
eta = torch.clamp(eta, -eps, eps)
else:
if ord == 1:
# TODO: this scales eta to have L1 norm at most eps; it is not an
# exact projection onto the L1 ball.
norm = torch.max(
avoid_zero_div,
torch.sum(torch.abs(eta), dim=reduc_ind, keepdim=True)
)
elif ord == 2:
norm = torch.sqrt(torch.max(
avoid_zero_div,
torch.sum(eta ** 2, dim=reduc_ind, keepdim=True)
))
factor = torch.min(
torch.tensor(1., dtype=eta.dtype, device=eta.device),
eps / norm
)
eta *= factor
return eta |
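A minimal usage sketch, assuming PyTorch is available; shapes and epsilon values are illustrative:
import numpy as np
import torch

eta = torch.randn(4, 3, 32, 32)                    # batch of perturbations
eta_inf = clip_eta(eta.clone(), np.inf, eps=0.3)   # elementwise clamp to [-0.3, 0.3]
eta_l2 = clip_eta(eta.clone(), 2, eps=1.0)         # rescale examples with L2 norm > 1
assert eta_inf.abs().max() <= 0.3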
<SYSTEM_TASK:>
Perform the EAD attack on the given instance for the given targets.
<END_TASK>
<USER_TASK:>
Description:
def attack(self, imgs, targets):
"""
Perform the EAD attack on the given instance for the given targets.
If self.targeted is true, then the targets represents the target labels
If self.targeted is false, then targets are the original class labels
""" |
batch_size = self.batch_size
r = []
for i in range(0, len(imgs) // batch_size):
_logger.debug(
"Running EAD attack on instance %s of %s",
i * batch_size, len(imgs))
r.extend(
self.attack_batch(
imgs[i * batch_size:(i + 1) * batch_size],
targets[i * batch_size:(i + 1) * batch_size]))
if len(imgs) % batch_size != 0:
last_elements = len(imgs) - (len(imgs) % batch_size)
_logger.debug(
"Running EAD attack on instance %s of %s",
last_elements, len(imgs))
# Pad the final partial batch with zeros so attack_batch sees a full batch.
temp_imgs = np.zeros((batch_size, ) + imgs.shape[1:])
temp_targets = np.zeros((batch_size, ) + targets.shape[1:])
temp_imgs[:(len(imgs) % batch_size)] = imgs[last_elements:]
temp_targets[:(len(imgs) % batch_size)] = targets[last_elements:]
temp_data = self.attack_batch(temp_imgs, temp_targets)
r.extend(temp_data[:(len(imgs) % batch_size)])
return np.array(r) |
<SYSTEM_TASK:>
This method creates a symbolic graph of the MadryEtAl attack on
<END_TASK>
<USER_TASK:>
Description:
def attack(self, x, y_p, **kwargs):
"""
This method creates a symbolic graph of the MadryEtAl attack on
multiple GPUs. The graph is created on the first n GPUs.
Stop gradient is needed to get the speed-up. This prevents us from
being able to back-prop through the attack.
:param x: A tensor with the input image.
:param y_p: Ground truth label or predicted label.
:return: Two lists containing the input and output tensors of each GPU.
""" |
inputs = []
outputs = []
# Create the initial random perturbation
device_name = '/gpu:0'
self.model.set_device(device_name)
with tf.device(device_name):
with tf.variable_scope('init_rand'):
if self.rand_init:
eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
eta = clip_eta(eta, self.ord, self.eps)
eta = tf.stop_gradient(eta)
else:
eta = tf.zeros_like(x)
# TODO: Break the graph only nGPU times instead of nb_iter times.
# In the current implementation, by the time an adversarial example is
# used for training, the weights of the model have changed nb_iter
# times. This can cause slower convergence compared to single-GPU
# adversarial training.
for i in range(self.nb_iter):
# Create the graph for i'th step of attack
inputs += [OrderedDict()]
outputs += [OrderedDict()]
device_name = x.device
self.model.set_device(device_name)
with tf.device(device_name):
with tf.variable_scope('step%d' % i):
if i > 0:
# Clone the variables to separate the graph of 2 GPUs
x = clone_variable('x', x)
y_p = clone_variable('y_p', y_p)
eta = clone_variable('eta', eta)
inputs[i]['x'] = x
inputs[i]['y_p'] = y_p
outputs[i]['x'] = x
outputs[i]['y_p'] = y_p
inputs[i]['eta'] = eta
eta = self.attack_single_step(x, eta, y_p)
if i < self.nb_iter-1:
outputs[i]['eta'] = eta
else:
# adv_x, not eta is the output of the last step
adv_x = x + eta
if (self.clip_min is not None and self.clip_max is not None):
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
adv_x = tf.stop_gradient(adv_x, name='adv_x')
outputs[i]['adv_x'] = adv_x
return inputs, outputs |
<SYSTEM_TASK:>
Facilitates testing this attack.
<END_TASK>
<USER_TASK:>
Description:
def generate_np(self, x_val, **kwargs):
"""
Facilitates testing this attack.
""" |
_, feedable, _feedable_types, hash_key = self.construct_variables(kwargs)
if hash_key not in self.graphs:
with tf.variable_scope(None, 'attack_%d' % len(self.graphs)):
# x is a special placeholder we always want to have
with tf.device('/gpu:0'):
x = tf.placeholder(tf.float32, shape=x_val.shape, name='x')
inputs, outputs = self.generate(x, **kwargs)
from runner import RunnerMultiGPU
runner = RunnerMultiGPU(inputs, outputs, sess=self.sess)
self.graphs[hash_key] = runner
runner = self.graphs[hash_key]
feed_dict = {'x': x_val}
for name in feedable:
feed_dict[name] = feedable[name]
fvals = runner.run(feed_dict)
while not runner.is_finished():
fvals = runner.run()
return fvals['adv_x'] |
<SYSTEM_TASK:>
A helper function that computes a tensor on numpy inputs by batches.
<END_TASK>
<USER_TASK:>
Description:
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, batch_size=None,
feed=None,
args=None):
"""
A helper function that computes a tensor on numpy inputs by batches.
This version uses exactly the tensorflow graph constructed by the
caller, so the caller can place specific ops on specific devices
to implement model parallelism.
Most users probably prefer `batch_eval_multi_worker` which maps
a single-device expression to multiple devices in order to evaluate
faster by parallelizing across data.
:param sess: tf Session to use
:param tf_inputs: list of tf Placeholders to feed from the dataset
:param tf_outputs: list of tf tensors to calculate
:param numpy_inputs: list of numpy arrays defining the dataset
:param batch_size: int, batch size to use for evaluation
If not specified, this function will try to guess the batch size,
but might get an out of memory error or run the model with an
unsupported batch size, etc.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Deprecated and included only for backwards compatibility.
Should contain `batch_size`
""" |
if args is not None:
warnings.warn("`args` is deprecated and will be removed on or "
"after 2019-03-09. Pass `batch_size` directly.")
if "batch_size" in args:
assert batch_size is None
batch_size = args["batch_size"]
if batch_size is None:
batch_size = DEFAULT_EXAMPLES_PER_DEVICE
n = len(numpy_inputs)
assert n > 0
assert n == len(tf_inputs)
m = numpy_inputs[0].shape[0]
for i in range(1, n):
assert numpy_inputs[i].shape[0] == m
out = []
for _ in tf_outputs:
out.append([])
for start in range(0, m, batch_size):
batch = start // batch_size
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Compute batch start and end indices
start = batch * batch_size
end = start + batch_size
numpy_input_batches = [numpy_input[start:end]
for numpy_input in numpy_inputs]
cur_batch_size = numpy_input_batches[0].shape[0]
assert cur_batch_size <= batch_size
for e in numpy_input_batches:
assert e.shape[0] == cur_batch_size
feed_dict = dict(zip(tf_inputs, numpy_input_batches))
if feed is not None:
feed_dict.update(feed)
numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
for e in numpy_output_batches:
assert e.shape[0] == cur_batch_size, e.shape
for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
out_elem.append(numpy_output_batch)
out = [np.concatenate(x, axis=0) for x in out]
for e in out:
assert e.shape[0] == m, e.shape
return out |
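A hedged usage example: computing per-example correctness over a test set. The placeholders `x` and `y`, the `preds` tensor, and the NumPy arrays `X_test`/`Y_test` are assumed to exist; per-example outputs are used so each batch result has a leading batch dimension, as the assertions above require.
correct_op = tf.cast(
    tf.equal(tf.argmax(preds, axis=-1), tf.argmax(y, axis=-1)), tf.float32)
[correct] = batch_eval(sess, [x, y], [correct_op], [X_test, Y_test],
                       batch_size=128)
accuracy = correct.mean()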
<SYSTEM_TASK:>
Makes sure a `y` argument is a valid numpy dataset.
<END_TASK>
<USER_TASK:>
Description:
def _check_y(y):
"""
Makes sure a `y` argument is a valid numpy dataset.
""" |
if not isinstance(y, np.ndarray):
raise TypeError("y must be numpy array. Typically y contains "
"the entire test set labels. Got " + str(y) + " of type " + str(type(y))) |
<SYSTEM_TASK:>
Creates a preprocessing graph for a batch given a function that processes
<END_TASK>
<USER_TASK:>
Description:
def preprocess_batch(images_batch, preproc_func=None):
"""
Creates a preprocessing graph for a batch given a function that processes
a single image.
:param images_batch: A tensor for an image batch.
:param preproc_func: (optional function) A function that takes in a
tensor and returns a preprocessed input.
""" |
if preproc_func is None:
return images_batch
with tf.variable_scope('preprocess'):
images_list = tf.split(images_batch, int(images_batch.shape[0]))
result_list = []
for img in images_list:
reshaped_img = tf.reshape(img, img.shape[1:])
processed_img = preproc_func(reshaped_img)
result_list.append(tf.expand_dims(processed_img, axis=0))
result_images = tf.concat(result_list, axis=0)
return result_images |
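A hedged usage sketch: the batch size must be statically known for `tf.split` to work, and the random-flip preprocessing here is only an illustration.
images = tf.placeholder(tf.float32, shape=(32, 28, 28, 1))
flipped = preprocess_batch(images,
                           preproc_func=tf.image.random_flip_left_right)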
<SYSTEM_TASK:>
Create all Variables to be returned later by get_params.
<END_TASK>
<USER_TASK:>
Description:
def make_params(self):
"""
Create all Variables to be returned later by get_params.
By default this is a no-op.
Models that need their fprop to be called for their params to be
created can set `needs_dummy_fprop=True` in the constructor.
""" |
if self.needs_dummy_fprop:
if hasattr(self, "_dummy_input"):
return
self._dummy_input = self.make_input_placeholder()
self.fprop(self._dummy_input) |
<SYSTEM_TASK:>
Initializes locality-sensitive hashing with FALCONN to find nearest neighbors in training data.
<END_TASK>
<USER_TASK:>
Description:
def init_lsh(self):
"""
Initializes locality-sensitive hashing with FALCONN to find nearest neighbors in training data.
""" |
self.query_objects = {
} # contains the object that can be queried to find nearest neighbors at each layer.
# mean of training data representation per layer (that needs to be subtracted before LSH).
self.centers = {}
for layer in self.layers:
assert self.nb_tables >= self.neighbors
# Normalize all the lengths, since we care about the cosine similarity.
self.train_activations_lsh[layer] /= np.linalg.norm(
self.train_activations_lsh[layer], axis=1).reshape(-1, 1)
# Center the dataset and the queries: this improves the performance of LSH quite a bit.
center = np.mean(self.train_activations_lsh[layer], axis=0)
self.train_activations_lsh[layer] -= center
self.centers[layer] = center
# LSH parameters
params_cp = falconn.LSHConstructionParameters()
params_cp.dimension = len(self.train_activations_lsh[layer][1])
params_cp.lsh_family = falconn.LSHFamily.CrossPolytope
params_cp.distance_function = falconn.DistanceFunction.EuclideanSquared
params_cp.l = self.nb_tables
params_cp.num_rotations = 2  # for dense data set it to 1; for sparse data set it to 2
params_cp.seed = 5721840
# we want to use all the available threads to set up
params_cp.num_setup_threads = 0
params_cp.storage_hash_table = falconn.StorageHashTable.BitPackedFlatHashTable
# we build 18-bit hashes so that each table has
# 2^18 bins; this is a good choice since 2^18 is of the same
# order of magnitude as the number of data points
falconn.compute_number_of_hash_functions(self.number_bits, params_cp)
print('Constructing the LSH table')
table = falconn.LSHIndex(params_cp)
table.setup(self.train_activations_lsh[layer])
# Parse test feature vectors and find k nearest neighbors
query_object = table.construct_query_object()
query_object.set_num_probes(self.nb_tables)
self.query_objects[layer] = query_object |
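Once the tables are built, querying mirrors the preprocessing applied to the training activations (normalize, then subtract the per-layer center). A hedged sketch, where `dknn` is an instance of this class and `test_activations` is an assumed dictionary of per-layer activations:
layer = dknn.layers[0]
vec = test_activations[layer][0].copy()   # one feature vector (hypothetical input)
vec /= np.linalg.norm(vec)                # normalize, as done for the training data
vec -= dknn.centers[layer]                # subtract the per-layer training mean
neighbor_ids = dknn.query_objects[layer].find_k_nearest_neighbors(
    vec, dknn.neighbors)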
<SYSTEM_TASK:>
Given a data_activation dictionary that contains a np array with activations for each layer,
<END_TASK>
<USER_TASK:>
Description:
def find_train_knns(self, data_activations):
"""
Given a data_activation dictionary that contains a np array with activations for each layer,
find the knns in the training data.
""" |
knns_ind = {}
knns_labels = {}
for layer in self.layers:
# Pre-process representations of data to normalize and remove training data mean.
data_activations_layer = copy.copy(data_activations[layer])
nb_data = data_activations_layer.shape[0]
data_activations_layer /= np.linalg.norm(
data_activations_layer, axis=1).reshape(-1, 1)
data_activations_layer -= self.centers[layer]
# Use FALCONN to find indices of nearest neighbors in training data.
knns_ind[layer] = np.zeros(
(data_activations_layer.shape[0], self.neighbors), dtype=np.int32)
knn_errors = 0
for i in range(data_activations_layer.shape[0]):
query_res = self.query_objects[layer].find_k_nearest_neighbors(
data_activations_layer[i], self.neighbors)
try:
knns_ind[layer][i, :] = query_res
except: # pylint: disable-msg=W0702
knns_ind[layer][i, :len(query_res)] = query_res
knn_errors += knns_ind[layer].shape[1] - len(query_res)
# Find labels of neighbors found in the training data.
knns_labels[layer] = np.zeros((nb_data, self.neighbors), dtype=np.int32)
for data_id in range(nb_data):
knns_labels[layer][data_id, :] = self.train_labels[knns_ind[layer][data_id]]
return knns_ind, knns_labels |
<SYSTEM_TASK:>
Given an array of nb_data x nb_classes dimensions, use conformal prediction to compute
<END_TASK>
<USER_TASK:>
Description:
def preds_conf_cred(self, knns_not_in_class):
"""
Given an array of nb_data x nb_classes dimensions, use conformal prediction to compute
the DkNN's prediction, confidence and credibility.
""" |
nb_data = knns_not_in_class.shape[0]
preds_knn = np.zeros(nb_data, dtype=np.int32)
confs = np.zeros((nb_data, self.nb_classes), dtype=np.float32)
creds = np.zeros((nb_data, self.nb_classes), dtype=np.float32)
for i in range(nb_data):
# p-value of test input for each class
p_value = np.zeros(self.nb_classes, dtype=np.float32)
for class_id in range(self.nb_classes):
# p-value of (test point, candidate label)
p_value[class_id] = (float(self.nb_cali) - bisect_left(
self.cali_nonconformity, knns_not_in_class[i, class_id])) / float(self.nb_cali)
preds_knn[i] = np.argmax(p_value)
confs[i, preds_knn[i]] = 1. - p_value[np.argsort(p_value)[-2]]
creds[i, preds_knn[i]] = p_value[preds_knn[i]]
return preds_knn, confs, creds |
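The p-value computation above relies only on a sorted array of calibration nonconformity scores; a standalone toy illustration of the bisect-based count:
from bisect import bisect_left
import numpy as np

cali_nonconformity = np.array([1., 2., 2., 3., 5., 7., 8., 9.])  # sorted scores
nb_cali = len(cali_nonconformity)
test_nonconformity = 4.0
# Fraction of calibration points at least as nonconforming as the test value.
p_value = (float(nb_cali) -
           bisect_left(cali_nonconformity, test_nonconformity)) / float(nb_cali)
# bisect_left returns 4 here, so p_value == 0.5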
<SYSTEM_TASK:>
Performs a forward pass through the DkNN on a numpy array of data.
<END_TASK>
<USER_TASK:>
Description:
def fprop_np(self, data_np):
"""
Performs a forward pass through the DkNN on a numpy array of data.
""" |
if not self.calibrated:
raise ValueError(
"DkNN needs to be calibrated by calling DkNNModel.calibrate method once before inferring.")
data_activations = self.get_activations(data_np)
_, knns_labels = self.find_train_knns(data_activations)
knns_not_in_class = self.nonconformity(knns_labels)
_, _, creds = self.preds_conf_cred(knns_not_in_class)
return creds |
<SYSTEM_TASK:>
Performs a forward pass through the DkNN on a TF tensor by wrapping
<END_TASK>
<USER_TASK:>
Description:
def fprop(self, x):
"""
Performs a forward pass through the DkNN on a TF tensor by wrapping
the fprop_np method.
""" |
logits = tf.py_func(self.fprop_np, [x], tf.float32)
return {self.O_LOGITS: logits} |
<SYSTEM_TASK:>
Relu, with optional leaky support.
<END_TASK>
<USER_TASK:>
Description:
def _relu(x, leakiness=0.0):
"""Relu, with optional leaky support.""" |
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu') |
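A quick check of the leaky behaviour (toy values):
x = tf.constant([-2.0, 0.0, 3.0])
y = _relu(x, leakiness=0.1)   # evaluates to [-0.2, 0.0, 3.0]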
<SYSTEM_TASK:>
Computes matrices T and V using the Lanczos algorithm.
<END_TASK>
<USER_TASK:>
Description:
def construct_lanczos_params(self):
"""Computes matrices T and V using the Lanczos algorithm.
The resulting estimates of the minimum eigen value and eigen vector of M
and H are stored as attributes rather than returned.
""" |
# Using autograph to automatically handle
# the control flow of minimum_eigen_vector
self.min_eigen_vec = autograph.to_graph(utils.tf_lanczos_smallest_eigval)
def _m_vector_prod_fn(x):
return self.get_psd_product(x, dtype=self.lanczos_dtype)
def _h_vector_prod_fn(x):
return self.get_h_product(x, dtype=self.lanczos_dtype)
# Construct nodes for computing eigenvalue of M
self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1), dtype=np.float64)
zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1), dtype=tf.float64)
self.m_min_vec_ph = tf.placeholder_with_default(input=zeros_m,
shape=(self.matrix_m_dimension, 1),
name='m_min_vec_ph')
self.m_min_eig, self.m_min_vec = self.min_eigen_vec(_m_vector_prod_fn,
self.matrix_m_dimension,
self.m_min_vec_ph,
self.lzs_params['max_iter'],
dtype=self.lanczos_dtype)
self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype)
self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype)
self.h_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=np.float64)
zeros_h = tf.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=tf.float64)
self.h_min_vec_ph = tf.placeholder_with_default(input=zeros_h,
shape=(self.matrix_m_dimension - 1, 1),
name='h_min_vec_ph')
self.h_min_eig, self.h_min_vec = self.min_eigen_vec(_h_vector_prod_fn,
self.matrix_m_dimension-1,
self.h_min_vec_ph,
self.lzs_params['max_iter'],
dtype=self.lanczos_dtype)
self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype)
self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype) |
<SYSTEM_TASK:>
Function that constructs minimization objective from dual variables.
<END_TASK>
<USER_TASK:>
Description:
def set_differentiable_objective(self):
"""Function that constructs minimization objective from dual variables.""" |
# Checking if graphs are already created
if self.vector_g is not None:
return
# Computing the scalar term
bias_sum = 0
for i in range(0, self.nn_params.num_hidden_layers):
bias_sum = bias_sum + tf.reduce_sum(
tf.multiply(self.nn_params.biases[i], self.lambda_pos[i + 1]))
lu_sum = 0
for i in range(0, self.nn_params.num_hidden_layers + 1):
lu_sum = lu_sum + tf.reduce_sum(
tf.multiply(tf.multiply(self.lower[i], self.upper[i]),
self.lambda_lu[i]))
self.scalar_f = -bias_sum - lu_sum + self.final_constant
# Computing the vector term
g_rows = []
for i in range(0, self.nn_params.num_hidden_layers):
if i > 0:
current_row = (self.lambda_neg[i] + self.lambda_pos[i] -
self.nn_params.forward_pass(self.lambda_pos[i+1],
i, is_transpose=True) +
tf.multiply(self.lower[i]+self.upper[i],
self.lambda_lu[i]) +
tf.multiply(self.lambda_quad[i],
self.nn_params.biases[i-1]))
else:
current_row = (-self.nn_params.forward_pass(self.lambda_pos[i+1],
i, is_transpose=True)
+ tf.multiply(self.lower[i]+self.upper[i],
self.lambda_lu[i]))
g_rows.append(current_row)
# Term for final linear term
g_rows.append((self.lambda_pos[self.nn_params.num_hidden_layers] +
self.lambda_neg[self.nn_params.num_hidden_layers] +
self.final_linear +
tf.multiply((self.lower[self.nn_params.num_hidden_layers]+
self.upper[self.nn_params.num_hidden_layers]),
self.lambda_lu[self.nn_params.num_hidden_layers])
+ tf.multiply(
self.lambda_quad[self.nn_params.num_hidden_layers],
self.nn_params.biases[
self.nn_params.num_hidden_layers-1])))
self.vector_g = tf.concat(g_rows, axis=0)
self.unconstrained_objective = self.scalar_f + 0.5 * self.nu |
<SYSTEM_TASK:>
Function that returns the tf graph corresponding to the entire matrix M.
<END_TASK>
<USER_TASK:>
Description:
def get_full_psd_matrix(self):
"""Function that returns the tf graph corresponding to the entire matrix M.
Returns:
matrix_h: unrolled version of tf matrix corresponding to H
matrix_m: unrolled tf matrix corresponding to M
""" |
if self.matrix_m is not None:
return self.matrix_h, self.matrix_m
# Computing the matrix term
h_columns = []
for i in range(self.nn_params.num_hidden_layers + 1):
current_col_elems = []
for j in range(i):
current_col_elems.append(
tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
# For the first layer, there is no relu constraint
if i == 0:
current_col_elems.append(utils.diag(self.lambda_lu[i]))
else:
current_col_elems.append(
utils.diag(self.lambda_lu[i] + self.lambda_quad[i]))
if i < self.nn_params.num_hidden_layers:
current_col_elems.append(tf.matmul(
utils.diag(-1 * self.lambda_quad[i + 1]),
self.nn_params.weights[i]))
for j in range(i + 2, self.nn_params.num_hidden_layers + 1):
current_col_elems.append(
tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
current_column = tf.concat(current_col_elems, 0)
h_columns.append(current_column)
self.matrix_h = tf.concat(h_columns, 1)
self.matrix_h = (self.matrix_h + tf.transpose(self.matrix_h))
self.matrix_m = tf.concat(
[
tf.concat([tf.reshape(self.nu, (1, 1)), tf.transpose(self.vector_g)], axis=1),
tf.concat([self.vector_g, self.matrix_h], axis=1)
],
axis=0)
return self.matrix_h, self.matrix_m |
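The final concatenation builds M as a 2x2 block matrix from nu, g, and H. A NumPy illustration of the same block structure with toy sizes:
import numpy as np

nu = np.array([[2.0]])        # scalar dual variable, shape (1, 1)
g = np.random.randn(5, 1)     # plays the role of vector_g
h = np.random.randn(5, 5)
h = h + h.T                   # matrix_h is symmetrized the same way above
matrix_m = np.block([[nu, g.T],
                     [g,  h]])
assert matrix_m.shape == (6, 6)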
<SYSTEM_TASK:>
Function to compute the certificate based on either the current value
<END_TASK>
<USER_TASK:>
Description:
def compute_certificate(self, current_step, feed_dictionary):
""" Function to compute the certificate based either current value
or dual variables loaded from dual folder """ |
feed_dict = feed_dictionary.copy()
nu = feed_dict[self.nu]
second_term = self.make_m_psd(nu, feed_dict)
tf.logging.info('Nu after modifying: ' + str(second_term))
feed_dict.update({self.nu: second_term})
computed_certificate = self.sess.run(self.unconstrained_objective, feed_dict=feed_dict)
tf.logging.info('Inner step: %d, current value of certificate: %f',
current_step, computed_certificate)
# Sometimes due to either overflow or instability in inverses,
# the returned certificate is large and negative -- keeping a check
if LOWER_CERT_BOUND < computed_certificate < 0:
_, min_eig_val_m = self.get_lanczos_eig(feed_dict=feed_dict)
tf.logging.info('min eig val from lanczos: ' + str(min_eig_val_m))
input_vector_m = tf.placeholder(tf.float32, shape=(self.matrix_m_dimension, 1))
output_vector_m = self.get_psd_product(input_vector_m)
def np_vector_prod_fn_m(np_vector):
np_vector = np.reshape(np_vector, [-1, 1])
feed_dict.update({input_vector_m:np_vector})
output_np_vector = self.sess.run(output_vector_m, feed_dict=feed_dict)
return output_np_vector
linear_operator_m = LinearOperator((self.matrix_m_dimension,
self.matrix_m_dimension),
matvec=np_vector_prod_fn_m)
# Use scipy's ARPACK (eigs) to estimate the eigenvalue with smallest real part
min_eig_val_m_scipy, _ = eigs(linear_operator_m, k=1, which='SR', tol=TOL)
tf.logging.info('min eig val m from scipy: ' + str(min_eig_val_m_scipy))
if min_eig_val_m - TOL > 0:
tf.logging.info('Found certificate of robustness!')
return True
return False |
<SYSTEM_TASK:>
Returns extraction command based on the filename extension.
<END_TASK>
<USER_TASK:>
Description:
def get_extract_command_template(filename):
"""Returns extraction command based on the filename extension.""" |
for k, v in iteritems(EXTRACT_COMMAND):
if filename.endswith(k):
return v
return None |
<SYSTEM_TASK:>
Calls shell command with parameter substitution.
<END_TASK>
<USER_TASK:>
Description:
def shell_call(command, **kwargs):
"""Calls shell command with parameter substitution.
Args:
command: command to run as a list of tokens
**kwargs: dictionary with substitutions
Returns:
whether command was successful, i.e. returned 0 status code
Example of usage:
shell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file')
will call shell command:
cp src_file dst_file
""" |
command = list(command)
for i in range(len(command)):
m = CMD_VARIABLE_RE.match(command[i])
if m:
var_id = m.group(1)
if var_id in kwargs:
command[i] = kwargs[var_id]
return subprocess.call(command) == 0 |
<SYSTEM_TASK:>
Makes directory readable and writable by everybody.
<END_TASK>
<USER_TASK:>
Description:
def make_directory_writable(dirname):
"""Makes directory readable and writable by everybody.
Args:
dirname: name of the directory
Returns:
True if operation was successful
If you run something inside Docker container and it writes files, then
these files will be written as root user with restricted permissions.
So to be able to read/modify these files outside of Docker you have to change
permissions to be world readable and writable.
""" |
retval = shell_call(['docker', 'run', '-v',
'{0}:/output_dir'.format(dirname),
'busybox:1.27.2',
'chmod', '-R', 'a+rwx', '/output_dir'])
if not retval:
logging.error('Failed to change permissions on directory: %s', dirname)
return retval |
<SYSTEM_TASK:>
Extracts submission and moves it into self._extracted_submission_dir.
<END_TASK>
<USER_TASK:>
Description:
def _extract_submission(self, filename):
"""Extracts submission and moves it into self._extracted_submission_dir.""" |
# verify filesize
file_size = os.path.getsize(filename)
if file_size > MAX_SUBMISSION_SIZE_ZIPPED:
logging.error('Submission archive size %d is exceeding limit %d',
file_size, MAX_SUBMISSION_SIZE_ZIPPED)
return False
# determine archive type
exctract_command_tmpl = get_extract_command_template(filename)
if not exctract_command_tmpl:
logging.error('Input file has to be zip, tar or tar.gz archive; however '
'found: %s', filename)
return False
# extract archive
submission_dir = os.path.dirname(filename)
submission_basename = os.path.basename(filename)
logging.info('Extracting archive %s', filename)
retval = shell_call(
['docker', 'run',
'--network=none',
'-v', '{0}:/input_dir'.format(submission_dir),
'-v', '{0}:/output_dir'.format(self._tmp_extracted_dir),
'busybox:1.27.2'] + exctract_command_tmpl,
src=os.path.join('/input_dir', submission_basename),
dst='/output_dir')
if not retval:
logging.error('Failed to extract submission from file %s', filename)
return False
if not make_directory_writable(self._tmp_extracted_dir):
return False
# find submission root
root_dir = self._tmp_extracted_dir
root_dir_content = [d for d in os.listdir(root_dir) if d != '__MACOSX']
if (len(root_dir_content) == 1
and os.path.isdir(os.path.join(root_dir, root_dir_content[0]))):
logging.info('Looks like submission root is in subdirectory "%s" of '
'the archive', root_dir_content[0])
root_dir = os.path.join(root_dir, root_dir_content[0])
# Move files to self._extracted_submission_dir.
# At this point self._extracted_submission_dir does not exist,
# so following command will simply rename root_dir into
# self._extracted_submission_dir
if not shell_call(['mv', root_dir, self._extracted_submission_dir]):
logging.error("Can't move submission files from root directory")
return False
return True |
<SYSTEM_TASK:>
Verifies size of Docker image.
<END_TASK>
<USER_TASK:>
Description:
def _verify_docker_image_size(self, image_name):
"""Verifies size of Docker image.
Args:
image_name: name of the Docker image.
Returns:
True if image size is within the limits, False otherwise.
""" |
shell_call(['docker', 'pull', image_name])
try:
image_size = subprocess.check_output(
['docker', 'inspect', '--format={{.Size}}', image_name]).strip()
image_size = int(image_size)
except (ValueError, subprocess.CalledProcessError) as e:
logging.error('Failed to determine docker image size: %s', e)
return False
logging.info('Size of docker image %s is %d', image_name, image_size)
if image_size > MAX_DOCKER_IMAGE_SIZE:
logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE)
return image_size <= MAX_DOCKER_IMAGE_SIZE |
<SYSTEM_TASK:>
Prepares sample data for the submission.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_sample_data(self, submission_type):
"""Prepares sample data for the submission.
Args:
submission_type: type of the submission.
""" |
# write images
images = np.random.randint(0, 256,
size=[BATCH_SIZE, 299, 299, 3], dtype=np.uint8)
for i in range(BATCH_SIZE):
Image.fromarray(images[i, :, :, :]).save(
os.path.join(self._sample_input_dir, IMAGE_NAME_PATTERN.format(i)))
# write target class for targeted attacks
if submission_type == 'targeted_attack':
target_classes = np.random.randint(1, 1001, size=[BATCH_SIZE])
target_class_filename = os.path.join(self._sample_input_dir,
'target_class.csv')
with open(target_class_filename, 'w') as f:
for i in range(BATCH_SIZE):
f.write((IMAGE_NAME_PATTERN + ',{1}\n').format(i, target_classes[i])) |
<SYSTEM_TASK:>
Verifies correctness of the submission output.
<END_TASK>
<USER_TASK:>
Description:
def _verify_output(self, submission_type):
"""Verifies correctness of the submission output.
Args:
submission_type: type of the submission
Returns:
True if output looks valid
""" |
result = True
if submission_type == 'defense':
try:
image_classification = load_defense_output(
os.path.join(self._sample_output_dir, 'result.csv'))
expected_keys = [IMAGE_NAME_PATTERN.format(i)
for i in range(BATCH_SIZE)]
if set(image_classification.keys()) != set(expected_keys):
logging.error('Classification results are not saved for all images')
result = False
except IOError as e:
logging.error('Failed to read defense output file: %s', e)
result = False
else:
for i in range(BATCH_SIZE):
image_filename = os.path.join(self._sample_output_dir,
IMAGE_NAME_PATTERN.format(i))
try:
img = np.array(Image.open(image_filename).convert('RGB'))
if list(img.shape) != [299, 299, 3]:
logging.error('Invalid image size %s for image %s',
str(img.shape), image_filename)
result = False
except IOError as e:
result = False
return result |
<SYSTEM_TASK:>
Validates submission.
<END_TASK>
<USER_TASK:>
Description:
def validate_submission(self, filename):
"""Validates submission.
Args:
filename: submission filename
Returns:
submission metadata or None if submission is invalid
""" |
self._prepare_temp_dir()
# Convert filename to be absolute path, relative path might cause problems
# with mounting directory in Docker
filename = os.path.abspath(filename)
# extract submission
if not self._extract_submission(filename):
return None
# verify submission size
if not self._verify_submission_size():
return None
# Load metadata
metadata = self._load_and_verify_metadata()
if not metadata:
return None
submission_type = metadata['type']
# verify docker container size
if not self._verify_docker_image_size(metadata['container_gpu']):
return None
# Try to run submission on sample data
self._prepare_sample_data(submission_type)
if not self._run_submission(metadata):
logging.error('Failure while running submission')
return None
if not self._verify_output(submission_type):
logging.warning('Some of the outputs of your submission are invalid or '
'missing. Your submission will still be evaluated, '
'but you might get a lower score.')
return metadata |
<SYSTEM_TASK:>
Generate adversarial examples and return them as a NumPy array.
<END_TASK>
<USER_TASK:>
Description:
def generate_np(self, x_val, **kwargs):
"""
Generate adversarial examples and return them as a NumPy array.
:param x_val: A NumPy array with the original inputs.
:param **kwargs: optional parameters used by child classes.
:return: A NumPy array holding the adversarial examples.
""" |
tfe = tf.contrib.eager
x = tfe.Variable(x_val)
adv_x = self.generate(x, **kwargs)
return adv_x.numpy() |
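A hedged eager-mode usage sketch; it assumes eager execution has been enabled and that `attack` is an attack instance whose `generate` accepts these keyword arguments:
import numpy as np

x_val = np.random.rand(4, 28, 28, 1).astype(np.float32)
adv = attack.generate_np(x_val, eps=0.3, clip_min=0., clip_max=1.)
print(adv.shape)  # same shape as x_val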
<SYSTEM_TASK:>
Returns a list of all files in CleverHans with the given suffix.
<END_TASK>
<USER_TASK:>
Description:
def list_files(suffix=""):
"""
Returns a list of all files in CleverHans with the given suffix.
Parameters
----------
suffix : str
Returns
-------
file_list : list
A list of all files in CleverHans whose filepath ends with `suffix`.
""" |
cleverhans_path = os.path.abspath(cleverhans.__path__[0])
# In some environments cleverhans_path does not point to a real directory.
# In such case return empty list.
if not os.path.isdir(cleverhans_path):
return []
repo_path = os.path.abspath(os.path.join(cleverhans_path, os.pardir))
file_list = _list_files(cleverhans_path, suffix)
extra_dirs = ['cleverhans_tutorials', 'examples', 'scripts', 'tests_tf', 'tests_pytorch']
for extra_dir in extra_dirs:
extra_path = os.path.join(repo_path, extra_dir)
if os.path.isdir(extra_path):
extra_files = _list_files(extra_path, suffix)
extra_files = [os.path.join(os.pardir, path) for path in extra_files]
file_list = file_list + extra_files
return file_list |
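Example usage:
# Collect every Python source file known to the helper.
py_files = list_files(suffix=".py")
print(len(py_files), 'files found')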
<SYSTEM_TASK:>
Returns a list of all files ending in `suffix` contained within `path`.
<END_TASK>
<USER_TASK:>
Description:
def _list_files(path, suffix=""):
"""
Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
""" |
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for one_list in lists:
for elem in one_list:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn't find file '%s'" % path
if path.endswith(suffix):
return [path]
return [] |
<SYSTEM_TASK:>
Main function which runs master.
<END_TASK>
<USER_TASK:>
Description:
def main(args):
"""Main function which runs master.""" |
if args.blacklisted_submissions:
logging.warning('BLACKLISTED SUBMISSIONS: %s',
args.blacklisted_submissions)
if args.limited_dataset:
logging.info('Using limited dataset: 3 batches * 10 images')
max_dataset_num_images = 30
batch_size = 10
else:
logging.info('Using full dataset. Batch size: %d', DEFAULT_BATCH_SIZE)
max_dataset_num_images = None
batch_size = DEFAULT_BATCH_SIZE
random.seed()
print('\nRound: {0}\n'.format(args.round_name))
eval_master = EvaluationMaster(
storage_client=eval_lib.CompetitionStorageClient(
args.project_id, args.storage_bucket),
datastore_client=eval_lib.CompetitionDatastoreClient(
args.project_id, args.round_name),
round_name=args.round_name,
dataset_name=args.dataset_name,
blacklisted_submissions=args.blacklisted_submissions,
results_dir=args.results_dir,
num_defense_shards=args.num_defense_shards,
verbose=args.verbose,
batch_size=batch_size,
max_dataset_num_images=max_dataset_num_images)
if args.command == 'attack':
eval_master.prepare_attacks()
elif args.command == 'defense':
eval_master.prepare_defenses()
elif args.command == 'cleanup_defenses':
eval_master.cleanup_defenses()
elif args.command == 'results':
eval_master.compute_results()
elif args.command == 'status':
eval_master.show_status()
elif args.command == 'cleanup_datastore':
eval_master.cleanup_datastore()
elif args.command == 'cleanup_failed_attacks':
eval_master.cleanup_failed_attacks()
elif args.command == 'cleanup_attacks_with_zero_images':
eval_master.cleanup_attacks_with_zero_images()
else:
print('Invalid command: ', args.command)
print('')
print(USAGE) |
<SYSTEM_TASK:>
When work is already populated asks whether we should continue.
<END_TASK>
<USER_TASK:>
Description:
def ask_when_work_is_populated(self, work):
"""When work is already populated asks whether we should continue.
This method prints warning message that work is populated and asks
whether user wants to continue or not.
Args:
work: instance of WorkPiecesBase
Returns:
True if we should continue and populate datastore, False if we should stop
""" |
work.read_all_from_datastore()
if work.work:
print('Work is already written to datastore.\n'
'If you continue, this data will be overwritten and '
'possibly corrupted.')
inp = input_str('Do you want to continue? '
'(type "yes" without quotes to confirm): ')
return inp == 'yes'
else:
return True |
<SYSTEM_TASK:>
Prepares all data needed for evaluation of attacks.
<END_TASK>
<USER_TASK:>
Description:
def prepare_attacks(self):
"""Prepares all data needed for evaluation of attacks.""" |
print_header('PREPARING ATTACKS DATA')
# verify that attacks data not written yet
if not self.ask_when_work_is_populated(self.attack_work):
return
self.attack_work = eval_lib.AttackWorkPieces(
datastore_client=self.datastore_client)
# prepare submissions
print_header('Initializing submissions')
self.submissions.init_from_storage_write_to_datastore()
if self.verbose:
print(self.submissions)
# prepare dataset batches
print_header('Initializing dataset batches')
self.dataset_batches.init_from_storage_write_to_datastore(
batch_size=self.batch_size,
allowed_epsilon=ALLOWED_EPS,
skip_image_ids=[],
max_num_images=self.max_dataset_num_images)
if self.verbose:
print(self.dataset_batches)
# prepare adversarial batches
print_header('Initializing adversarial batches')
self.adv_batches.init_from_dataset_and_submissions_write_to_datastore(
dataset_batches=self.dataset_batches,
attack_submission_ids=self.submissions.get_all_attack_ids())
if self.verbose:
print(self.adv_batches)
# prepare work pieces
print_header('Preparing attack work pieces')
self.attack_work.init_from_adversarial_batches(self.adv_batches.data)
self.attack_work.write_all_to_datastore()
if self.verbose:
print(self.attack_work) |
<SYSTEM_TASK:>
Prepares all data needed for evaluation of defenses.
<END_TASK>
<USER_TASK:>
Description:
def prepare_defenses(self):
"""Prepares all data needed for evaluation of defenses.""" |
print_header('PREPARING DEFENSE DATA')
# verify that defense data not written yet
if not self.ask_when_work_is_populated(self.defense_work):
return
self.defense_work = eval_lib.DefenseWorkPieces(
datastore_client=self.datastore_client)
# load results of attacks
self.submissions.init_from_datastore()
self.dataset_batches.init_from_datastore()
self.adv_batches.init_from_datastore()
self.attack_work.read_all_from_datastore()
# populate classification results
print_header('Initializing classification batches')
self.class_batches.init_from_adversarial_batches_write_to_datastore(
self.submissions, self.adv_batches)
if self.verbose:
print(self.class_batches)
# populate work pieces
print_header('Preparing defense work pieces')
self.defense_work.init_from_class_batches(
self.class_batches.data, num_shards=self.num_defense_shards)
self.defense_work.write_all_to_datastore()
if self.verbose:
print(self.defense_work) |
<SYSTEM_TASK:>
Saves statistics about each submission.
<END_TASK>
<USER_TASK:>
Description:
def _save_work_results(self, run_stats, scores, num_processed_images,
filename):
"""Saves statistics about each submission.
Saved statistics include score; number of completed and failed batches;
min, max, average and median time needed to run one batch.
Args:
run_stats: dictionary with runtime statistics for submissions,
can be generated by WorkPiecesBase.compute_work_statistics
scores: dictionary mapping submission ids to scores
num_processed_images: dictionary with number of successfully processed
images by each submission, one of the outputs of
ClassificationBatches.compute_classification_results
filename: output filename
""" |
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(
['SubmissionID', 'ExternalSubmissionId', 'Score',
'CompletedBatches', 'BatchesWithError', 'ProcessedImages',
'MinEvalTime', 'MaxEvalTime',
'MedianEvalTime', 'MeanEvalTime',
'ErrorMsg'])
for submission_id in sorted(iterkeys(run_stats)):
stat = run_stats.get(
submission_id,
collections.defaultdict(lambda: float('NaN')))
external_id = self.submissions.get_external_id(submission_id)
error_msg = ''
while not error_msg and stat['error_messages']:
error_msg = stat['error_messages'].pop()
if error_msg.startswith('Cant copy adversarial batch locally'):
error_msg = ''
writer.writerow([
submission_id, external_id, scores.get(submission_id, None),
stat['completed'], stat['num_errors'],
num_processed_images.get(submission_id, None),
stat['min_eval_time'], stat['max_eval_time'],
stat['median_eval_time'], stat['mean_eval_time'],
error_msg
]) |
<SYSTEM_TASK:>
Shows status for given work pieces.
<END_TASK>
<USER_TASK:>
Description:
def _show_status_for_work(self, work):
"""Shows status for given work pieces.
Args:
work: instance of either AttackWorkPieces or DefenseWorkPieces
""" |
work_count = len(work.work)
work_completed = {}
work_completed_count = 0
for v in itervalues(work.work):
if v['is_completed']:
work_completed_count += 1
worker_id = v['claimed_worker_id']
if worker_id not in work_completed:
work_completed[worker_id] = {
'completed_count': 0,
'last_update': 0.0,
}
work_completed[worker_id]['completed_count'] += 1
work_completed[worker_id]['last_update'] = max(
work_completed[worker_id]['last_update'],
v['claimed_worker_start_time'])
print('Completed {0}/{1} work'.format(work_completed_count,
work_count))
for k in sorted(iterkeys(work_completed)):
last_update_time = time.strftime(
'%Y-%m-%d %H:%M:%S',
time.localtime(work_completed[k]['last_update']))
print('Worker {0}: completed {1} last claimed work at {2}'.format(
k, work_completed[k]['completed_count'], last_update_time)) |
<SYSTEM_TASK:>
Saves errors for given work pieces into file.
<END_TASK>
<USER_TASK:>
Description:
def _export_work_errors(self, work, output_file):
"""Saves errors for given work pieces into file.
Args:
work: instance of either AttackWorkPieces or DefenseWorkPieces
output_file: name of the output file
""" |
errors = set()
for v in itervalues(work.work):
if v['is_completed'] and v['error'] is not None:
errors.add(v['error'])
with open(output_file, 'w') as f:
for e in sorted(errors):
f.write(e)
f.write('\n') |
<SYSTEM_TASK:>
Shows current status of competition evaluation.
<END_TASK>
<USER_TASK:>
Description:
def show_status(self):
"""Shows current status of competition evaluation.
Also this method saves error messages generated by attacks and defenses
into attack_errors.txt and defense_errors.txt.
""" |
print_header('Attack work statistics')
self.attack_work.read_all_from_datastore()
self._show_status_for_work(self.attack_work)
self._export_work_errors(
self.attack_work,
os.path.join(self.results_dir, 'attack_errors.txt'))
print_header('Defense work statistics')
self.defense_work.read_all_from_datastore()
self._show_status_for_work(self.defense_work)
self._export_work_errors(
self.defense_work,
os.path.join(self.results_dir, 'defense_errors.txt')) |
<SYSTEM_TASK:>
Cleans up data of failed attacks.
<END_TASK>
<USER_TASK:>
Description:
def cleanup_failed_attacks(self):
"""Cleans up data of failed attacks.""" |
print_header('Cleaning up failed attacks')
attacks_to_replace = {}
self.attack_work.read_all_from_datastore()
failed_submissions = set()
error_msg = set()
for k, v in iteritems(self.attack_work.work):
if v['error'] is not None:
attacks_to_replace[k] = dict(v)
failed_submissions.add(v['submission_id'])
error_msg.add(v['error'])
attacks_to_replace[k].update(
{
'claimed_worker_id': None,
'claimed_worker_start_time': None,
'is_completed': False,
'error': None,
'elapsed_time': None,
})
self.attack_work.replace_work(attacks_to_replace)
print('Affected submissions:')
print(' '.join(sorted(failed_submissions)))
print('Error messages:')
print(' '.join(sorted(error_msg)))
print('')
inp = input_str('Are you sure? (type "yes" without quotes to confirm): ')
if inp != 'yes':
return
self.attack_work.write_all_to_datastore()
print('Work cleaned up') |
<SYSTEM_TASK:>
Cleans up data about attacks which generated zero images.
<END_TASK>
<USER_TASK:>
Description:
def cleanup_attacks_with_zero_images(self):
"""Cleans up data about attacks which generated zero images.""" |
print_header('Cleaning up attacks which generated 0 images.')
# find out attack work to cleanup
self.adv_batches.init_from_datastore()
self.attack_work.read_all_from_datastore()
new_attack_work = {}
affected_adversarial_batches = set()
for work_id, work in iteritems(self.attack_work.work):
adv_batch_id = work['output_adversarial_batch_id']
img_count_adv_batch = len(self.adv_batches.data[adv_batch_id]['images'])
if (img_count_adv_batch < 100) and (work['elapsed_time'] < 500):
affected_adversarial_batches.add(adv_batch_id)
new_attack_work[work_id] = dict(work)
new_attack_work[work_id].update(
{
'claimed_worker_id': None,
'claimed_worker_start_time': None,
'is_completed': False,
'error': None,
'elapsed_time': None,
})
self.attack_work.replace_work(new_attack_work)
print_header('Changes in attack works:')
print(self.attack_work)
# build list of classification batches
self.class_batches.init_from_datastore()
affected_class_batches = set()
for k, v in iteritems(self.class_batches.data):
if v['adversarial_batch_id'] in affected_adversarial_batches:
affected_class_batches.add(k)
# cleanup defense work on affected batches
self.defense_work.read_all_from_datastore()
new_defense_work = {}
for k, v in iteritems(self.defense_work.work):
if v['output_classification_batch_id'] in affected_class_batches:
new_defense_work[k] = dict(v)
new_defense_work[k].update(
{
'claimed_worker_id': None,
'claimed_worker_start_time': None,
'is_completed': False,
'error': None,
'elapsed_time': None,
'stat_correct': None,
'stat_error': None,
'stat_target_class': None,
'stat_num_images': None,
})
self.defense_work.replace_work(new_defense_work)
print_header('Changes in defense works:')
print(self.defense_work)
print('')
print('Total number of affected attack work: ', len(self.attack_work))
print('Total number of affected defense work: ', len(self.defense_work))
inp = input_str('Are you sure? (type "yes" without quotes to confirm): ')
if inp != 'yes':
return
print('Writing attacks work')
self.attack_work.write_all_to_datastore()
print('Writing defenses work')
self.defense_work.write_all_to_datastore()
print('Done!') |
<SYSTEM_TASK:>
Asks confirmation and then deletes entries with keys.
<END_TASK>
<USER_TASK:>
Description:
def _cleanup_keys_with_confirmation(self, keys_to_delete):
"""Asks confirmation and then deletes entries with keys.
Args:
keys_to_delete: list of datastore keys for which entries should be deleted
""" |
print('Round name: ', self.round_name)
print('Number of entities to be deleted: ', len(keys_to_delete))
if not keys_to_delete:
return
if self.verbose:
print('Entities to delete:')
idx = 0
prev_key_prefix = None
dots_printed_after_same_prefix = False
for k in keys_to_delete:
if idx >= 20:
print(' ...')
print(' ...')
break
key_prefix = (k.flat_path[0:1]
if k.flat_path[0] in [u'SubmissionType', u'WorkType']
else k.flat_path[0])
if prev_key_prefix == key_prefix:
if not dots_printed_after_same_prefix:
print(' ...')
dots_printed_after_same_prefix = True
else:
print(' ', k)
dots_printed_after_same_prefix = False
idx += 1
prev_key_prefix = key_prefix
print()
inp = input_str('Are you sure? (type "yes" without quotes to confirm): ')
if inp != 'yes':
return
with self.datastore_client.no_transact_batch() as batch:
for k in keys_to_delete:
batch.delete(k)
print('Data deleted') |
<SYSTEM_TASK:>
Cleans up all data about defense work in current round.
<END_TASK>
<USER_TASK:>
Description:
def cleanup_defenses(self):
"""Cleans up all data about defense work in current round.""" |
print_header('CLEANING UP DEFENSES DATA')
work_ancestor_key = self.datastore_client.key('WorkType', 'AllDefenses')
keys_to_delete = [
e.key
for e in self.datastore_client.query_fetch(kind=u'ClassificationBatch')
] + [
e.key
for e in self.datastore_client.query_fetch(kind=u'Work',
ancestor=work_ancestor_key)
]
self._cleanup_keys_with_confirmation(keys_to_delete) |
<SYSTEM_TASK:>
Cleans up datastore and deletes all information about current round.
<END_TASK>
<USER_TASK:>
Description:
def cleanup_datastore(self):
"""Cleans up datastore and deletes all information about current round.""" |
print_header('CLEANING UP ENTIRE DATASTORE')
kinds_to_delete = [u'Submission', u'SubmissionType',
u'DatasetImage', u'DatasetBatch',
u'AdversarialImage', u'AdversarialBatch',
u'Work', u'WorkType',
u'ClassificationBatch']
keys_to_delete = [e.key for k in kinds_to_delete
for e in self.datastore_client.query_fetch(kind=k)]
self._cleanup_keys_with_confirmation(keys_to_delete) |
<SYSTEM_TASK:>
Create a multi-GPU model similar to the basic cnn in the tutorials.
<END_TASK>
<USER_TASK:>
Description:
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs):
"""
Create a multi-GPU model similar to the basic cnn in the tutorials.
""" |
model = make_basic_cnn()
layers = model.layers
model = MLPnGPU(nb_classes, layers, input_shape)
return model |
<SYSTEM_TASK:>
Build the graph for cost from the logits if logits are provided.
<END_TASK>
<USER_TASK:>
Description:
def build_cost(self, labels, logits):
"""
Build the graph for cost from the logits if logits are provided.
If predictions are provided, logits are extracted from the operation.
""" |
op = logits.op
if "softmax" in str(op).lower():
logits, = op.inputs
with tf.variable_scope('costs'):
xent = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
cost = tf.reduce_mean(xent, name='xent')
cost += self._decay()
return cost |
<SYSTEM_TASK:>
Bottleneck residual unit with 3 sub layers.
<END_TASK>
<USER_TASK:>
Description:
def _bottleneck_residual(self, x, in_filter, out_filter, stride,
activate_before_residual=False):
"""Bottleneck residual unit with 3 sub layers.""" |
if activate_before_residual:
with tf.variable_scope('common_bn_relu'):
x = self._layer_norm('init_bn', x)
x = self._relu(x, self.hps.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_bn_relu'):
orig_x = x
x = self._layer_norm('init_bn', x)
x = self._relu(x, self.hps.relu_leakiness)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride)
with tf.variable_scope('sub2'):
x = self._layer_norm('bn2', x)
x = self._relu(x, self.hps.relu_leakiness)
x = self._conv('conv2', x, 3, out_filter / 4,
out_filter / 4, [1, 1, 1, 1])
with tf.variable_scope('sub3'):
x = self._layer_norm('bn3', x)
x = self._relu(x, self.hps.relu_leakiness)
x = self._conv('conv3', x, 1, out_filter /
4, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = self._conv('project', orig_x, 1,
in_filter, out_filter, stride)
x += orig_x
return x |
<SYSTEM_TASK:>
Reads classification results from the file in Cloud Storage.
<END_TASK>
<USER_TASK:>
Description:
def read_classification_results(storage_client, file_path):
"""Reads classification results from the file in Cloud Storage.
This method reads a file with classification results produced by running
a defense on a single batch of adversarial images.
Args:
storage_client: instance of CompetitionStorageClient or None for local file
file_path: path of the file with results
Returns:
dictionary where keys are image names or IDs and values are classification
labels
""" |
if storage_client:
# file on Cloud
success = False
retry_count = 0
while retry_count < 4:
try:
blob = storage_client.get_blob(file_path)
if not blob:
return {}
if blob.size > MAX_ALLOWED_CLASSIFICATION_RESULT_SIZE:
logging.warning("Skipping classification result because it's too "
"big: %d bytes for %s", blob.size, file_path)
return None
buf = BytesIO()
blob.download_to_file(buf)
buf.seek(0)
success = True
break
except Exception:
retry_count += 1
time.sleep(5)
if not success:
return None
else:
# local file
try:
with open(file_path, 'rb') as f:
buf = BytesIO(f.read())
except IOError:
return None
result = {}
if PY3:
buf = StringIO(buf.read().decode('UTF-8'))
for row in csv.reader(buf):
try:
image_filename = row[0]
if image_filename.endswith('.png') or image_filename.endswith('.jpg'):
image_filename = image_filename[:image_filename.rfind('.')]
label = int(row[1])
except (IndexError, ValueError):
continue
result[image_filename] = label
return result |
<SYSTEM_TASK:>
Reads and analyzes one classification result.
<END_TASK>
<USER_TASK:>
Description:
def analyze_one_classification_result(storage_client, file_path,
adv_batch, dataset_batches,
dataset_meta):
"""Reads and analyzes one classification result.
This method reads file with classification result and counts
how many images were classified correctly and incorrectly,
how many times target class was hit and total number of images.
Args:
storage_client: instance of CompetitionStorageClient
file_path: result file path
adv_batch: AversarialBatches.data[adv_batch_id]
adv_batch_id is stored in each ClassificationBatch entity
dataset_batches: instance of DatasetBatches
dataset_meta: instance of DatasetMetadata
Returns:
Tuple of (count_correctly_classified, count_errors, count_hit_target_class,
num_images)
""" |
class_result = read_classification_results(storage_client, file_path)
if class_result is None:
return 0, 0, 0, 0
adv_images = adv_batch['images']
dataset_batch_images = (
dataset_batches.data[adv_batch['dataset_batch_id']]['images'])
count_correctly_classified = 0
count_errors = 0
count_hit_target_class = 0
num_images = 0
for adv_img_id, label in iteritems(class_result):
if adv_img_id not in adv_images:
continue
num_images += 1
clean_image_id = adv_images[adv_img_id]['clean_image_id']
dataset_image_id = (
dataset_batch_images[clean_image_id]['dataset_image_id'])
if label == dataset_meta.get_true_label(dataset_image_id):
count_correctly_classified += 1
else:
count_errors += 1
if label == dataset_meta.get_target_class(dataset_image_id):
count_hit_target_class += 1
return (count_correctly_classified, count_errors,
count_hit_target_class, num_images) |
<SYSTEM_TASK:>
Saves matrix to the file.
<END_TASK>
<USER_TASK:>
Description:
def save_to_file(self, filename, remap_dim0=None, remap_dim1=None):
"""Saves matrix to the file.
Args:
filename: name of the file where to save matrix
remap_dim0: dictionary mapping row indices to row names which should
be saved to the file. If None, indices are used as names.
remap_dim1: dictionary mapping column indices to column names which
should be saved to the file. If None, indices are used as names.
""" |
# rows - first index
# columns - second index
with open(filename, 'w') as fobj:
columns = list(sorted(self._dim1))
for col in columns:
fobj.write(',')
fobj.write(str(remap_dim1[col] if remap_dim1 else col))
fobj.write('\n')
for row in sorted(self._dim0):
fobj.write(str(remap_dim0[row] if remap_dim0 else row))
for col in columns:
fobj.write(',')
fobj.write(str(self[row, col]))
fobj.write('\n') |
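A hedged usage sketch; the IDs and values below are made up, and it assumes ResultMatrix supports item assignment in the way it is used elsewhere in this module:
# Hypothetical 2x2 accuracy matrix: rows are defenses, columns are attacks.
matrix = ResultMatrix()
matrix['defense_0', 'attack_0'] = 95
matrix['defense_0', 'attack_1'] = 80
matrix['defense_1', 'attack_0'] = 90
matrix['defense_1', 'attack_1'] = 85
matrix.save_to_file('/tmp/accuracy.csv')
# The resulting file would look like:
#   ,attack_0,attack_1
#   defense_0,95,80
#   defense_1,90,85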
<SYSTEM_TASK:>
Populates data from adversarial batches and writes to datastore.
<END_TASK>
<USER_TASK:>
Description:
def init_from_adversarial_batches_write_to_datastore(self, submissions,
adv_batches):
"""Populates data from adversarial batches and writes to datastore.
Args:
submissions: instance of CompetitionSubmissions
adv_batches: instance of AversarialBatches
""" |
# prepare classification batches
idx = 0
for s_id in iterkeys(submissions.defenses):
for adv_id in iterkeys(adv_batches.data):
class_batch_id = CLASSIFICATION_BATCH_ID_PATTERN.format(idx)
idx += 1
self.data[class_batch_id] = {
'adversarial_batch_id': adv_id,
'submission_id': s_id,
'result_path': os.path.join(
self._round_name,
CLASSIFICATION_BATCHES_SUBDIR,
s_id + '_' + adv_id + '.csv')
}
# save them to datastore
client = self._datastore_client
with client.no_transact_batch() as batch:
for key, value in iteritems(self.data):
entity = client.entity(client.key(KIND_CLASSIFICATION_BATCH, key))
entity.update(value)
batch.put(entity) |
<SYSTEM_TASK:>
Initializes data by reading it from the datastore.
<END_TASK>
<USER_TASK:>
Description:
def init_from_datastore(self):
"""Initializes data by reading it from the datastore.""" |
self._data = {}
client = self._datastore_client
for entity in client.query_fetch(kind=KIND_CLASSIFICATION_BATCH):
class_batch_id = entity.key.flat_path[-1]
self.data[class_batch_id] = dict(entity) |
<SYSTEM_TASK:>
Reads and returns single batch from the datastore.
<END_TASK>
<USER_TASK:>
Description:
def read_batch_from_datastore(self, class_batch_id):
"""Reads and returns single batch from the datastore.""" |
client = self._datastore_client
key = client.key(KIND_CLASSIFICATION_BATCH, class_batch_id)
result = client.get(key)
if result is not None:
return dict(result)
else:
raise KeyError(
'Key {0} not found in the datastore'.format(key.flat_path)) |
<SYSTEM_TASK:>
Computes classification results.
<END_TASK>
<USER_TASK:>
Description:
def compute_classification_results(self, adv_batches, dataset_batches,
dataset_meta, defense_work=None):
"""Computes classification results.
Args:
adv_batches: instance of AversarialBatches
dataset_batches: instance of DatasetBatches
dataset_meta: instance of DatasetMetadata
defense_work: instance of DefenseWorkPieces
Returns:
accuracy_matrix, error_matrix, hit_target_class_matrix,
processed_images_count
""" |
class_batch_to_work = {}
if defense_work:
for v in itervalues(defense_work.work):
class_batch_to_work[v['output_classification_batch_id']] = v
# accuracy_matrix[defense_id, attack_id] = num correctly classified
accuracy_matrix = ResultMatrix()
# error_matrix[defense_id, attack_id] = num misclassified
error_matrix = ResultMatrix()
# hit_target_class_matrix[defense_id, attack_id] = num hit target class
hit_target_class_matrix = ResultMatrix()
# processed_images_count[defense_id] = num processed images by defense
processed_images_count = {}
total_count = len(self.data)
processed_count = 0
logging.info('Processing %d files with classification results',
len(self.data))
for k, v in iteritems(self.data):
if processed_count % 100 == 0:
logging.info('Processed %d out of %d classification results',
processed_count, total_count)
processed_count += 1
defense_id = v['submission_id']
adv_batch = adv_batches.data[v['adversarial_batch_id']]
attack_id = adv_batch['submission_id']
work_item = class_batch_to_work.get(k)
required_work_stats = ['stat_correct', 'stat_error', 'stat_target_class',
'stat_num_images']
if work_item and work_item['error']:
# ignore batches with error
continue
if work_item and all(work_item.get(i) is not None
for i in required_work_stats):
count_correctly_classified = work_item['stat_correct']
count_errors = work_item['stat_error']
count_hit_target_class = work_item['stat_target_class']
num_images = work_item['stat_num_images']
else:
logging.warning('Recomputing accuracy for classification batch %s', k)
(count_correctly_classified, count_errors, count_hit_target_class,
num_images) = analyze_one_classification_result(
self._storage_client, v['result_path'], adv_batch, dataset_batches,
dataset_meta)
# update accuracy and hit target class
accuracy_matrix[defense_id, attack_id] += count_correctly_classified
error_matrix[defense_id, attack_id] += count_errors
hit_target_class_matrix[defense_id, attack_id] += count_hit_target_class
# update number of processed images
processed_images_count[defense_id] = (
processed_images_count.get(defense_id, 0) + num_images)
return (accuracy_matrix, error_matrix, hit_target_class_matrix,
processed_images_count) |
<SYSTEM_TASK:>
Parses type of participant based on submission filename.
<END_TASK>
<USER_TASK:>
Description:
def participant_from_submission_path(submission_path):
"""Parses type of participant based on submission filename.
Args:
submission_path: path to the submission in Google Cloud Storage
Returns:
dict with one element. The key corresponds to the type of participant
(team or baseline), the value is the ID of the participant.
Raises:
ValueError: if the participant can't be determined from the submission path.
""" |
basename = os.path.basename(submission_path)
file_ext = None
for e in ALLOWED_EXTENSIONS:
if basename.endswith(e):
file_ext = e
break
if not file_ext:
raise ValueError('Invalid submission path: ' + submission_path)
basename = basename[:-len(file_ext)]
if basename.isdigit():
return {'team_id': int(basename)}
if basename.startswith('baseline_'):
return {'baseline_id': basename[len('baseline_'):]}
raise ValueError('Invalid submission path: ' + submission_path) |
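A quick sketch of the two accepted naming schemes; the paths are hypothetical and the '.zip' extension is assumed to be in ALLOWED_EXTENSIONS:
# Numeric basenames are treated as team submissions,
# 'baseline_' prefixes as organizer-provided baselines.
print(participant_from_submission_path('round1/attacks/1234.zip'))
# -> {'team_id': 1234}
print(participant_from_submission_path('round1/defenses/baseline_inception.zip'))
# -> {'baseline_id': 'inception'}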
<SYSTEM_TASK:>
Loads list of submissions from the directory.
<END_TASK>
<USER_TASK:>
Description:
def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern):
"""Loads list of submissions from the directory.
Args:
dir_suffix: suffix of the directory where submissions are stored,
one of the following constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR
or DEFENSE_SUBDIR.
id_pattern: pattern which is used to generate (internal) IDs
for submissions. One of the following constants: ATTACK_ID_PATTERN,
TARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN.
Returns:
dictionary with all found submissions
""" |
submissions = self._storage_client.list_blobs(
prefix=os.path.join(self._round_name, dir_suffix))
return {
id_pattern.format(idx): SubmissionDescriptor(
path=s, participant_id=participant_from_submission_path(s))
for idx, s in enumerate(submissions)
} |
<SYSTEM_TASK:>
Init list of submissions from Storage and saves them to Datastore.
<END_TASK>
<USER_TASK:>
Description:
def init_from_storage_write_to_datastore(self):
"""Init list of sumibssions from Storage and saves them to Datastore.
Should be called only once (typically by master) during evaluation of
the competition.
""" |
# Load submissions
self._attacks = self._load_submissions_from_datastore_dir(
ATTACK_SUBDIR, ATTACK_ID_PATTERN)
self._targeted_attacks = self._load_submissions_from_datastore_dir(
TARGETED_ATTACK_SUBDIR, TARGETED_ATTACK_ID_PATTERN)
self._defenses = self._load_submissions_from_datastore_dir(
DEFENSE_SUBDIR, DEFENSE_ID_PATTERN)
self._write_to_datastore() |
<SYSTEM_TASK:>
Writes all submissions to datastore.
<END_TASK>
<USER_TASK:>
Description:
def _write_to_datastore(self):
"""Writes all submissions to datastore.""" |
# Populate datastore
roots_and_submissions = zip([ATTACKS_ENTITY_KEY,
TARGET_ATTACKS_ENTITY_KEY,
DEFENSES_ENTITY_KEY],
[self._attacks,
self._targeted_attacks,
self._defenses])
client = self._datastore_client
with client.no_transact_batch() as batch:
for root_key, submissions in roots_and_submissions:
batch.put(client.entity(client.key(*root_key)))
for k, v in iteritems(submissions):
entity = client.entity(client.key(
*(root_key + [KIND_SUBMISSION, k])))
entity['submission_path'] = v.path
entity.update(participant_from_submission_path(v.path))
batch.put(entity) |
<SYSTEM_TASK:>
Init list of submission from Datastore.
<END_TASK>
<USER_TASK:>
Description:
def init_from_datastore(self):
"""Init list of submission from Datastore.
Should be called by each worker during initialization.
""" |
self._attacks = {}
self._targeted_attacks = {}
self._defenses = {}
for entity in self._datastore_client.query_fetch(kind=KIND_SUBMISSION):
submission_id = entity.key.flat_path[-1]
submission_path = entity['submission_path']
participant_id = {k: entity[k]
for k in ['team_id', 'baseline_id']
if k in entity}
submission_descr = SubmissionDescriptor(path=submission_path,
participant_id=participant_id)
if list(entity.key.flat_path[0:2]) == ATTACKS_ENTITY_KEY:
self._attacks[submission_id] = submission_descr
elif list(entity.key.flat_path[0:2]) == TARGET_ATTACKS_ENTITY_KEY:
self._targeted_attacks[submission_id] = submission_descr
elif list(entity.key.flat_path[0:2]) == DEFENSES_ENTITY_KEY:
self._defenses[submission_id] = submission_descr |
<SYSTEM_TASK:>
Finds submission by ID.
<END_TASK>
<USER_TASK:>
Description:
def find_by_id(self, submission_id):
"""Finds submission by ID.
Args:
submission_id: ID of the submission
Returns:
SubmissionDescriptor with information about submission or None if
submission is not found.
""" |
return self._attacks.get(
submission_id,
self._defenses.get(
submission_id,
self._targeted_attacks.get(submission_id, None))) |
<SYSTEM_TASK:>
Returns human readable submission external ID.
<END_TASK>
<USER_TASK:>
Description:
def get_external_id(self, submission_id):
"""Returns human readable submission external ID.
Args:
submission_id: internal submission ID.
Returns:
human readable ID.
""" |
submission = self.find_by_id(submission_id)
if not submission:
return None
if 'team_id' in submission.participant_id:
return submission.participant_id['team_id']
elif 'baseline_id' in submission.participant_id:
return 'baseline_' + submission.participant_id['baseline_id']
else:
return '' |
<SYSTEM_TASK:>
Loads and verifies metadata.
<END_TASK>
<USER_TASK:>
Description:
def _load_and_verify_metadata(self, submission_type):
"""Loads and verifies metadata.
Args:
submission_type: type of the submission
Returns:
dictionary with metadata or None if metadata is not found or invalid
""" |
metadata_filename = os.path.join(self._extracted_submission_dir,
'metadata.json')
if not os.path.isfile(metadata_filename):
logging.error('metadata.json not found')
return None
try:
with open(metadata_filename, 'r') as f:
metadata = json.load(f)
except IOError as e:
logging.error('Failed to load metadata: %s', e)
return None
for field_name in REQUIRED_METADATA_JSON_FIELDS:
if field_name not in metadata:
logging.error('Field %s not found in metadata', field_name)
return None
# Verify submission type
if submission_type != metadata['type']:
logging.error('Invalid submission type in metadata, expected "%s", '
'actual "%s"', submission_type, metadata['type'])
return None
# Check submission entry point
entry_point = metadata['entry_point']
if not os.path.isfile(os.path.join(self._extracted_submission_dir,
entry_point)):
logging.error('Entry point not found: %s', entry_point)
return None
if not entry_point.endswith('.sh'):
logging.warning('Entry point is not an .sh script. '
'This is not necessarily a problem, but if the submission '
"won't run, double check the entry point first: %s",
entry_point)
# Metadata verified
return metadata |
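A hedged sketch of metadata.json contents that would pass these checks; the container images and entry point are hypothetical, and the exact required fields come from REQUIRED_METADATA_JSON_FIELDS:
# Hypothetical metadata.json for a defense submission (values are made up).
example_metadata = {
    "type": "defense",
    "entry_point": "run_defense.sh",
    "container": "gcr.io/example-project/defense-cpu:latest",
    "container_gpu": "gcr.io/example-project/defense-gpu:latest"
}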
<SYSTEM_TASK:>
Runs submission inside Docker container.
<END_TASK>
<USER_TASK:>
Description:
def _run_submission(self, metadata):
"""Runs submission inside Docker container.
Args:
metadata: dictionary with submission metadata
Returns:
True if status code of Docker command was success (i.e. zero),
False otherwise.
""" |
if self._use_gpu:
docker_binary = 'nvidia-docker'
container_name = metadata['container_gpu']
else:
docker_binary = 'docker'
container_name = metadata['container']
if metadata['type'] == 'defense':
cmd = [docker_binary, 'run',
'--network=none',
'-m=24g',
'-v', '{0}:/input_images:ro'.format(self._sample_input_dir),
'-v', '{0}:/output_data'.format(self._sample_output_dir),
'-v', '{0}:/code'.format(self._extracted_submission_dir),
'-w', '/code',
container_name,
'./' + metadata['entry_point'],
'/input_images',
'/output_data/result.csv']
else:
epsilon = np.random.choice(ALLOWED_EPS)
cmd = [docker_binary, 'run',
'--network=none',
'-m=24g',
'-v', '{0}:/input_images:ro'.format(self._sample_input_dir),
'-v', '{0}:/output_images'.format(self._sample_output_dir),
'-v', '{0}:/code'.format(self._extracted_submission_dir),
'-w', '/code',
container_name,
'./' + metadata['entry_point'],
'/input_images',
'/output_images',
str(epsilon)]
logging.info('Command to run submission: %s', ' '.join(cmd))
return shell_call(cmd) |
<SYSTEM_TASK:>
Pad a single image and then crop to the original size with a random
<END_TASK>
<USER_TASK:>
Description:
def random_shift(x, pad=(4, 4), mode='REFLECT'):
"""Pad a single image and then crop to the original size with a random
offset.""" |
assert mode in 'REFLECT SYMMETRIC CONSTANT'.split()
assert x.get_shape().ndims == 3
xp = tf.pad(x, [[pad[0], pad[0]], [pad[1], pad[1]], [0, 0]], mode)
return tf.random_crop(xp, tf.shape(x)) |
<SYSTEM_TASK:>
Augment a batch by randomly cropping and horizontally flipping it.
<END_TASK>
<USER_TASK:>
Description:
def random_crop_and_flip(x, pad_rows=4, pad_cols=4):
"""Augment a batch by randomly cropping and horizontally flipping it.""" |
rows = tf.shape(x)[1]
cols = tf.shape(x)[2]
channels = x.get_shape()[3]
def _rand_crop_img(img):
"""Randomly crop an individual image"""
return tf.random_crop(img, [rows, cols, channels])
# Some of these ops are only on CPU.
# This function will often be called with the device set to GPU.
# We need to set it to CPU temporarily to avoid an exception.
with tf.device('/CPU:0'):
x = tf.image.resize_image_with_crop_or_pad(x, rows + pad_rows,
cols + pad_cols)
x = tf.map_fn(_rand_crop_img, x)
x = tf.image.random_flip_left_right(x)
return x |
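A minimal TF1-style sketch of wiring these augmentations into a graph; the shapes and placeholder setup are assumptions:
import tensorflow as tf

# Batch augmentation: pad, random-crop back to size, then random flip.
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
augmented = random_crop_and_flip(images, pad_rows=4, pad_cols=4)

# Single-image variant with reflective padding.
image = tf.placeholder(tf.float32, [32, 32, 3])
shifted = random_shift(image, pad=(4, 4), mode='REFLECT')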
<SYSTEM_TASK:>
Project `perturbation` onto L-infinity ball of radius `epsilon`.
<END_TASK>
<USER_TASK:>
Description:
def _project_perturbation(perturbation, epsilon, input_image, clip_min=None,
clip_max=None):
"""Project `perturbation` onto L-infinity ball of radius `epsilon`.
Also project into hypercube such that the resulting adversarial example
is between clip_min and clip_max, if applicable.
""" |
if clip_min is None or clip_max is None:
raise NotImplementedError("_project_perturbation currently has clipping "
"hard-coded in.")
# Ensure inputs are in the correct range
with tf.control_dependencies([
utils_tf.assert_less_equal(input_image,
tf.cast(clip_max, input_image.dtype)),
utils_tf.assert_greater_equal(input_image,
tf.cast(clip_min, input_image.dtype))
]):
clipped_perturbation = utils_tf.clip_by_value(
perturbation, -epsilon, epsilon)
new_image = utils_tf.clip_by_value(
input_image + clipped_perturbation, clip_min, clip_max)
return new_image - input_image |
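The same projection sketched in NumPy to make the two clipping steps concrete; this is a sketch only, not the graph-mode implementation above:
import numpy as np

def project_perturbation_np(perturbation, epsilon, input_image,
                            clip_min=0., clip_max=1.):
    # 1) Clip the perturbation into the L-infinity ball of radius epsilon.
    clipped = np.clip(perturbation, -epsilon, epsilon)
    # 2) Clip the perturbed image into [clip_min, clip_max] and re-express
    #    the result as a perturbation relative to the clean image.
    new_image = np.clip(input_image + clipped, clip_min, clip_max)
    return new_image - input_image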
<SYSTEM_TASK:>
Computes difference between logit for `label` and next highest logit.
<END_TASK>
<USER_TASK:>
Description:
def margin_logit_loss(model_logits, label, nb_classes=10, num_classes=None):
"""Computes difference between logit for `label` and next highest logit.
The loss is high when `label` is unlikely (targeted by default).
This follows the same interface as `loss_fn` for TensorOptimizer and
projected_optimization, i.e. it returns a batch of loss values.
""" |
if num_classes is not None:
warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
" `num_classes` may be removed on or after 2019-04-23.")
nb_classes = num_classes
del num_classes
if 'int' in str(label.dtype):
logit_mask = tf.one_hot(label, depth=nb_classes, axis=-1)
else:
logit_mask = label
if 'int' in str(logit_mask.dtype):
logit_mask = tf.to_float(logit_mask)
try:
label_logits = reduce_sum(logit_mask * model_logits, axis=-1)
except TypeError:
raise TypeError("Could not take row-wise dot product between "
"logit mask, of dtype " + str(logit_mask.dtype)
+ " and model_logits, of dtype "
+ str(model_logits.dtype))
logits_with_target_label_neg_inf = model_logits - logit_mask * 99999
highest_nonlabel_logits = reduce_max(
logits_with_target_label_neg_inf, axis=-1)
loss = highest_nonlabel_logits - label_logits
return loss |
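A NumPy sketch of the same margin computation on a single example; the logits and target below are illustrative:
import numpy as np

def margin_logit_loss_np(logits, label_onehot):
    # Highest non-label logit minus the label logit; large values mean the
    # (target) label is currently unlikely.
    label_logits = np.sum(label_onehot * logits, axis=-1)
    masked = logits - label_onehot * 99999
    return np.max(masked, axis=-1) - label_logits

logits = np.array([[2.0, 5.0, 1.0]])
target = np.array([[1.0, 0.0, 0.0]])  # targeting class 0
print(margin_logit_loss_np(logits, target))  # [3.0]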
<SYSTEM_TASK:>
Compute a new value of `x` to minimize `loss_fn`.
<END_TASK>
<USER_TASK:>
Description:
def _compute_gradients(self, loss_fn, x, unused_optim_state):
"""Compute a new value of `x` to minimize `loss_fn`.
Args:
loss_fn: a callable that takes `x`, a batch of images, and returns
a batch of loss values. `x` will be optimized to minimize
`loss_fn(x)`.
x: A list of Tensors, the values to be updated. This is analogous
to the `var_list` argument in standard TF Optimizer.
unused_optim_state: A (possibly nested) dict, containing any state
info needed for the optimizer.
Returns:
new_x: A list of Tensors, the same length as `x`, which are updated
new_optim_state: A dict, with the same structure as `optim_state`,
which have been updated.
""" |
# Assumes `x` is a list,
# and contains a tensor representing a batch of images
assert len(x) == 1 and isinstance(x, list), \
'x should be a list and contain only one image tensor'
x = x[0]
loss = reduce_mean(loss_fn(x), axis=0)
return tf.gradients(loss, x) |
<SYSTEM_TASK:>
Analogous to tf.Optimizer.minimize
<END_TASK>
<USER_TASK:>
Description:
def minimize(self, loss_fn, x, optim_state):
"""
Analogous to tf.Optimizer.minimize
:param loss_fn: tf Tensor, representing the loss to minimize
:param x: list of Tensor, analogous to tf.Optimizer's var_list
:param optim_state: A possibly nested dict, containing any optimizer state.
Returns:
new_x: list of Tensor, updated version of `x`
new_optim_state: dict, updated version of `optim_state`
""" |
grads = self._compute_gradients(loss_fn, x, optim_state)
return self._apply_gradients(grads, x, optim_state) |
<SYSTEM_TASK:>
Refer to parent class documentation.
<END_TASK>
<USER_TASK:>
Description:
def _apply_gradients(self, grads, x, optim_state):
"""Refer to parent class documentation.""" |
new_x = [None] * len(x)
new_optim_state = {
"t": optim_state["t"] + 1.,
"m": [None] * len(x),
"u": [None] * len(x)
}
t = new_optim_state["t"]
for i in xrange(len(x)):
g = grads[i]
m_old = optim_state["m"][i]
u_old = optim_state["u"][i]
new_optim_state["m"][i] = (
self._beta1 * m_old + (1. - self._beta1) * g)
new_optim_state["u"][i] = (
self._beta2 * u_old + (1. - self._beta2) * g * g)
m_hat = new_optim_state["m"][i] / (1. - tf.pow(self._beta1, t))
u_hat = new_optim_state["u"][i] / (1. - tf.pow(self._beta2, t))
new_x[i] = (
x[i] - self._lr * m_hat / (tf.sqrt(u_hat) + self._epsilon))
return new_x, new_optim_state |
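For reference, a NumPy sketch of one such bias-corrected Adam step; the hyperparameter defaults are assumptions, not the class's actual defaults:
import numpy as np

def adam_step_np(x, g, m, u, t, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-9):
    # One Adam update mirroring _apply_gradients above.
    t = t + 1.
    m = beta1 * m + (1. - beta1) * g
    u = beta2 * u + (1. - beta2) * g * g
    m_hat = m / (1. - beta1 ** t)
    u_hat = u / (1. - beta2 ** t)
    x = x - lr * m_hat / (np.sqrt(u_hat) + eps)
    return x, m, u, t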
<SYSTEM_TASK:>
Scans directory and read all submissions.
<END_TASK>
<USER_TASK:>
Description:
def read_submissions_from_directory(dirname, use_gpu):
"""Scans directory and read all submissions.
Args:
dirname: directory to scan.
use_gpu: whether submissions should use GPU. This argument is
used to pick proper Docker container for each submission and create
instance of Attack or Defense class.
Returns:
List with submissions (subclasses of Submission class).
""" |
result = []
for sub_dir in os.listdir(dirname):
submission_path = os.path.join(dirname, sub_dir)
try:
if not os.path.isdir(submission_path):
continue
if not os.path.exists(os.path.join(submission_path, 'metadata.json')):
continue
with open(os.path.join(submission_path, 'metadata.json')) as f:
metadata = json.load(f)
if use_gpu and ('container_gpu' in metadata):
container = metadata['container_gpu']
else:
container = metadata['container']
entry_point = metadata['entry_point']
submission_type = metadata['type']
if submission_type == 'attack' or submission_type == 'targeted_attack':
submission = Attack(submission_path, container, entry_point, use_gpu)
elif submission_type == 'defense':
submission = Defense(submission_path, container, entry_point, use_gpu)
else:
raise ValueError('Invalid type of submission: %s' % submission_type)
result.append(submission)
except (IOError, KeyError, ValueError):
print('Failed to read submission from directory ', submission_path)
return result |
<SYSTEM_TASK:>
Loads output of defense from given file.
<END_TASK>
<USER_TASK:>
Description:
def load_defense_output(filename):
"""Loads output of defense from given file.""" |
result = {}
with open(filename) as f:
for row in csv.reader(f):
try:
image_filename = row[0]
if image_filename.endswith('.png') or image_filename.endswith('.jpg'):
image_filename = image_filename[:image_filename.rfind('.')]
label = int(row[1])
except (IndexError, ValueError):
continue
result[image_filename] = label
return result |
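For illustration, a hedged sketch of the result.csv format this parses; the filenames and labels are made up:
import tempfile

# Hypothetical result.csv: each row is "<image filename>,<predicted label>".
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write('00000001.png,42\n00000002.png,7\n')
print(load_defense_output(f.name))  # {'00000001': 42, '00000002': 7}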
<SYSTEM_TASK:>
Run all attacks against all defenses and compute results.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Run all attacks against all defenses and compute results.
""" |
args = parse_args()
attacks_output_dir = os.path.join(args.intermediate_results_dir,
'attacks_output')
targeted_attacks_output_dir = os.path.join(args.intermediate_results_dir,
'targeted_attacks_output')
defenses_output_dir = os.path.join(args.intermediate_results_dir,
'defenses_output')
all_adv_examples_dir = os.path.join(args.intermediate_results_dir,
'all_adv_examples')
# Load dataset metadata.
dataset_meta = DatasetMetadata(args.dataset_metadata)
# Load attacks and defenses.
attacks = [
a for a in read_submissions_from_directory(args.attacks_dir,
args.use_gpu)
if isinstance(a, Attack)
]
targeted_attacks = [
a for a in read_submissions_from_directory(args.targeted_attacks_dir,
args.use_gpu)
if isinstance(a, Attack)
]
defenses = [
d for d in read_submissions_from_directory(args.defenses_dir,
args.use_gpu)
if isinstance(d, Defense)
]
print('Found attacks: ', [a.name for a in attacks])
print('Found targeted attacks: ', [a.name for a in targeted_attacks])
print('Found defenses: ', [d.name for d in defenses])
# Prepare subdirectories for intermediate results.
os.mkdir(attacks_output_dir)
os.mkdir(targeted_attacks_output_dir)
os.mkdir(defenses_output_dir)
os.mkdir(all_adv_examples_dir)
for a in attacks:
os.mkdir(os.path.join(attacks_output_dir, a.name))
for a in targeted_attacks:
os.mkdir(os.path.join(targeted_attacks_output_dir, a.name))
for d in defenses:
os.mkdir(os.path.join(defenses_output_dir, d.name))
# Run all non-targeted attacks.
attacks_output = AttacksOutput(args.dataset_dir,
attacks_output_dir,
targeted_attacks_output_dir,
all_adv_examples_dir,
args.epsilon)
for a in attacks:
a.run(args.dataset_dir,
os.path.join(attacks_output_dir, a.name),
args.epsilon)
attacks_output.clip_and_copy_attack_outputs(a.name, False)
# Run all targeted attacks.
dataset_meta.save_target_classes(os.path.join(args.dataset_dir,
'target_class.csv'))
for a in targeted_attacks:
a.run(args.dataset_dir,
os.path.join(targeted_attacks_output_dir, a.name),
args.epsilon)
attacks_output.clip_and_copy_attack_outputs(a.name, True)
# Run all defenses.
defenses_output = {}
for d in defenses:
d.run(all_adv_examples_dir, os.path.join(defenses_output_dir, d.name))
defenses_output[d.name] = load_defense_output(
os.path.join(defenses_output_dir, d.name, 'result.csv'))
# Compute and save scoring.
compute_and_save_scores_and_ranking(attacks_output, defenses_output,
dataset_meta, args.output_dir,
args.save_all_classification) |
<SYSTEM_TASK:>
Helper method which loads dataset and determines clipping range.
<END_TASK>
<USER_TASK:>
Description:
def _load_dataset_clipping(self, dataset_dir, epsilon):
"""Helper method which loads dataset and determines clipping range.
Args:
dataset_dir: location of the dataset.
epsilon: maximum allowed size of adversarial perturbation.
""" |
self.dataset_max_clip = {}
self.dataset_min_clip = {}
self._dataset_image_count = 0
for fname in os.listdir(dataset_dir):
if not fname.endswith('.png'):
continue
image_id = fname[:-4]
image = np.array(
Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
image = image.astype('int32')
self._dataset_image_count += 1
self.dataset_max_clip[image_id] = np.clip(image + epsilon,
0,
255).astype('uint8')
self.dataset_min_clip[image_id] = np.clip(image - epsilon,
0,
255).astype('uint8') |
<SYSTEM_TASK:>
Clips results of attack and copy it to directory with all images.
<END_TASK>
<USER_TASK:>
Description:
def clip_and_copy_attack_outputs(self, attack_name, is_targeted):
"""Clips results of attack and copy it to directory with all images.
Args:
attack_name: name of the attack.
is_targeted: if True then attack is targeted, otherwise non-targeted.
""" |
if is_targeted:
self._targeted_attack_names.add(attack_name)
else:
self._attack_names.add(attack_name)
attack_dir = os.path.join(self.targeted_attacks_output_dir
if is_targeted
else self.attacks_output_dir,
attack_name)
for fname in os.listdir(attack_dir):
if not (fname.endswith('.png') or fname.endswith('.jpg')):
continue
image_id = fname[:-4]
if image_id not in self.dataset_max_clip:
continue
image_max_clip = self.dataset_max_clip[image_id]
image_min_clip = self.dataset_min_clip[image_id]
adversarial_image = np.array(
Image.open(os.path.join(attack_dir, fname)).convert('RGB'))
clipped_adv_image = np.clip(adversarial_image,
image_min_clip,
image_max_clip)
output_basename = '{0:08d}'.format(self._output_image_idx)
self._output_image_idx += 1
self._output_to_attack_mapping[output_basename] = (attack_name,
is_targeted,
image_id)
if is_targeted:
self._targeted_attack_image_count += 1
else:
self._attack_image_count += 1
Image.fromarray(clipped_adv_image).save(
os.path.join(self.all_adv_examples_dir, output_basename + '.png')) |
<SYSTEM_TASK:>
Saves target classes for all dataset images into given file.
<END_TASK>
<USER_TASK:>
Description:
def save_target_classes(self, filename):
"""Saves target classed for all dataset images into given file.""" |
with open(filename, 'w') as f:
for k, v in self._target_classes.items():
f.write('{0}.png,{1}\n'.format(k, v)) |
<SYSTEM_TASK:>
A reasonable attack bundling recipe for a max norm threat model and
<END_TASK>
<USER_TASK:>
Description:
def single_run_max_confidence_recipe(sess, model, x, y, nb_classes, eps,
clip_min, clip_max, eps_iter, nb_iter,
report_path,
batch_size=BATCH_SIZE,
eps_iter_small=None):
"""A reasonable attack bundling recipe for a max norm threat model and
a defender that uses confidence thresholding. This recipe uses both
uniform noise and randomly-initialized PGD targeted attacks.
References:
https://openreview.net/forum?id=H1g0piA9tQ
This version runs each attack (noise, targeted PGD for each class with
nb_iter iterations, targeted PGD for each class with 25X more iterations)
just once and then stops. See `basic_max_confidence_recipe` for a version
that runs indefinitely.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param x: numpy array containing clean example inputs to attack
:param y: numpy array containing true labels
:param nb_classes: int, number of classes
:param eps: float, maximum size of perturbation (measured by max norm)
:param eps_iter: float, step size for one version of PGD attacks
(will also run another version with eps_iter_small step size)
:param nb_iter: int, number of iterations for the cheaper PGD attacks
(will also run another version with 25X more iterations)
:param report_path: str, the path that the report will be saved to.
:param batch_size: int, the total number of examples to run simultaneously
:param eps_iter_small: optional, float.
The second version of the PGD attack is run with 25 * nb_iter iterations
and eps_iter_small step size. If eps_iter_small is not specified it is
set to eps_iter / 25.
""" |
noise_attack = Noise(model, sess)
pgd_attack = ProjectedGradientDescent(model, sess)
threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max}
noise_attack_config = AttackConfig(noise_attack, threat_params, "noise")
attack_configs = [noise_attack_config]
pgd_attack_configs = []
pgd_params = copy.copy(threat_params)
pgd_params["eps_iter"] = eps_iter
pgd_params["nb_iter"] = nb_iter
assert batch_size % num_devices == 0
dev_batch_size = batch_size // num_devices
ones = tf.ones(dev_batch_size, tf.int32)
expensive_pgd = []
if eps_iter_small is None:
eps_iter_small = eps_iter / 25.
for cls in range(nb_classes):
cls_params = copy.copy(pgd_params)
cls_params['y_target'] = tf.to_float(tf.one_hot(ones * cls, nb_classes))
cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls))
pgd_attack_configs.append(cls_attack_config)
expensive_params = copy.copy(cls_params)
expensive_params["eps_iter"] = eps_iter_small
expensive_params["nb_iter"] *= 25.
expensive_config = AttackConfig(
pgd_attack, expensive_params, "expensive_pgd_" + str(cls))
expensive_pgd.append(expensive_config)
attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd
new_work_goal = {config: 1 for config in attack_configs}
goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)]
bundle_attacks(sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=batch_size,
eval_batch_size=batch_size) |
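A hedged example invocation for an MNIST-like setup; the epsilon, step size, iteration count, and report path are illustrative values, not recommendations:
# Hypothetical call: 10-class model, inputs in [0, 1], max-norm budget 0.3.
single_run_max_confidence_recipe(sess, model, x_test, y_test, nb_classes=10,
                                 eps=0.3, clip_min=0., clip_max=1.,
                                 eps_iter=0.01, nb_iter=40,
                                 report_path='max_confidence_report.joblib')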
<SYSTEM_TASK:>
Max confidence using random search.
<END_TASK>
<USER_TASK:>
Description:
def random_search_max_confidence_recipe(sess, model, x, y, eps,
clip_min, clip_max,
report_path, batch_size=BATCH_SIZE,
num_noise_points=10000):
"""Max confidence using random search.
References:
https://openreview.net/forum?id=H1g0piA9tQ
Describes the max_confidence procedure used for the bundling in this recipe
https://arxiv.org/abs/1802.00420
Describes using random search with 1e5 or more random points to avoid
gradient masking.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param x: numpy array containing clean example inputs to attack
:param y: numpy array containing true labels
:param eps: float, maximum size of perturbation (measured by max norm)
:param clip_min: float, minimum value of input components
:param clip_max: float, maximum value of input components
:param report_path: str, the path that the report will be saved to.
:param batch_size: int, the total number of examples to run simultaneously
:param num_noise_points: int, number of random noise points to try per example
""" |
noise_attack = Noise(model, sess)
threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max}
noise_attack_config = AttackConfig(noise_attack, threat_params)
attack_configs = [noise_attack_config]
assert batch_size % num_devices == 0
new_work_goal = {noise_attack_config: num_noise_points}
goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)]
bundle_attacks(sess, model, x, y, attack_configs, goals, report_path) |
<SYSTEM_TASK:>
Runs attack bundling.
<END_TASK>
<USER_TASK:>
Description:
def bundle_attacks(sess, model, x, y, attack_configs, goals, report_path,
attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE):
"""
Runs attack bundling.
Users of cleverhans may call this function but are more likely to call
one of the recipes above.
Reference: https://openreview.net/forum?id=H1g0piA9tQ
:param sess: tf.session.Session
:param model: cleverhans.model.Model
:param x: numpy array containing clean example inputs to attack
:param y: numpy array containing true labels
:param attack_configs: list of AttackConfigs to run
:param goals: list of AttackGoals to run
The bundler works through the goals in order, until each is satisfied.
Some goals may never be satisfied, in which case the bundler will run
forever, updating the report on disk as it goes.
:param report_path: str, the path the report will be saved to
:param attack_batch_size: int, batch size for generating adversarial examples
:param eval_batch_size: int, batch size for evaluating the model on clean / adversarial examples
:returns:
adv_x: The adversarial examples, in the same format as `x`
run_counts: dict mapping each AttackConfig to a numpy array reporting
how many times that AttackConfig was run on each example
""" |
assert isinstance(sess, tf.Session)
assert isinstance(model, Model)
assert all(isinstance(attack_config, AttackConfig) for attack_config
in attack_configs)
assert all(isinstance(goal, AttackGoal) for goal in goals)
assert isinstance(report_path, six.string_types)
if x.shape[0] != y.shape[0]:
raise ValueError("Number of input examples does not match number of labels")
# Note: no need to precompile attacks, correctness_and_confidence
# caches them
run_counts = {}
for attack_config in attack_configs:
run_counts[attack_config] = np.zeros(x.shape[0], dtype=np.int64)
# TODO: make an interface to pass this in if it has already been computed
# elsewhere
_logger.info("Running on clean data to initialize the report...")
packed = correctness_and_confidence(sess, model, x, y, batch_size=eval_batch_size,
devices=devices)
_logger.info("...done")
correctness, confidence = packed
_logger.info("Accuracy: " + str(correctness.mean()))
report = ConfidenceReport()
report['clean'] = ConfidenceReportEntry(correctness, confidence)
adv_x = x.copy()
for goal in goals:
bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs,
run_counts,
goal, report, report_path,
attack_batch_size=attack_batch_size, eval_batch_size=eval_batch_size)
# Many users will set `goals` to make this run forever, so the return
# statement is not the primary way to get information out.
return adv_x, run_counts |
<SYSTEM_TASK:>
Runs attack bundling, working on one specific AttackGoal.
<END_TASK>
<USER_TASK:>
Description:
def bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs,
run_counts,
goal, report, report_path,
attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE):
"""
Runs attack bundling, working on one specific AttackGoal.
This function is mostly intended to be called by `bundle_attacks`.
Reference: https://openreview.net/forum?id=H1g0piA9tQ
:param sess: tf.session.Session
:param model: cleverhans.model.Model
:param x: numpy array containing clean example inputs to attack
:param y: numpy array containing true labels
:param adv_x: numpy array containing the adversarial examples made so far
by earlier work in the bundling process
:param attack_configs: list of AttackConfigs to run
:param run_counts: dict mapping AttackConfigs to numpy arrays specifying
how many times they have been run on each example
:param goal: AttackGoal to run
:param report: ConfidenceReport
:param report_path: str, the path the report will be saved to
:param attack_batch_size: int, batch size for generating adversarial examples
:param eval_batch_size: int, batch size for evaluating the model on adversarial examples
""" |
goal.start(run_counts)
_logger.info("Running criteria for new goal...")
criteria = goal.get_criteria(sess, model, adv_x, y, batch_size=eval_batch_size)
assert 'correctness' in criteria
_logger.info("Accuracy: " + str(criteria['correctness'].mean()))
assert 'confidence' in criteria
while not goal.is_satisfied(criteria, run_counts):
run_batch_with_goal(sess, model, x, y, adv_x, criteria, attack_configs,
run_counts,
goal, report, report_path,
attack_batch_size=attack_batch_size)
# Save after finishing all goals.
# The incremental saves run on a timer. This save is needed so that the last
# few attacks after the timer don't get discarded
report.completed = True
save(criteria, report, report_path, adv_x) |
<SYSTEM_TASK:>
Runs attack bundling on one batch of data.
<END_TASK>
<USER_TASK:>
Description:
def run_batch_with_goal(sess, model, x, y, adv_x_val, criteria, attack_configs,
run_counts, goal, report, report_path,
attack_batch_size=BATCH_SIZE):
"""
Runs attack bundling on one batch of data.
This function is mostly intended to be called by
`bundle_attacks_with_goal`.
:param sess: tf.session.Session
:param model: cleverhans.model.Model
:param x: numpy array containing clean example inputs to attack
:param y: numpy array containing true labels
:param adv_x_val: numpy array containing the adversarial examples made so far
by earlier work in the bundling process
:param criteria: dict mapping string names of criteria to numpy arrays with
their values for each example
(Different AttackGoals track different criteria)
:param run_counts: dict mapping AttackConfigs to numpy arrays reporting how
many times they have been run on each example
:param goal: the AttackGoal to work on
:param report: dict, see `bundle_attacks_with_goal`
:param report_path: str, path to save the report to
""" |
attack_config = goal.get_attack_config(attack_configs, run_counts, criteria)
idxs = goal.request_examples(attack_config, criteria, run_counts,
attack_batch_size)
x_batch = x[idxs]
assert x_batch.shape[0] == attack_batch_size
y_batch = y[idxs]
assert y_batch.shape[0] == attack_batch_size
adv_x_batch = run_attack(sess, model, x_batch, y_batch,
attack_config.attack, attack_config.params,
attack_batch_size, devices, pass_y=attack_config.pass_y)
criteria_batch = goal.get_criteria(sess, model, adv_x_batch, y_batch,
batch_size=min(attack_batch_size,
BATCH_SIZE))
# This can't be parallelized because some orig examples are copied more
# than once into the batch
cur_run_counts = run_counts[attack_config]
for batch_idx, orig_idx in enumerate(idxs):
cur_run_counts[orig_idx] += 1
should_copy = goal.new_wins(criteria, orig_idx, criteria_batch, batch_idx)
if should_copy:
adv_x_val[orig_idx] = adv_x_batch[batch_idx]
for key in criteria:
criteria[key][orig_idx] = criteria_batch[key][batch_idx]
assert np.allclose(y[orig_idx], y_batch[batch_idx])
report['bundled'] = ConfidenceReportEntry(criteria['correctness'], criteria['confidence'])
should_save = False
new_time = time.time()
if hasattr(report, 'time'):
if new_time - report.time > REPORT_TIME_INTERVAL:
should_save = True
else:
should_save = True
if should_save:
report.time = new_time
goal.print_progress(criteria, run_counts)
save(criteria, report, report_path, adv_x_val) |
<SYSTEM_TASK:>
A post-processor version of attack bundling, that chooses the strongest
<END_TASK>
<USER_TASK:>
Description:
def bundle_examples_with_goal(sess, model, adv_x_list, y, goal,
report_path, batch_size=BATCH_SIZE):
"""
A post-processor version of attack bundling, that chooses the strongest
example from the output of multiple earlier bundling strategies.
:param sess: tf.session.Session
:param model: cleverhans.model.Model
:param adv_x_list: list of numpy arrays
Each entry in the list is the output of a previous bundler; it is an
adversarial version of the whole dataset.
:param y: numpy array containing true labels
:param goal: AttackGoal to use to choose the best version of each adversarial
example
:param report_path: str, the path the report will be saved to
:param batch_size: int, batch size
""" |
# Check the input
num_attacks = len(adv_x_list)
assert num_attacks > 0
adv_x_0 = adv_x_list[0]
assert isinstance(adv_x_0, np.ndarray)
assert all(adv_x.shape == adv_x_0.shape for adv_x in adv_x_list)
# Allocate the output
out = np.zeros_like(adv_x_0)
m = adv_x_0.shape[0]
# Initialize with negative sentinel values to make sure everything is
# written to
correctness = -np.ones(m, dtype='int32')
confidence = -np.ones(m, dtype='float32')
# Gather criteria
criteria = [goal.get_criteria(sess, model, adv_x, y, batch_size=batch_size) for adv_x in adv_x_list]
assert all('correctness' in c for c in criteria)
assert all('confidence' in c for c in criteria)
_logger.info("Accuracy on each advx dataset: ")
for c in criteria:
_logger.info("\t" + str(c['correctness'].mean()))
for example_idx in range(m):
# Index of the best attack for this example
attack_idx = 0
# Find the winner
for candidate_idx in range(1, num_attacks):
if goal.new_wins(criteria[attack_idx], example_idx,
criteria[candidate_idx], example_idx):
attack_idx = candidate_idx
# Copy the winner into the output
out[example_idx] = adv_x_list[attack_idx][example_idx]
correctness[example_idx] = criteria[attack_idx]['correctness'][example_idx]
confidence[example_idx] = criteria[attack_idx]['confidence'][example_idx]
assert correctness.min() >= 0
assert correctness.max() <= 1
assert confidence.min() >= 0.
assert confidence.max() <= 1.
correctness = correctness.astype('bool')
_logger.info("Accuracy on bundled examples: " + str(correctness.mean()))
report = ConfidenceReport()
report['bundled'] = ConfidenceReportEntry(correctness, confidence)
serial.save(report_path, report)
assert report_path.endswith('.joblib')
adv_x_path = report_path[:-len('.joblib')] + "_adv_x.npy"
np.save(adv_x_path, out) |
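A hypothetical invocation, assuming two earlier bundling runs already saved their adversarial datasets to disk; the file names, test-set variables, and goal parameters are all illustrative.

import numpy as np

adv_x_list = [np.load("pgd_adv_x.npy"), np.load("spsa_adv_x.npy")]
goal = MaxConfidence(t=1.)  # keep the most confidently misclassified version of each example
bundle_examples_with_goal(sess, model, adv_x_list, y_test, goal,
                          report_path="bundled_report.joblib")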
<SYSTEM_TASK:>
Runs the MaxConfidence attack using SPSA as the underlying optimizer.
<END_TASK>
<USER_TASK:>
Description:
def spsa_max_confidence_recipe(sess, model, x, y, nb_classes, eps,
clip_min, clip_max, nb_iter,
report_path,
spsa_samples=SPSA.DEFAULT_SPSA_SAMPLES,
spsa_iters=SPSA.DEFAULT_SPSA_ITERS,
eval_batch_size=BATCH_SIZE):
"""Runs the MaxConfidence attack using SPSA as the underlying optimizer.
Even though this runs only one attack, it must be implemented as a bundler
because SPSA supports only batch_size=1. The cleverhans.attacks.MaxConfidence
attack internally multiplies the batch size by nb_classes, so it can't take
SPSA as a base attack. Instead, we must bundle batch_size=1 calls using
cleverhans.attack_bundling.MaxConfidence.
References:
https://openreview.net/forum?id=H1g0piA9tQ
:param sess: tf.Session
:param model: cleverhans.model.Model
:param x: numpy array containing clean example inputs to attack
:param y: numpy array containing true labels
:param nb_classes: int, number of classes
:param eps: float, maximum size of perturbation (measured by max norm)
:param nb_iter: int, number of iterations for the SPSA attack
:param report_path: str, the path that the report will be saved to.
:param eval_batch_size: int, batch size for evaluation (as opposed to making attacks)
""" |
spsa = SPSA(model, sess)
spsa_params = {"eps": eps, "clip_min" : clip_min, "clip_max" : clip_max,
"nb_iter": nb_iter, "spsa_samples": spsa_samples,
"spsa_iters": spsa_iters}
attack_configs = []
dev_batch_size = 1 # The only batch size supported by SPSA
batch_size = num_devices
ones = tf.ones(dev_batch_size, tf.int32)
for cls in range(nb_classes):
cls_params = copy.copy(spsa_params)
cls_params['y_target'] = tf.to_float(tf.one_hot(ones * cls, nb_classes))
cls_attack_config = AttackConfig(spsa, cls_params, "spsa_" + str(cls))
attack_configs.append(cls_attack_config)
new_work_goal = {config: 1 for config in attack_configs}
goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)]
bundle_attacks(sess, model, x, y, attack_configs, goals, report_path,
attack_batch_size=batch_size, eval_batch_size=eval_batch_size) |
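A hypothetical call for an MNIST-style model; the epsilon, iteration count, and variable names are illustrative assumptions rather than recommended settings.

spsa_max_confidence_recipe(sess, model, x_test, y_test, nb_classes=10,
                           eps=0.3, clip_min=0., clip_max=1., nb_iter=100,
                           report_path="spsa_max_confidence_report.joblib")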
<SYSTEM_TASK:>
Returns a dictionary mapping the name of each criterion to a NumPy
<END_TASK>
<USER_TASK:>
Description:
def get_criteria(self, sess, model, advx, y, batch_size=BATCH_SIZE):
"""
Returns a dictionary mapping the name of each criterion to a NumPy
array containing the value of that criterion for each adversarial
example.
Subclasses can add extra criteria by implementing the `extra_criteria`
method.
:param sess: tf.session.Session
:param model: cleverhans.model.Model
:param advx: numpy array containing the adversarial examples made so far
  by earlier work in the bundling process
:param y: numpy array containing true labels
:param batch_size: int, batch size
""" |
names, factory = self.extra_criteria()
factory = _CriteriaFactory(model, factory)
results = batch_eval_multi_worker(sess, factory, [advx, y],
batch_size=batch_size, devices=devices)
names = ['correctness', 'confidence'] + names
out = dict(safe_zip(names, results))
return out |
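As a usage sketch (variable names assumed), the returned dictionary can be inspected directly to summarize how an adversarial dataset fares under the goal's criteria.

criteria = goal.get_criteria(sess, model, adv_x_val, y_test)
adv_acc = criteria['correctness'].mean()   # fraction still classified correctly
mean_conf = criteria['confidence'].mean()  # mean confidence value tracked by the goal
print("adv accuracy %.4f, mean confidence %.4f" % (adv_acc, mean_conf))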
<SYSTEM_TASK:>
Returns a numpy array of integer example indices to run in the next batch.
<END_TASK>
<USER_TASK:>
Description:
def request_examples(self, attack_config, criteria, run_counts, batch_size):
"""
Returns a numpy array of integer example indices to run in the next batch.
""" |
raise NotImplementedError(str(type(self)) +
                          " needs to implement request_examples")
<SYSTEM_TASK:>
Return run counts only for examples that are still correctly classified
<END_TASK>
<USER_TASK:>
Description:
def filter(self, run_counts, criteria):
"""
Return run counts only for examples that are still correctly classified
""" |
correctness = criteria['correctness']
assert correctness.dtype == np.bool
filtered_counts = deep_copy(run_counts)
for key in filtered_counts:
filtered_counts[key] = filtered_counts[key][correctness]
return filtered_counts |
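A tiny made-up example of the filtering behaviour; the config key below is a stand-in for an AttackConfig object.

import numpy as np

cfg = "hypothetical_attack_config"  # stands in for an AttackConfig instance
run_counts = {cfg: np.array([3, 1, 2])}
criteria = {'correctness': np.array([True, False, True])}
filtered = goal.filter(run_counts, criteria)
# filtered[cfg] is np.array([3, 2]); only still-correct examples remain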