text_prompt (string, 100 to 17.7k chars, nullable) | code_prompt (string, 7 to 9.86k chars, nullable) |
---|---|
<SYSTEM_TASK:>
Return the counts for only those examples that are below the threshold
<END_TASK>
<USER_TASK:>
Description:
def filter(self, run_counts, criteria):
"""
Return the counts for only those examples that are below the threshold
""" |
wrong_confidence = criteria['wrong_confidence']
below_t = wrong_confidence <= self.t
filtered_counts = deep_copy(run_counts)
for key in filtered_counts:
filtered_counts[key] = filtered_counts[key][below_t]
return filtered_counts |
<SYSTEM_TASK:>
Clip an image, or an image batch, with upper and lower threshold.
<END_TASK>
<USER_TASK:>
Description:
def clip_image(image, clip_min, clip_max):
""" Clip an image, or an image batch, with upper and lower threshold. """ |
return np.minimum(np.maximum(clip_min, image), clip_max) |
<SYSTEM_TASK:>
Compute the distance between two images.
<END_TASK>
<USER_TASK:>
Description:
def compute_distance(x_ori, x_pert, constraint='l2'):
""" Compute the distance between two images. """ |
if constraint == 'l2':
dist = np.linalg.norm(x_ori - x_pert)
elif constraint == 'linf':
dist = np.max(abs(x_ori - x_pert))
return dist |
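A minimal illustration of the two metrics, assuming the `compute_distance` function above is in scope and using a toy 2x2 perturbation:

import numpy as np

x_ori = np.zeros((2, 2))
x_pert = np.array([[0.3, 0.0], [0.0, -0.4]])
print(compute_distance(x_ori, x_pert, 'l2'))    # 0.5, i.e. sqrt(0.3**2 + 0.4**2)
print(compute_distance(x_ori, x_pert, 'linf'))  # 0.4, the largest absolute difference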
<SYSTEM_TASK:>
Gradient direction estimation
<END_TASK>
<USER_TASK:>
Description:
def approximate_gradient(decision_function, sample, num_evals,
delta, constraint, shape, clip_min, clip_max):
""" Gradient direction estimation """ |
# Generate random vectors.
noise_shape = [num_evals] + list(shape)
if constraint == 'l2':
rv = np.random.randn(*noise_shape)
elif constraint == 'linf':
rv = np.random.uniform(low=-1, high=1, size=noise_shape)
axis = tuple(range(1, 1 + len(shape)))
rv = rv / np.sqrt(np.sum(rv ** 2, axis=axis, keepdims=True))
perturbed = sample + delta * rv
perturbed = clip_image(perturbed, clip_min, clip_max)
rv = (perturbed - sample) / delta
# query the model.
decisions = decision_function(perturbed)
decision_shape = [len(decisions)] + [1] * len(shape)
fval = 2 * decisions.astype(np_dtype).reshape(decision_shape) - 1.0
# Baseline subtraction (when fval differs)
if np.mean(fval) == 1.0: # label changes.
gradf = np.mean(rv, axis=0)
elif np.mean(fval) == -1.0: # label not change.
gradf = - np.mean(rv, axis=0)
else:
fval = fval - np.mean(fval)
gradf = np.mean(fval * rv, axis=0)
# Get the gradient direction.
gradf = gradf / np.linalg.norm(gradf)
return gradf |
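To see why this estimator recovers the boundary normal, here is a self-contained NumPy sketch on a toy linear boundary; the normal `w`, the point on the boundary, and all constants are invented for the illustration and are not part of the snippet above:

import numpy as np

rng = np.random.default_rng(0)
d, num_evals, delta = 10, 2000, 0.1
w = rng.standard_normal(d)
w /= np.linalg.norm(w)                      # true boundary normal; decision is sign(w . x)
sample = np.zeros(d)                        # a point sitting on the boundary

rv = rng.standard_normal((num_evals, d))
rv /= np.linalg.norm(rv, axis=1, keepdims=True)
fval = 2.0 * ((sample + delta * rv) @ w > 0).astype(np.float64) - 1.0  # +/-1 decisions
gradf = np.mean(fval[:, None] * rv, axis=0)
gradf /= np.linalg.norm(gradf)
print(gradf @ w)                            # close to 1.0: the direction is recovered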
<SYSTEM_TASK:>
Binary search to approach the boundary.
<END_TASK>
<USER_TASK:>
Description:
def binary_search_batch(original_image, perturbed_images, decision_function,
shape, constraint, theta):
""" Binary search to approach the boundary. """ |
# Compute distance between each of perturbed image and original image.
dists_post_update = np.array([
compute_distance(
original_image,
perturbed_image,
constraint
)
for perturbed_image in perturbed_images])
# Choose upper thresholds in binary searches based on the constraint.
if constraint == 'linf':
highs = dists_post_update
# Stopping criteria.
thresholds = np.minimum(dists_post_update * theta, theta)
else:
highs = np.ones(len(perturbed_images))
thresholds = theta
lows = np.zeros(len(perturbed_images))
while np.max((highs - lows) / thresholds) > 1:
# projection to mids.
mids = (highs + lows) / 2.0
mid_images = project(original_image, perturbed_images,
mids, shape, constraint)
# Update highs and lows based on model decisions.
decisions = decision_function(mid_images)
lows = np.where(decisions == 0, mids, lows)
highs = np.where(decisions == 1, mids, highs)
out_images = project(original_image, perturbed_images,
highs, shape, constraint)
# Compute distance of the output image to select the best choice.
# (only used when stepsize_search is grid_search.)
dists = np.array([
compute_distance(
original_image,
out_image,
constraint
)
for out_image in out_images])
idx = np.argmin(dists)
dist = dists_post_update[idx]
out_image = out_images[idx]
return out_image, dist |
<SYSTEM_TASK:>
Efficient Implementation of BlendedUniformNoiseAttack in Foolbox.
<END_TASK>
<USER_TASK:>
Description:
def initialize(decision_function, sample, shape, clip_min, clip_max):
"""
Efficient Implementation of BlendedUniformNoiseAttack in Foolbox.
""" |
success = 0
num_evals = 0
# Find a misclassified random noise.
while True:
random_noise = np.random.uniform(clip_min, clip_max, size=shape)
success = decision_function(random_noise[None])[0]
if success:
break
num_evals += 1
message = "Initialization failed! Try to use a misclassified image as `target_image`"
assert num_evals < 1e4, message
# Binary search to minimize l2 distance to original image.
low = 0.0
high = 1.0
while high - low > 0.001:
mid = (high + low) / 2.0
blended = (1 - mid) * sample + mid * random_noise
success = decision_function(blended[None])[0]
if success:
high = mid
else:
low = mid
initialization = (1 - high) * sample + high * random_noise
return initialization |
<SYSTEM_TASK:>
Geometric progression to search for stepsize.
<END_TASK>
<USER_TASK:>
Description:
def geometric_progression_for_stepsize(x, update, dist, decision_function,
current_iteration):
""" Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary.
""" |
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = x + epsilon * update
success = decision_function(updated[None])[0]
if success:
break
else:
epsilon = epsilon / 2.0
return epsilon |
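A toy sanity check of the epsilon-halving search, assuming the function above is in scope; the one-dimensional "adversarial region" below 0.5 is an invented example:

import numpy as np

def decision_function(batch):
    # adversarial iff the (scalar) point is still below the toy boundary at 0.5
    return np.array([float(b) < 0.5 for b in batch])

x, update = np.array(0.4), np.array(1.0)    # current adversarial point and step direction
eps = geometric_progression_for_stepsize(x, update, dist=0.8,
                                         decision_function=decision_function,
                                         current_iteration=4)
print(eps)  # starts at 0.8 / sqrt(4) = 0.4 and halves down to 0.05, the first step that stays adversarial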
<SYSTEM_TASK:>
Choose the delta at the scale of distance
<END_TASK>
<USER_TASK:>
Description:
def select_delta(dist_post_update, current_iteration,
clip_max, clip_min, d, theta, constraint):
"""
Choose the delta at the scale of distance
between x and perturbed sample.
""" |
if current_iteration == 1:
delta = 0.1 * (clip_max - clip_min)
else:
if constraint == 'l2':
delta = np.sqrt(d) * theta * dist_post_update
elif constraint == 'linf':
delta = d * theta * dist_post_update
return delta |
<SYSTEM_TASK:>
TensorFlow implementation of the Fast Feature Gradient. This is a
<END_TASK>
<USER_TASK:>
Description:
def attack_single_step(self, x, eta, g_feat):
"""
TensorFlow implementation of the Fast Feature Gradient. This is a
single step attack similar to Fast Gradient Method that attacks an
internal representation.
:param x: the input placeholder
:param eta: A tensor the same shape as x that holds the perturbation.
:param g_feat: model's internal tensor for guide
:return: a tensor for the adversarial example
""" |
adv_x = x + eta
a_feat = self.model.fprop(adv_x)[self.layer]
# feat.shape = (batch, c) or (batch, w, h, c)
axis = list(range(1, len(a_feat.shape)))
# Compute loss
# This is a targeted attack, hence the negative sign
loss = -reduce_sum(tf.square(a_feat - g_feat), axis)
# Define gradient of loss wrt input
grad, = tf.gradients(loss, adv_x)
# Multiply by constant epsilon
scaled_signed_grad = self.eps_iter * tf.sign(grad)
# Add perturbation to original example to obtain adversarial example
adv_x = adv_x + scaled_signed_grad
# If clipping is needed,
# reset all values outside of [clip_min, clip_max]
if (self.clip_min is not None) and (self.clip_max is not None):
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
adv_x = tf.stop_gradient(adv_x)
eta = adv_x - x
eta = clip_eta(eta, self.ord, self.eps)
return eta |
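For context, a rough sketch of how a single step like this is typically iterated into a full attack; the random start, `self.nb_iter`, and the guide input `g` are assumptions for illustration and are not taken from the snippet above:

g_feat = self.model.fprop(g)[self.layer]                   # features of the guide image
eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)  # random start inside the eps-ball
eta = clip_eta(eta, self.ord, self.eps)
for _ in range(self.nb_iter):                              # statically unrolled for brevity
    eta = self.attack_single_step(x, eta, g_feat)
adv_x = x + eta
if self.clip_min is not None and self.clip_max is not None:
    adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)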
<SYSTEM_TASK:>
Creates the Inception Resnet V2 model.
<END_TASK>
<USER_TASK:>
Description:
def inception_resnet_v2(inputs, nb_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionResnetV2',
create_aux_logits=True,
num_classes=None):
"""Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
nb_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxiliary logits.
num_classes: deprecated alias for nb_classes
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
""" |
if num_classes is not None:
warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
" `num_classes` may be removed on or after 2019-04-23.")
nb_classes = num_classes
del num_classes
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, nb_classes],
reuse=reuse) as var_scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_resnet_v2_base(inputs, scope=var_scope)
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
aux = end_points['PreAuxLogits']
aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a_5x5')
aux = slim.flatten(aux)
aux = slim.fully_connected(aux, nb_classes, activation_fn=None,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
logits = slim.fully_connected(net, nb_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points |
<SYSTEM_TASK:>
Returns the scope with the default parameters for inception_resnet_v2.
<END_TASK>
<USER_TASK:>
Description:
def inception_resnet_v2_arg_scope(weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Returns the scope with the default parameters for inception_resnet_v2.
Args:
weight_decay: the weight decay for weights variables.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
Returns:
an arg_scope with the parameters needed for inception_resnet_v2.
""" |
# Set weight_decay for weights in conv2d and fully_connected layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay)):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
}
# Set activation_fn and parameters for batch_norm.
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope |
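A brief usage sketch of the two functions above, following the usual slim pattern; the `images` placeholder is an assumed 299x299 RGB input:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_resnet_v2_arg_scope()):
    logits, end_points = inception_resnet_v2(images, nb_classes=1001, is_training=False)
probs = end_points['Predictions']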
<SYSTEM_TASK:>
Validate all submissions and copy them into place
<END_TASK>
<USER_TASK:>
Description:
def main(args):
"""Validate all submissions and copy them into place""" |
random.seed()
temp_dir = tempfile.mkdtemp()
logging.info('Created temporary directory: %s', temp_dir)
validator = SubmissionValidator(
source_dir=args.source_dir,
target_dir=args.target_dir,
temp_dir=temp_dir,
do_copy=args.copy,
use_gpu=args.use_gpu,
containers_file=args.containers_file)
validator.run()
logging.info('Deleting temporary directory: %s', temp_dir)
subprocess.call(['rm', '-rf', temp_dir]) |
<SYSTEM_TASK:>
Copies submission from Google Cloud Storage to local directory.
<END_TASK>
<USER_TASK:>
Description:
def copy_submission_locally(self, cloud_path):
"""Copies submission from Google Cloud Storage to local directory.
Args:
cloud_path: path of the submission in Google Cloud Storage
Returns:
name of the local file where submission is copied to
""" |
local_path = os.path.join(self.download_dir, os.path.basename(cloud_path))
cmd = ['gsutil', 'cp', cloud_path, local_path]
if subprocess.call(cmd) != 0:
logging.error('Can\'t copy submission locally')
return None
return local_path |
<SYSTEM_TASK:>
Copies submission to target directory.
<END_TASK>
<USER_TASK:>
Description:
def copy_submission_to_destination(self, src_filename, dst_subdir,
submission_id):
"""Copies submission to target directory.
Args:
src_filename: source filename of the submission
dst_subdir: subdirectory of the target directory where submission should
be copied to
submission_id: ID of the submission, will be used as a new
submission filename (before extension)
""" |
extension = [e for e in ALLOWED_EXTENSIONS if src_filename.endswith(e)]
if len(extension) != 1:
logging.error('Invalid submission extension: %s', src_filename)
return
dst_filename = os.path.join(self.target_dir, dst_subdir,
submission_id + extension[0])
cmd = ['gsutil', 'cp', src_filename, dst_filename]
if subprocess.call(cmd) != 0:
logging.error('Can\'t copy submission to destination')
else:
logging.info('Submission copied to: %s', dst_filename) |
<SYSTEM_TASK:>
Validates one submission and copies it to target directory.
<END_TASK>
<USER_TASK:>
Description:
def validate_and_copy_one_submission(self, submission_path):
"""Validates one submission and copies it to target directory.
Args:
submission_path: path in Google Cloud Storage of the submission file
""" |
if os.path.exists(self.download_dir):
shutil.rmtree(self.download_dir)
os.makedirs(self.download_dir)
if os.path.exists(self.validate_dir):
shutil.rmtree(self.validate_dir)
os.makedirs(self.validate_dir)
logging.info('\n' + ('#' * 80) + '\n# Processing submission: %s\n'
+ '#' * 80, submission_path)
local_path = self.copy_submission_locally(submission_path)
metadata = self.base_validator.validate_submission(local_path)
if not metadata:
logging.error('Submission "%s" is INVALID', submission_path)
self.stats.add_failure()
return
submission_type = metadata['type']
container_name = metadata['container_gpu']
logging.info('Submission "%s" is VALID', submission_path)
self.list_of_containers.add(container_name)
self.stats.add_success(submission_type)
if self.do_copy:
submission_id = '{0:04}'.format(self.cur_submission_idx)
self.cur_submission_idx += 1
self.copy_submission_to_destination(submission_path,
TYPE_TO_DIR[submission_type],
submission_id)
self.id_to_path_mapping[submission_id] = submission_path |
<SYSTEM_TASK:>
Saves mapping from submission IDs to original filenames.
<END_TASK>
<USER_TASK:>
Description:
def save_id_to_path_mapping(self):
"""Saves mapping from submission IDs to original filenames.
This mapping is saved as CSV file into target directory.
""" |
if not self.id_to_path_mapping:
return
with open(self.local_id_to_path_mapping_file, 'w') as f:
writer = csv.writer(f)
writer.writerow(['id', 'path'])
for k, v in sorted(iteritems(self.id_to_path_mapping)):
writer.writerow([k, v])
cmd = ['gsutil', 'cp', self.local_id_to_path_mapping_file,
os.path.join(self.target_dir, 'id_to_path_mapping.csv')]
if subprocess.call(cmd) != 0:
logging.error('Can\'t copy id_to_path_mapping.csv to target directory') |
<SYSTEM_TASK:>
Runs validation of all submissions.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""Runs validation of all submissions.""" |
cmd = ['gsutil', 'ls', os.path.join(self.source_dir, '**')]
try:
files_list = subprocess.check_output(cmd).split('\n')
except subprocess.CalledProcessError:
logging.error('Can\'t read source directory')
all_submissions = [
s for s in files_list
if s.endswith('.zip') or s.endswith('.tar') or s.endswith('.tar.gz')
]
for submission_path in all_submissions:
self.validate_and_copy_one_submission(submission_path)
self.stats.log_stats()
self.save_id_to_path_mapping()
if self.containers_file:
with open(self.containers_file, 'w') as f:
f.write('\n'.join(sorted(self.list_of_containers))) |
<SYSTEM_TASK:>
Takes the path to a directory with reports and renders success fail plots.
<END_TASK>
<USER_TASK:>
Description:
def main(argv=None):
"""Takes the path to a directory with reports and renders success fail plots.""" |
report_paths = argv[1:]
fail_names = FLAGS.fail_names.split(',')
for report_path in report_paths:
plot_report_from_path(report_path, label=report_path, fail_names=fail_names)
pyplot.legend()
pyplot.xlim(-.01, 1.)
pyplot.ylim(0., 1.)
pyplot.show() |
<SYSTEM_TASK:>
Returns True if work piece is unclaimed.
<END_TASK>
<USER_TASK:>
Description:
def is_unclaimed(work):
"""Returns True if work piece is unclaimed.""" |
if work['is_completed']:
return False
cutoff_time = time.time() - MAX_PROCESSING_TIME
if (work['claimed_worker_id'] and
work['claimed_worker_start_time'] is not None
and work['claimed_worker_start_time'] >= cutoff_time):
return False
return True |
<SYSTEM_TASK:>
Writes all work pieces into datastore.
<END_TASK>
<USER_TASK:>
Description:
def write_all_to_datastore(self):
"""Writes all work pieces into datastore.
Each work piece is identified by ID. This method writes/updates only those
work pieces which IDs are stored in this class. For examples, if this class
has only work pieces with IDs '1' ... '100' and datastore already contains
work pieces with IDs '50' ... '200' then this method will create new
work pieces with IDs '1' ... '49', update work pieces with IDs
'50' ... '100' and keep unchanged work pieces with IDs '101' ... '200'.
""" |
client = self._datastore_client
with client.no_transact_batch() as batch:
parent_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id)
batch.put(client.entity(parent_key))
for work_id, work_val in iteritems(self._work):
entity = client.entity(client.key(KIND_WORK, work_id,
parent=parent_key))
entity.update(work_val)
batch.put(entity) |
<SYSTEM_TASK:>
Reads all work pieces from the datastore.
<END_TASK>
<USER_TASK:>
Description:
def read_all_from_datastore(self):
"""Reads all work pieces from the datastore.""" |
self._work = {}
client = self._datastore_client
parent_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id)
for entity in client.query_fetch(kind=KIND_WORK, ancestor=parent_key):
work_id = entity.key.flat_path[-1]
self.work[work_id] = dict(entity) |
<SYSTEM_TASK:>
Reads undone work pieces which are assigned to the shard with the given id.
<END_TASK>
<USER_TASK:>
Description:
def _read_undone_shard_from_datastore(self, shard_id=None):
"""Reads undone worke pieces which are assigned to shard with given id.""" |
self._work = {}
client = self._datastore_client
parent_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id)
filters = [('is_completed', '=', False)]
if shard_id is not None:
filters.append(('shard_id', '=', shard_id))
for entity in client.query_fetch(kind=KIND_WORK, ancestor=parent_key,
filters=filters):
work_id = entity.key.flat_path[-1]
self.work[work_id] = dict(entity)
if len(self._work) >= MAX_WORK_RECORDS_READ:
break |
<SYSTEM_TASK:>
Reads undone work from the datastore.
<END_TASK>
<USER_TASK:>
Description:
def read_undone_from_datastore(self, shard_id=None, num_shards=None):
"""Reads undone work from the datastore.
If shard_id and num_shards are specified then this method will attempt
to read undone work for the shard with id shard_id. If no undone work is
found there, it will try shard (shard_id+1) and so on, until it either
finds a shard with undone work or all shards have been read.
Args:
shard_id: Id of the start shard
num_shards: total number of shards
Returns:
id of the shard with undone work which was read. None means that all
shards in the datastore were read.
""" |
if shard_id is not None:
shards_list = [(i + shard_id) % num_shards for i in range(num_shards)]
else:
shards_list = []
shards_list.append(None)
for shard in shards_list:
self._read_undone_shard_from_datastore(shard)
if self._work:
return shard
return None |
<SYSTEM_TASK:>
Tries to pick the next unclaimed piece of work to do.
<END_TASK>
<USER_TASK:>
Description:
def try_pick_piece_of_work(self, worker_id, submission_id=None):
"""Tries pick next unclaimed piece of work to do.
Attempt to claim work piece is done using Cloud Datastore transaction, so
only one worker can claim any work piece at a time.
Args:
worker_id: ID of current worker
submission_id: if not None then this method will try to pick
piece of work for this submission
Returns:
ID of the claimed work piece
""" |
client = self._datastore_client
unclaimed_work_ids = None
if submission_id:
unclaimed_work_ids = [
k for k, v in iteritems(self.work)
if is_unclaimed(v) and (v['submission_id'] == submission_id)
]
if not unclaimed_work_ids:
unclaimed_work_ids = [k for k, v in iteritems(self.work)
if is_unclaimed(v)]
if unclaimed_work_ids:
next_work_id = random.choice(unclaimed_work_ids)
else:
return None
try:
with client.transaction() as transaction:
work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
KIND_WORK, next_work_id)
work_entity = client.get(work_key, transaction=transaction)
if not is_unclaimed(work_entity):
return None
work_entity['claimed_worker_id'] = worker_id
work_entity['claimed_worker_start_time'] = get_integer_time()
transaction.put(work_entity)
except Exception:
return None
return next_work_id |
<SYSTEM_TASK:>
Updates work piece in datastore as completed.
<END_TASK>
<USER_TASK:>
Description:
def update_work_as_completed(self, worker_id, work_id, other_values=None,
error=None):
"""Updates work piece in datastore as completed.
Args:
worker_id: ID of the worker which did the work
work_id: ID of the work which was done
other_values: dictionary with additional values which should be saved
with the work piece
error: if not None then error occurred during computation of the work
piece. In such case work will be marked as completed with error.
Returns:
whether work was successfully updated
""" |
client = self._datastore_client
try:
with client.transaction() as transaction:
work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
KIND_WORK, work_id)
work_entity = client.get(work_key, transaction=transaction)
if work_entity['claimed_worker_id'] != worker_id:
return False
work_entity['is_completed'] = True
if other_values:
work_entity.update(other_values)
if error:
work_entity['error'] = text_type(error)
transaction.put(work_entity)
except Exception:
return False
return True |
<SYSTEM_TASK:>
Computes statistics from all work pieces stored in this class.
<END_TASK>
<USER_TASK:>
Description:
def compute_work_statistics(self):
"""Computes statistics from all work pieces stored in this class.""" |
result = {}
for v in itervalues(self.work):
submission_id = v['submission_id']
if submission_id not in result:
result[submission_id] = {
'completed': 0,
'num_errors': 0,
'error_messages': set(),
'eval_times': [],
'min_eval_time': None,
'max_eval_time': None,
'mean_eval_time': None,
'median_eval_time': None,
}
if not v['is_completed']:
continue
result[submission_id]['completed'] += 1
if 'error' in v and v['error']:
result[submission_id]['num_errors'] += 1
result[submission_id]['error_messages'].add(v['error'])
else:
result[submission_id]['eval_times'].append(float(v['elapsed_time']))
for v in itervalues(result):
if v['eval_times']:
v['min_eval_time'] = np.min(v['eval_times'])
v['max_eval_time'] = np.max(v['eval_times'])
v['mean_eval_time'] = np.mean(v['eval_times'])
v['median_eval_time'] = np.median(v['eval_times'])
return result |
<SYSTEM_TASK:>
Initializes work pieces from adversarial batches.
<END_TASK>
<USER_TASK:>
Description:
def init_from_adversarial_batches(self, adv_batches):
"""Initializes work pieces from adversarial batches.
Args:
adv_batches: dict with adversarial batches,
could be obtained as AdversarialBatches.data
""" |
for idx, (adv_batch_id, adv_batch_val) in enumerate(iteritems(adv_batches)):
work_id = ATTACK_WORK_ID_PATTERN.format(idx)
self.work[work_id] = {
'claimed_worker_id': None,
'claimed_worker_start_time': None,
'is_completed': False,
'error': None,
'elapsed_time': None,
'submission_id': adv_batch_val['submission_id'],
'shard_id': None,
'output_adversarial_batch_id': adv_batch_id,
} |
<SYSTEM_TASK:>
Initializes work pieces from classification batches.
<END_TASK>
<USER_TASK:>
Description:
def init_from_class_batches(self, class_batches, num_shards=None):
"""Initializes work pieces from classification batches.
Args:
class_batches: dict with classification batches, could be obtained
as ClassificationBatches.data
num_shards: number of shards to split data into,
if None then no sharding is done.
""" |
shards_for_submissions = {}
shard_idx = 0
for idx, (batch_id, batch_val) in enumerate(iteritems(class_batches)):
work_id = DEFENSE_WORK_ID_PATTERN.format(idx)
submission_id = batch_val['submission_id']
shard_id = None
if num_shards:
shard_id = shards_for_submissions.get(submission_id)
if shard_id is None:
shard_id = shard_idx % num_shards
shards_for_submissions[submission_id] = shard_id
shard_idx += 1
# Note: defense also might have following fields populated by worker:
# stat_correct, stat_error, stat_target_class, stat_num_images
self.work[work_id] = {
'claimed_worker_id': None,
'claimed_worker_start_time': None,
'is_completed': False,
'error': None,
'elapsed_time': None,
'submission_id': submission_id,
'shard_id': shard_id,
'output_classification_batch_id': batch_id,
} |
<SYSTEM_TASK:>
Returns the graph for Fast Gradient Method adversarial examples.
<END_TASK>
<USER_TASK:>
Description:
def generate(self, x, **kwargs):
"""
Returns the graph for Fast Gradient Method adversarial examples.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
""" |
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
labels, _nb_classes = self.get_or_guess_labels(x, kwargs)
return fgm(
x,
self.model.get_logits(x),
y=labels,
eps=self.eps,
ord=self.ord,
clip_min=self.clip_min,
clip_max=self.clip_max,
targeted=(self.y_target is not None),
sanity_checks=self.sanity_checks) |
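The typical call pattern for this attack (the same pattern appears further down in get_logits_over_interval); `model`, `sess` and the parameter values are assumptions:

import numpy as np

fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, eps=0.3, ord=np.inf, clip_min=0., clip_max=1.)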
<SYSTEM_TASK:>
Function to read the weights from checkpoint based on json description.
<END_TASK>
<USER_TASK:>
Description:
def load_network_from_checkpoint(checkpoint, model_json, input_shape=None):
"""Function to read the weights from checkpoint based on json description.
Args:
checkpoint: tensorflow checkpoint with trained model to
verify
model_json: path of json file with model description of
the network list of dictionary items for each layer
containing 'type', 'weight_var', 'bias_var' and
'is_transpose' 'type'is one of {'ff', 'ff_relu' or
'conv'}; 'weight_var' is the name of tf variable for
weights of layer i; 'bias_var' is the name of tf
variable for bias of layer i; 'is_transpose' is set to
True if the weights have to be transposed as per
convention Note that last layer is always feedforward
net_weights: list of numpy matrices of weights of each layer
convention: x[i+1] = W[i] x[i]
net_biases: list of numpy arrays of biases of each layer
net_layer_types: type of each layer ['ff' or 'ff_relu' or 'ff_conv'
or 'ff_conv_relu']
'ff': Simple feedforward layer with no activations
'ff_relu': Simple feedforward layer with ReLU activations
'ff_conv': Convolution layer with no activation
'ff_conv_relu': Convolution layer with ReLU activation
Raises:
ValueError: If layer_types are invalid or variable names
not found in checkpoint
""" |
# Load checkpoint
reader = tf.train.load_checkpoint(checkpoint)
variable_map = reader.get_variable_to_shape_map()
checkpoint_variable_names = variable_map.keys()
# Parse JSON file for names
with tf.gfile.Open(model_json) as f:
list_model_var = json.load(f)
net_layer_types = []
net_weights = []
net_biases = []
cnn_params = []
# Checking validity of the input and adding to list
for layer_model_var in list_model_var:
if layer_model_var['type'] not in {'ff', 'ff_relu', 'conv'}:
raise ValueError('Invalid layer type in description')
if (layer_model_var['weight_var'] not in checkpoint_variable_names or
layer_model_var['bias_var'] not in checkpoint_variable_names):
raise ValueError('Variable names not found in checkpoint')
net_layer_types.append(layer_model_var['type'])
layer_weight = reader.get_tensor(layer_model_var['weight_var'])
layer_bias = reader.get_tensor(layer_model_var['bias_var'])
# TODO(aditirag): is there a way to automatically check when to transpose
# We want weights W such that x^{i+1} = W^i x^i + b^i
# Can think of a hack involving matching shapes but if shapes are equal
# it can be ambiguous
if layer_model_var['type'] in {'ff', 'ff_relu'}:
layer_weight = np.transpose(layer_weight)
cnn_params.append(None)
if layer_model_var['type'] in {'conv'}:
if 'stride' not in layer_model_var or 'padding' not in layer_model_var:
raise ValueError('Please define stride and padding for conv layers.')
cnn_params.append({'stride': layer_model_var['stride'], 'padding': layer_model_var['padding']})
net_weights.append(layer_weight)
net_biases.append(np.reshape(layer_bias, (np.size(layer_bias), 1)))
return NeuralNetwork(net_weights, net_biases, net_layer_types, input_shape, cnn_params) |
<SYSTEM_TASK:>
Performs forward pass through the layer weights at layer_index.
<END_TASK>
<USER_TASK:>
Description:
def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):
"""Performs forward pass through the layer weights at layer_index.
Args:
vector: vector that has to be passed through in forward pass
layer_index: index of the layer
is_transpose: whether the weights of the layer have to be transposed
is_abs: whether to take the absolute value of the weights
Returns:
tensor that corresponds to the forward pass through the layer
Raises:
ValueError: if the layer_index is negative or more than num hidden layers
""" |
if(layer_index < 0 or layer_index > self.num_hidden_layers):
raise ValueError('Invalid layer index')
layer_type = self.layer_types[layer_index]
weight = self.weights[layer_index]
if is_abs:
weight = tf.abs(weight)
if is_transpose:
vector = tf.reshape(vector, self.output_shapes[layer_index])
else:
vector = tf.reshape(vector, self.input_shapes[layer_index])
if layer_type in {'ff', 'ff_relu'}:
if is_transpose:
weight = tf.transpose(weight)
return_vector = tf.matmul(weight, vector)
elif layer_type in {'conv', 'conv_relu'}:
if is_transpose:
return_vector = tf.nn.conv2d_transpose(vector,
weight,
output_shape=self.input_shapes[layer_index],
strides=[1, self.cnn_params[layer_index]['stride'],
self.cnn_params[layer_index]['stride'], 1],
padding=self.cnn_params[layer_index]['padding'])
else:
return_vector = tf.nn.conv2d(vector,
weight,
strides=[1, self.cnn_params[layer_index]['stride'],
self.cnn_params[layer_index]['stride'], 1],
padding=self.cnn_params[layer_index]['padding'])
else:
raise NotImplementedError('Unsupported layer type: {0}'.format(layer_type))
if is_transpose:
return tf.reshape(return_vector, (self.sizes[layer_index], 1))
return tf.reshape(return_vector, (self.sizes[layer_index + 1], 1)) |
<SYSTEM_TASK:>
Returns a hexdigest of all the python files in the module.
<END_TASK>
<USER_TASK:>
Description:
def dev_version():
"""
Returns a hexdigest of all the python files in the module.
""" |
md5_hash = hashlib.md5()
py_files = sorted(list_files(suffix=".py"))
if not py_files:
return ''
for filename in py_files:
with open(filename, 'rb') as fobj:
content = fobj.read()
md5_hash.update(content)
return md5_hash.hexdigest() |
<SYSTEM_TASK:>
Function to initialize the dual variables of the class.
<END_TASK>
<USER_TASK:>
Description:
def initialize_dual(neural_net_params_object, init_dual_file=None,
random_init_variance=0.01, init_nu=200.0):
"""Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to file containing dual variables, if the path
is empty, perform random initialization
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately.
""" |
lambda_pos = []
lambda_neg = []
lambda_quad = []
lambda_lu = []
if init_dual_file is None:
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_pos.append(tf.get_variable('lambda_pos_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_neg.append(tf.get_variable('lambda_neg_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_quad.append(tf.get_variable('lambda_quad_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_lu.append(tf.get_variable('lambda_lu_' + str(i),
initializer=initializer,
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=init_nu)
else:
# Loading from file
dual_var_init_val = np.load(init_dual_file).item()
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
lambda_pos.append(
tf.get_variable('lambda_pos_' + str(i),
initializer=dual_var_init_val['lambda_pos'][i],
dtype=tf.float32))
lambda_neg.append(
tf.get_variable('lambda_neg_' + str(i),
initializer=dual_var_init_val['lambda_neg'][i],
dtype=tf.float32))
lambda_quad.append(
tf.get_variable('lambda_quad_' + str(i),
initializer=dual_var_init_val['lambda_quad'][i],
dtype=tf.float32))
lambda_lu.append(
tf.get_variable('lambda_lu_' + str(i),
initializer=dual_var_init_val['lambda_lu'][i],
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=1.0*dual_var_init_val['nu'])
dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg,
'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}
return dual_var |
<SYSTEM_TASK:>
Computes eigenvector which corresponds to minimum eigenvalue.
<END_TASK>
<USER_TASK:>
Description:
def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):
"""Computes eigenvector which corresponds to minimum eigenvalue.
Args:
x: initial value of eigenvector.
num_steps: number of optimization steps.
learning_rate: learning rate.
vector_prod_fn: function which takes x and returns product H*x.
Returns:
approximate value of eigenvector.
This function finds approximate value of eigenvector of matrix H which
corresponds to smallest (by absolute value) eigenvalue of H.
It works by solving optimization problem x^{T}*H*x -> min.
""" |
x = tf.nn.l2_normalize(x)
for _ in range(num_steps):
x = eig_one_step(x, learning_rate, vector_prod_fn)
return x |
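`eig_one_step` is not shown in this snippet; as a rough NumPy equivalent of the idea (a gradient step on x^T H x followed by re-normalization onto the unit sphere) one could write:

import numpy as np

def min_eigvec_numpy(H, num_steps=500, learning_rate=0.05, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(H.shape[0])
    x /= np.linalg.norm(x)
    for _ in range(num_steps):
        x = x - learning_rate * (H @ x)   # gradient step on (1/2) * x^T H x
        x /= np.linalg.norm(x)            # project back onto the unit sphere
    return x

H = np.diag([3.0, -0.5, 2.0])
v = min_eigvec_numpy(H)
print(v @ H @ v)   # approximately -0.5, the minimizing eigenvalue of H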
<SYSTEM_TASK:>
Computes smallest eigenvector and eigenvalue using Lanczos in pure TF.
<END_TASK>
<USER_TASK:>
Description:
def tf_lanczos_smallest_eigval(vector_prod_fn,
matrix_dim,
initial_vector,
num_iter=1000,
max_iter=1000,
collapse_tol=1e-9,
dtype=tf.float32):
"""Computes smallest eigenvector and eigenvalue using Lanczos in pure TF.
This function computes smallest eigenvector and eigenvalue of the matrix
which is implicitly specified by `vector_prod_fn`.
`vector_prod_fn` is a function which takes `x` and returns a product of matrix
in consideration and `x`.
Computation is done using Lanczos algorithm, see
https://en.wikipedia.org/wiki/Lanczos_algorithm#The_algorithm
Args:
vector_prod_fn: function which takes a vector as an input and returns
matrix vector product.
matrix_dim: dimensionality of the matrix.
initial_vector: guess vector to start the algorithm with
num_iter: user-defined number of iterations for the algorithm
max_iter: maximum number of iterations.
collapse_tol: tolerance to determine collapse of the Krylov subspace
dtype: type of data
Returns:
tuple of (eigenvalue, eigenvector) of smallest eigenvalue and corresponding
eigenvector.
""" |
# alpha will store diagonal elements
alpha = tf.TensorArray(dtype, size=1, dynamic_size=True, element_shape=())
# beta will store off diagonal elements
beta = tf.TensorArray(dtype, size=0, dynamic_size=True, element_shape=())
# q will store Krylov space basis
q_vectors = tf.TensorArray(
dtype, size=1, dynamic_size=True, element_shape=(matrix_dim, 1))
# If start vector is all zeros, make it a random normal vector and run for max_iter
if tf.norm(initial_vector) < collapse_tol:
initial_vector = tf.random_normal(shape=(matrix_dim, 1), dtype=dtype)
num_iter = max_iter
w = initial_vector / tf.norm(initial_vector)
# Iteration 0 of Lanczos
q_vectors = q_vectors.write(0, w)
w_ = vector_prod_fn(w)
cur_alpha = tf.reduce_sum(w_ * w)
alpha = alpha.write(0, cur_alpha)
w_ = w_ - tf.scalar_mul(cur_alpha, w)
w_prev = w
w = w_
# Subsequent iterations of Lanczos
for i in tf.range(1, num_iter):
cur_beta = tf.norm(w)
if cur_beta < collapse_tol:
# return early if Krylov subspace collapsed
break
# cur_beta is larger than collapse_tol,
# so division will return finite result.
w = w / cur_beta
w_ = vector_prod_fn(w)
cur_alpha = tf.reduce_sum(w_ * w)
q_vectors = q_vectors.write(i, w)
alpha = alpha.write(i, cur_alpha)
beta = beta.write(i-1, cur_beta)
w_ = w_ - tf.scalar_mul(cur_alpha, w) - tf.scalar_mul(cur_beta, w_prev)
w_prev = w
w = w_
alpha = alpha.stack()
beta = beta.stack()
q_vectors = tf.reshape(q_vectors.stack(), (-1, matrix_dim))
offdiag_submatrix = tf.linalg.diag(beta)
tridiag_matrix = (tf.linalg.diag(alpha)
+ tf.pad(offdiag_submatrix, [[0, 1], [1, 0]])
+ tf.pad(offdiag_submatrix, [[1, 0], [0, 1]]))
eigvals, eigvecs = tf.linalg.eigh(tridiag_matrix)
smallest_eigval = eigvals[0]
smallest_eigvec = tf.matmul(tf.reshape(eigvecs[:, 0], (1, -1)),
q_vectors)
smallest_eigvec = smallest_eigvec / tf.norm(smallest_eigvec)
smallest_eigvec = tf.reshape(smallest_eigvec, (matrix_dim, 1))
return smallest_eigval, smallest_eigvec |
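A compact NumPy version of the same Lanczos recursion can serve as a cross-check on small matrices; everything below is an illustrative sketch, not the TF code above:

import numpy as np

def lanczos_smallest_eig(matvec, dim, num_iter=30, tol=1e-9, seed=0):
    rng = np.random.default_rng(seed)
    v = rng.standard_normal(dim)
    v /= np.linalg.norm(v)
    v_prev, beta = np.zeros(dim), 0.0
    alphas, betas = [], []
    for _ in range(min(num_iter, dim)):
        w = matvec(v)
        alpha = w @ v
        alphas.append(alpha)
        w = w - alpha * v - beta * v_prev
        beta = np.linalg.norm(w)
        if beta < tol:                    # Krylov subspace collapsed
            break
        betas.append(beta)
        v_prev, v = v, w / beta
    k = len(alphas)
    T = np.diag(alphas) + np.diag(betas[:k - 1], 1) + np.diag(betas[:k - 1], -1)
    return np.linalg.eigvalsh(T)[0]

H = np.array([[2., 1., 0.], [1., 3., 1.], [0., 1., 4.]])
print(lanczos_smallest_eig(lambda u: H @ u, 3), np.linalg.eigvalsh(H)[0])  # both ~1.27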
<SYSTEM_TASK:>
Return a tensor that constructs adversarial examples for the given
<END_TASK>
<USER_TASK:>
Description:
def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: A tensor with the inputs.
:param kwargs: See `parse_params`
""" |
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
attack = CWL2(self.sess, self.model, self.batch_size, self.confidence,
'y_target' in kwargs, self.learning_rate,
self.binary_search_steps, self.max_iterations,
self.abort_early, self.initial_const, self.clip_min,
self.clip_max, nb_classes,
x.get_shape().as_list()[1:])
def cw_wrap(x_val, y_val):
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(cw_wrap, [x, labels], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap |
<SYSTEM_TASK:>
Perform the L_2 attack on the given instance for the given targets.
<END_TASK>
<USER_TASK:>
Description:
def attack(self, imgs, targets):
"""
Perform the L_2 attack on the given instance for the given targets.
If self.targeted is true, then the targets represents the target labels
If self.targeted is false, then targets are the original class labels
""" |
r = []
for i in range(0, len(imgs), self.batch_size):
_logger.debug(
("Running CWL2 attack on instance %s of %s", i, len(imgs)))
r.extend(
self.attack_batch(imgs[i:i + self.batch_size],
targets[i:i + self.batch_size]))
return np.array(r) |
<SYSTEM_TASK:>
Load model if present at the specified path.
<END_TASK>
<USER_TASK:>
Description:
def maybe_load_model(savedir, container):
"""Load model if present at the specified path.""" |
if savedir is None:
return
state_path = os.path.join(savedir, 'training_state.pkl.zip')
if container is not None:
logger.log("Attempting to download model from Azure")
found_model = container.get(savedir, 'training_state.pkl.zip')
else:
found_model = os.path.exists(state_path)
if found_model:
state = pickle_load(state_path, compression=True)
model_dir = "model-{}".format(state["num_iters"])
if container is not None:
container.get(savedir, model_dir)
U.load_state(os.path.join(savedir, model_dir, "saved"))
logger.log("Loaded models checkpoint at {} iterations".format(
state["num_iters"]))
return state |
<SYSTEM_TASK:>
Warn user if running cleverhans from a different directory than tutorial.
<END_TASK>
<USER_TASK:>
Description:
def check_installation(cur_file):
"""Warn user if running cleverhans from a different directory than tutorial.""" |
cur_dir = os.path.split(os.path.dirname(os.path.abspath(cur_file)))[0]
ch_dir = os.path.split(cleverhans.__path__[0])[0]
if cur_dir != ch_dir:
warnings.warn("It appears that you have at least two versions of "
"cleverhans installed, one at %s and one at"
" %s. You are running the tutorial script from the "
"former but python imported the library module from the "
"latter. This may cause errors, for example if the tutorial"
" version is newer than the library version and attempts to"
" call new features." % (cur_dir, ch_dir)) |
<SYSTEM_TASK:>
Downloads the image that corresponds to the given row.
<END_TASK>
<USER_TASK:>
Description:
def get_image(row, output_dir):
"""Downloads the image that corresponds to the given row.
Prints a notification if the download fails.""" |
if not download_image(image_id=row[0],
url=row[1],
x1=float(row[2]),
y1=float(row[3]),
x2=float(row[4]),
y2=float(row[5]),
output_dir=output_dir):
print("Download failed: " + str(row[0])) |
<SYSTEM_TASK:>
Downloads one image, crops it, resizes it and saves it locally.
<END_TASK>
<USER_TASK:>
Description:
def download_image(image_id, url, x1, y1, x2, y2, output_dir):
"""Downloads one image, crops it, resizes it and saves it locally.""" |
output_filename = os.path.join(output_dir, image_id + '.png')
if os.path.exists(output_filename):
# Don't download image if it's already there
return True
try:
# Download image
url_file = urlopen(url)
if url_file.getcode() != 200:
return False
image_buffer = url_file.read()
# Crop, resize and save image
image = Image.open(BytesIO(image_buffer)).convert('RGB')
w = image.size[0]
h = image.size[1]
image = image.crop((int(x1 * w), int(y1 * h), int(x2 * w),
int(y2 * h)))
image = image.resize((299, 299), resample=Image.ANTIALIAS)
image.save(output_filename)
except IOError:
return False
return True |
<SYSTEM_TASK:>
Custom py_func with gradient support
<END_TASK>
<USER_TASK:>
Description:
def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None):
"""Custom py_func with gradient support
""" |
# Need to generate a unique name to avoid duplicates:
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({"PyFunc": rnd_name,
"PyFuncStateless": rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name) |
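A hypothetical usage sketch (TF1 graph mode, matching the code above): wrap a NumPy square function and supply its gradient by hand. The helper names `_square_np` and `_square_grad` are invented for the example:

import numpy as np
import tensorflow as tf

def _square_np(x):
    return np.square(x).astype(np.float32)

def _square_grad(op, grad):   # standard (op, grad) signature expected by RegisterGradient
    return grad * 2.0 * op.inputs[0]

x = tf.placeholder(tf.float32, shape=[3])
y = py_func_grad(_square_np, [x], tf.float32, name='np_square', grad=_square_grad)
dy_dx, = tf.gradients(y, x)
with tf.Session() as sess:
    print(sess.run(dy_dx, feed_dict={x: np.array([1., 2., 3.], np.float32)}))  # [2. 4. 6.]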
<SYSTEM_TASK:>
Get logits when the input is perturbed in an interval in adv direction.
<END_TASK>
<USER_TASK:>
Description:
def get_logits_over_interval(sess, model, x_data, fgsm_params,
min_epsilon=-10., max_epsilon=10.,
num_points=21):
"""Get logits when the input is perturbed in an interval in adv direction.
Args:
sess: Tf session
model: Model for which we wish to get logits.
x_data: Numpy array corresponding to a single data
point of shape [height, width, channels].
fgsm_params: Parameters for generating adversarial examples.
min_epsilon: Minimum value of epsilon over the interval.
max_epsilon: Maximum value of epsilon over the interval.
num_points: Number of points used to interpolate.
Returns:
Numpy array containing logits.
Raises:
ValueError if min_epsilon is larger than max_epsilon.
""" |
# Get the height, width and number of channels
height = x_data.shape[0]
width = x_data.shape[1]
channels = x_data.shape[2]
x_data = np.expand_dims(x_data, axis=0)
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
# Define the data placeholder
x = tf.placeholder(dtype=tf.float32,
shape=[1, height,
width,
channels],
name='x')
# Define adv_x
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
if min_epsilon > max_epsilon:
raise ValueError('Minimum epsilon is larger than maximum epsilon')
eta = tf.nn.l2_normalize(adv_x - x, dim=0)
epsilon = tf.reshape(tf.lin_space(float(min_epsilon),
float(max_epsilon),
num_points),
(num_points, 1, 1, 1))
lin_batch = x + epsilon * eta
logits = model.get_logits(lin_batch)
with sess.as_default():
log_prob_adv_array = sess.run(logits,
feed_dict={x: x_data})
return log_prob_adv_array |
<SYSTEM_TASK:>
Generate linear extrapolation plot.
<END_TASK>
<USER_TASK:>
Description:
def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
min_epsilon=-10, max_epsilon=10,
num_points=21):
"""Generate linear extrapolation plot.
Args:
log_prob_adv_array: Numpy array containing log probabilities
y: Tf placeholder for the labels
file_name: Plot filename
min_epsilon: Minimum value of epsilon over the interval
max_epsilon: Maximum value of epsilon over the interval
num_points: Number of points used to interpolate
""" |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')
correct_idx = np.argmax(y, axis=0)
fig = plt.figure()
plt.xlabel('Epsilon')
plt.ylabel('Logits')
x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
plt.xlim(min_epsilon - 1, max_epsilon + 1)
for i in range(y.shape[0]):
if i == correct_idx:
ls = '-'
linewidth = 5
else:
ls = '--'
linewidth = 2
plt.plot(
x_axis,
log_prob_adv_array[:, i],
ls=ls,
linewidth=linewidth,
label='{}'.format(i))
plt.legend(loc='best', fontsize=14)
plt.show()
fig.savefig(file_name)
plt.clf()
return figure |
<SYSTEM_TASK:>
Request historical 1-minute data from DTN.
<END_TASK>
<USER_TASK:>
Description:
def get_historical_minute_data(self, ticker: str):
"""Request historical 5 minute data from DTN.""" |
start = self._start
stop = self._stop
if len(stop) > 4:
stop = stop[:4]
if len(start) > 4:
start = start[:4]
for year in range(int(start), int(stop) + 1):
beg_time = ('%s0101000000' % year)
end_time = ('%s1231235959' % year)
msg = "HIT,%s,60,%s,%s,,,,1,,,s\r\n" % (ticker,
beg_time,
end_time)
try:
data = iq.iq_query(message=msg)
iq.add_data_to_df(data=data)
except Exception as err:
log.error('No data returned because %s', err)
try:
self.dfdb.write_points(self._ndf, ticker)
except InfluxDBClientError as err:
log.error('Write to database failed: %s' % err) |
<SYSTEM_TASK:>
Connect to the window and bring it to the foreground
<END_TASK>
<USER_TASK:>
Description:
def connect(self, **kwargs):
"""
Connect to the window and bring it to the foreground
Args:
**kwargs: optional arguments
Returns:
None
""" |
self.app = self._app.connect(**kwargs)
try:
self._top_window = self.app.top_window().wrapper_object()
self.set_foreground()
except RuntimeError:
self._top_window = None |
<SYSTEM_TASK:>
Get rectangle of app or desktop resolution
<END_TASK>
<USER_TASK:>
Description:
def get_rect(self):
"""
Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom)
""" |
if self.handle:
left, top, right, bottom = win32gui.GetWindowRect(self.handle)
return RECT(left, top, right, bottom)
else:
desktop = win32gui.GetDesktopWindow()
left, top, right, bottom = win32gui.GetWindowRect(desktop)
return RECT(left, top, right, bottom) |
<SYSTEM_TASK:>
Take a screenshot and save it to `tmp.png` by default
<END_TASK>
<USER_TASK:>
Description:
def snapshot(self, filename="tmp.png"):
"""
Take a screenshot and save it to `tmp.png` by default
Args:
filename: name of file where to store the screenshot
Returns:
the screenshot image (the temporary file is removed after reading)
""" |
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img |
<SYSTEM_TASK:>
Return a constructor for a decoder for fields of a particular type.
<END_TASK>
<USER_TASK:>
Description:
def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
""" |
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(field_dict[key], pos) = decode_value(buffer, pos)
if pos > end:
del field_dict[key] # Discard corrupt value.
raise _DecodeError('Truncated message.')
return pos
return DecodeField
return SpecificDecoder |
<SYSTEM_TASK:>
Like SimpleDecoder but additionally invokes modify_value on every value
<END_TASK>
<USER_TASK:>
Description:
def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
""" |
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode) |
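For reference, the zigzag mapping mentioned above folds signed integers onto unsigned ones; a small stand-alone sketch of the decode step and a few sample values:

def zigzag_decode(value):
    # 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ...
    if not value & 0x1:
        return value >> 1
    return (value >> 1) ^ (~0)

print([zigzag_decode(v) for v in range(5)])  # [0, -1, 1, -2, 2]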
<SYSTEM_TASK:>
Return a constructor for a decoder for a fixed-width field.
<END_TASK>
<USER_TASK:>
Description:
def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
""" |
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode) |
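In the protobuf Python runtime these factories are then instantiated into the concrete per-type decoders, roughly along the following lines; this is an illustrative reconstruction rather than code taken from this file:

Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
SInt32Decoder = _ModifiedDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32,
                                 wire_format.ZigZagDecode)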
<SYSTEM_TASK:>
Returns a decoder for a float field.
<END_TASK>
<USER_TASK:>
Description:
def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
""" |
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
# If at least one significand bit is set...
if float_bytes[0:3] != b'\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
if float_bytes[3:4] == b'\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode) |
<SYSTEM_TASK:>
Returns a decoder for a double field.
<END_TASK>
<USER_TASK:>
Description:
def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
""" |
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7:8] in b'\x7F\xFF')
and (double_bytes[6:7] >= b'\xF0')
and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode) |
<SYSTEM_TASK:>
Returns a decoder for a string field.
<END_TASK>
<USER_TASK:>
Description:
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a string field.""" |
local_DecodeVarint = _DecodeVarint
local_unicode = six.text_type
def _ConvertToUnicode(byte_str):
try:
return local_unicode(byte_str, 'utf-8')
except UnicodeDecodeError as e:
# add more information to the error message and re-raise it.
e.reason = '%s in field: %s' % (e, key.full_name)
raise
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(_ConvertToUnicode(buffer[pos:new_pos]))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
return new_pos
return DecodeField |
<SYSTEM_TASK:>
Returns a decoder for a bytes field.
<END_TASK>
<USER_TASK:>
Description:
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field.""" |
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField |
<SYSTEM_TASK:>
Returns a decoder for a group field.
<END_TASK>
<USER_TASK:>
Description:
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field.""" |
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField |
<SYSTEM_TASK:>
Returns a decoder for a map field.
<END_TASK>
<USER_TASK:>
Description:
def MapDecoder(field_descriptor, new_default, is_message_map):
"""Returns a decoder for a map field.""" |
key = field_descriptor
tag_bytes = encoder.TagBytes(field_descriptor.number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
local_DecodeVarint = _DecodeVarint
# Can't read _concrete_class yet; might not be initialized.
message_type = field_descriptor.message_type
def DecodeMap(buffer, pos, end, message, field_dict):
submsg = message_type._concrete_class()
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
submsg.Clear()
if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
if is_message_map:
value[submsg.key].MergeFrom(submsg.value)
else:
value[submsg.key] = submsg.value
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeMap |
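The only behavioral difference between message-valued and scalar-valued maps in DecodeMap is MergeFrom versus plain assignment when the same key appears again. A rough dict-based analogy, with hypothetical data and no protobuf involved:

# Conceptual sketch of the is_message_map branch: repeated keys merge for
# message maps, and the last value wins for scalar maps.
def merge_map_entry(value, key, entry, is_message_map):
    if is_message_map:
        value.setdefault(key, {}).update(entry)   # analogous to MergeFrom
    else:
        value[key] = entry                        # plain assignment

scalar_map = {}
merge_map_entry(scalar_map, 'k', 1, is_message_map=False)
merge_map_entry(scalar_map, 'k', 2, is_message_map=False)
assert scalar_map == {'k': 2}                     # last value wins

message_map = {}
merge_map_entry(message_map, 'k', {'a': 1}, is_message_map=True)
merge_map_entry(message_map, 'k', {'b': 2}, is_message_map=True)
assert message_map == {'k': {'a': 1, 'b': 2}}     # fields are merged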
<SYSTEM_TASK:>
A flexible and advanced prediction API.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, dataset, output_type='class', missing_value_action='auto'):
"""
A flexible and advanced prediction API.
The target column is provided during
:func:`~turicreate.decision_tree.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional.
Form of the predictions which are one of:
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'margin': Margin associated with the prediction (not applicable
for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate, classify
Examples
--------
>>> m.predict(testdata)
>>> m.predict(testdata, output_type='probability')
>>> m.predict(testdata, output_type='margin')
""" |
_check_categorical_option_type('output_type', output_type,
['class', 'margin', 'probability', 'probability_vector'])
return super(_Classifier, self).predict(dataset,
output_type=output_type,
missing_value_action=missing_value_action) |
<SYSTEM_TASK:>
get environment variables for slaves
<END_TASK>
<USER_TASK:>
Description:
def slave_envs(self):
"""
get environment variables for slaves
can be passed in as args or envs
""" |
if self.hostIP == 'dns':
host = socket.gethostname()
elif self.hostIP == 'ip':
host = socket.gethostbyname(socket.getfqdn())
else:
host = self.hostIP
return {'rabit_tracker_uri': host,
'rabit_tracker_port': self.port} |
<SYSTEM_TASK:>
get a ring structure that tends to share nodes with the tree
<END_TASK>
<USER_TASK:>
Description:
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
""" |
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst |
<SYSTEM_TASK:>
get a ring connection used to recover local data
<END_TASK>
<USER_TASK:>
Description:
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
""" |
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map |
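A small worked example may help: with a root and two children, the share ring walks the children and closes back to the root. The following is a standalone sketch mirroring the two methods above (free functions instead of methods) on a hypothetical 3-node tree.

# Hypothetical 3-node tree: node 0 is the root, nodes 1 and 2 are children.
def find_share_ring(tree_map, parent_map, r):
    cset = set(tree_map[r]) - set([parent_map[r]])
    if not cset:
        return [r]
    rlst, cnt = [r], 0
    for v in cset:
        vlst = find_share_ring(tree_map, parent_map, v)
        cnt += 1
        if cnt == len(cset):
            vlst.reverse()
        rlst += vlst
    return rlst

tree_map = {0: [1, 2], 1: [0], 2: [0]}
parent_map = {0: -1, 1: 0, 2: 0}
rlst = find_share_ring(tree_map, parent_map, 0)        # e.g. [0, 1, 2]
n = len(rlst)
ring_map = {rlst[i]: (rlst[(i - 1) % n], rlst[(i + 1) % n]) for i in range(n)}
assert ring_map[rlst[0]][1] == rlst[1]                 # each node knows prev/next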
<SYSTEM_TASK:>
get the link map; this is a bit hacky and calls for a better algorithm
<END_TASK>
<USER_TASK:>
Description:
def get_link_map(self, nslave):
"""
get the link map; this is a bit hacky and calls for a better algorithm
to place similar nodes together
""" |
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ ={}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_ |
<SYSTEM_TASK:>
Helper rule to generate a faster alternative to MSVC setup scripts.
<END_TASK>
<USER_TASK:>
Description:
def maybe_rewrite_setup(toolset, setup_script, setup_options, version, rewrite_setup='off'):
"""
Helper rule to generate a faster alternative to MSVC setup scripts.
We used to call MSVC setup scripts directly in every action, however in
newer MSVC versions (10.0+) they make long-lasting registry queries
which have a significant impact on build time.
""" |
result = '"{}" {}'.format(setup_script, setup_options)
# At the moment we only know how to rewrite scripts with cmd shell.
if os.name == 'nt' and rewrite_setup != 'off':
basename = os.path.basename(setup_script)
filename, _ = os.path.splitext(basename)
setup_script_id = 'b2_{}_{}_{}'.format(toolset, version, filename)
if setup_options:
setup_script_id = '{}_{}'.format(setup_script_id, setup_options)
tempdir = os.environ.get('TEMP')
replacement = os.path.join(tempdir, setup_script_id + '.cmd')
if rewrite_setup == 'always' or not os.path.exists(replacement):
import subprocess
# call the setup script and print the environment after doing so
p = subprocess.Popen([
setup_script, setup_options, '>', 'nul', '&&', 'set',
], stdout=subprocess.PIPE, shell=True
)
stdout, _ = p.communicate()
diff_vars = []
for var in stdout.splitlines():
# returns a tuple of ('var-name', '=', 'value').
# partition is being used here (over something like .split())
# for two reasons:
# 1) an environment variable may have a value that contains an '=';
# .partition() will still return the correct key and value pair.
# 2) if the line doesn't contain an '=' at all, then the returned
# tuple will contain only empty strings rather than raising
# an exception.
key, _, value = var.partition('=')
# os.environ handles casing differences here. Usually the
# call to "set" above will produce pascal-cased environment
# variable names, so a normal python dict can't be used here.
# check for the existence of key in case the partitioning() above
# returned an empty key value pair.
if key and os.environ.get(key) != value:
diff_vars.append('SET {}={}'.format(key, value))
if diff_vars:
with open(replacement, 'wb') as f:
f.write(os.linesep.join(diff_vars))
result = '"{}"'.format(replacement)
else:
result = '"{}"'.format(replacement)
return result |
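The core of the rewrite is the environment diff: run the setup script, print the environment with `set`, and keep only the variables that changed. A sketch of that step with the subprocess call stubbed out and hypothetical values:

# Given the text that `set` would print after running a setup script, keep
# only the variables that differ from the current environment.
import os

def env_diff(set_output):
    diff = []
    for line in set_output.splitlines():
        key, _, value = line.partition('=')
        if key and os.environ.get(key) != value:
            diff.append('SET {}={}'.format(key, value))
    return diff

fake_set_output = 'PATH=C:\\VC\\bin;C:\\Windows\nVCINSTALLDIR=C:\\VC\n'  # hypothetical
print(os.linesep.join(env_diff(fake_set_output)))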
<SYSTEM_TASK:>
Automatically create a suitable classifier model based on the provided
<END_TASK>
<USER_TASK:>
Description:
def create(dataset, target, features=None, validation_set = 'auto',
verbose=True):
"""
Automatically create a suitable classifier model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are
provided. For example, a target variable with 'cat' and 'dog' as
possible values is mapped to 0 and 1 respectively with 0 being the base
class and 1 being the reference class. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained classifier model.
See Also
--------
turicreate.boosted_trees_classifier.BoostedTreesClassifier,
turicreate.logistic_classifier.LogisticClassifier,
turicreate.svm_classifier.SVMClassifier,
turicreate.nearest_neighbor_classifier.NearestNeighborClassifier
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
# Selects the best model based on your data.
>>> model = tc.classifier.create(data, target='is_expensive',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.classify(data)
>>> results = model.evaluate(data)
""" |
return _sl.create_classification_with_model_selector(
dataset,
target,
model_selector = _turicreate.extensions._supervised_learning._classifier_available_models,
features = features,
validation_set = validation_set,
verbose = verbose) |
<SYSTEM_TASK:>
Adds the specified column to this SFrame. The number of elements in
<END_TASK>
<USER_TASK:>
Description:
def add_column(self, data, column_name="", inplace=False):
"""
Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data.
column_name : string
The name of the column. If no name is given, a default name is chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
""" |
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.add_vertex_field(data.__proxy__, column_name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.add_edge_field(data.__proxy__, column_name)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).add_column(data, column_name, inplace=inplace) |
<SYSTEM_TASK:>
Adds columns to the SFrame. The number of elements in all columns must
<END_TASK>
<USER_TASK:>
Description:
def add_columns(self, data, column_names=None, inplace=False):
"""
Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
column_names: list of string, optional
A list of column names. All names must be specified. ``column_names`` is
ignored if data is an SFrame.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
""" |
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
column_names = other.column_names()
my_columns = set(self.column_names())
for name in column_names:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in column_names]):
raise TypeError("Invalid column name in list : must all be str")
if inplace:
for (data, name) in zip(datalist, column_names):
self.add_column(data, name)
return self
else:
return super(GFrame, self).add_column(datalist, column_names, inplace=inplace) |
<SYSTEM_TASK:>
Removes the column with the given name from the SFrame.
<END_TASK>
<USER_TASK:>
Description:
def remove_column(self, column_name, inplace=False):
"""
Removes the column with the given name from the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
""" |
if column_name not in self.column_names():
raise KeyError('Cannot find column %s' % column_name)
if inplace:
self.__is_dirty__ = True
try:
with cython_context():
if self._is_vertex_frame():
assert column_name != '__id', 'Cannot remove \"__id\" column'
graph_proxy = self.__graph__.__proxy__.delete_vertex_field(column_name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
assert column_name != '__src_id', 'Cannot remove \"__src_id\" column'
assert column_name != '__dst_id', 'Cannot remove \"__dst_id\" column'
graph_proxy = self.__graph__.__proxy__.delete_edge_field(column_name)
self.__graph__.__proxy__ = graph_proxy
return self
except:
self.__is_dirty__ = False
raise
else:
return super(GFrame, self).remove_column(column_name, inplace=inplace) |
<SYSTEM_TASK:>
Swaps the columns with the given names.
<END_TASK>
<USER_TASK:>
Description:
def swap_columns(self, column_name_1, column_name_2, inplace=False):
"""
Swaps the columns with the given names.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name_1 : string
Name of column to swap
column_name_2 : string
Name of other column to swap
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
""" |
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_name_1, column_name_2)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_name_1, column_name_2)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).swap_columns(column_name_1, column_name_2, inplace=inplace) |
<SYSTEM_TASK:>
Rename the columns using the 'names' dict. This changes the names of
<END_TASK>
<USER_TASK:>
Description:
def rename(self, names, inplace=False):
"""
Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
""" |
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.rename_edge_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).rename(names, inplace=inplace) |
<SYSTEM_TASK:>
Returns the number of rows.
<END_TASK>
<USER_TASK:>
Description:
def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
""" |
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges'] |
<SYSTEM_TASK:>
Returns the column names.
<END_TASK>
<USER_TASK:>
Description:
def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
""" |
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields() |
<SYSTEM_TASK:>
Returns the column types.
<END_TASK>
<USER_TASK:>
Description:
def column_types(self):
"""
Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame.
""" |
if self.__type__ == VERTEX_GFRAME:
return self.__graph__.__proxy__.get_vertex_field_types()
elif self.__type__ == EDGE_GFRAME:
return self.__graph__.__proxy__.get_edge_field_types() |
<SYSTEM_TASK:>
Automatically create a suitable regression model based on the provided
<END_TASK>
<USER_TASK:>
Description:
def create(dataset, target, features=None, validation_set = 'auto',
verbose=True):
"""
Automatically create a suitable regression model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type (int/float).
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained regression model.
See Also
--------
turicreate.linear_regression.LinearRegression,
turicreate.boosted_trees_regression.BoostedTreesRegression
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
# Selects the best model based on your data.
>>> model = tc.regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.predict(data)
>>> results = model.evaluate(data)
""" |
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
if validation_set is None:
validation_set = _turicreate.SFrame()
model_proxy = _turicreate.extensions.create_automatic_regression_model(
dataset, target, validation_set, {})
return _sl.wrap_model_proxy(model_proxy) |
<SYSTEM_TASK:>
Registers the given message type in the local database.
<END_TASK>
<USER_TASK:>
Description:
def RegisterMessage(self, message):
"""Registers the given message type in the local database.
Calls to GetSymbol() and GetMessages() will return messages registered here.
Args:
message: a message.Message, to be registered.
Returns:
The provided message.
""" |
desc = message.DESCRIPTOR
self._classes[desc.full_name] = message
self.pool.AddDescriptor(desc)
return message |
<SYSTEM_TASK:>
Gets all registered messages from a specified file.
<END_TASK>
<USER_TASK:>
Description:
def GetMessages(self, files):
# TODO(amauryfa): Fix the differences with MessageFactory.
"""Gets all registered messages from a specified file.
Only messages already created and registered will be returned; (this is the
case for imported _pb2 modules)
But unlike MessageFactory, this version also returns already defined nested
messages, but does not register any message extensions.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes.
Raises:
KeyError: if a file could not be found.
""" |
def _GetAllMessageNames(desc):
"""Walk a message Descriptor and recursively yields all message names."""
yield desc.full_name
for msg_desc in desc.nested_types:
for full_name in _GetAllMessageNames(msg_desc):
yield full_name
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for msg_desc in file_desc.message_types_by_name.values():
for full_name in _GetAllMessageNames(msg_desc):
try:
result[full_name] = self._classes[full_name]
except KeyError:
# This descriptor has no registered class, skip it.
pass
return result |
<SYSTEM_TASK:>
Check that the predictions are either probabilities or probability vectors.
<END_TASK>
<USER_TASK:>
Description:
def _check_prob_and_prob_vector(predictions):
"""
Check that the predictions are either probabilities or probability vectors.
""" |
from .._deps import numpy
ptype = predictions.dtype
import array
if ptype not in [float, numpy.ndarray, array.array, int]:
err_msg = "Input `predictions` must be of numeric type (for binary "
err_msg += "classification) or array (of probability vectors) for "
err_msg += "multiclass classification."
raise TypeError(err_msg) |
<SYSTEM_TASK:>
Perform basic error checking for the evaluation metrics. Check
<END_TASK>
<USER_TASK:>
Description:
def _supervised_evaluation_error_checking(targets, predictions):
"""
Perform basic error checking for the evaluation metrics. Check
types and sizes of the inputs.
""" |
_raise_error_if_not_sarray(targets, "targets")
_raise_error_if_not_sarray(predictions, "predictions")
if (len(targets) != len(predictions)):
raise _ToolkitError(
"Input SArrays 'targets' and 'predictions' must be of the same length.") |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def max_error(targets, predictions):
r"""
Compute the maximum absolute deviation between two SArrays.
Parameters
----------
targets : SArray[float or int]
An SArray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The maximum absolute deviation error between the two SArrays.
See Also
--------
rmse
Notes
-----
The maximum absolute deviation between two vectors, x and y, is defined as:
.. math::
\textrm{max error} = \max_{i \in 1,\ldots,N} \|x_i - y_i\|
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.max_error(targets, predictions)
2.5
""" |
_supervised_evaluation_error_checking(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "max_error", {}) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def rmse(targets, predictions):
r"""
Compute the root mean squared error between two SArrays.
Parameters
----------
targets : SArray[float or int]
An SArray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The RMSE between the two SArrays.
See Also
--------
max_error
Notes
-----
The root mean squared error between two vectors, x and y, is defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - y_i)^2}
References
----------
- `Wikipedia - root-mean-square deviation
<http://en.wikipedia.org/wiki/Root-mean-square_deviation>`_
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.rmse(targets, predictions)
1.2749117616525465
""" |
_supervised_evaluation_error_checking(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "rmse", {}) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def confusion_matrix(targets, predictions):
r"""
Compute the confusion matrix for classifier predictions.
Parameters
----------
targets : SArray
Ground truth class labels (cannot be of type float).
predictions : SArray
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``. The predictions
SArray cannot be of type float.
Returns
-------
out : SFrame
An SFrame containing counts for 'target_label', 'predicted_label' and
'count' corresponding to each pair of true and predicted labels.
See Also
--------
accuracy
Examples
--------
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([1, 0, 1, 0])
>>> turicreate.evaluation.confusion_matrix(targets, predictions)
""" |
_supervised_evaluation_error_checking(targets, predictions)
_check_same_type_not_float(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "confusion_matrix_no_map", {}) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def auc(targets, predictions, average='macro', index_map=None):
r"""
Compute the area under the ROC curve for the given targets and predictions.
Parameters
----------
targets : SArray
An SArray containing the observed values. For binary classification,
the alpha-numerically first category is considered the reference
category.
predictions : SArray
Prediction probability that corresponds to each target value. This must
be of same length as ``targets``.
average : string, [None, 'macro' (default)]
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
index_map : dict[int], [None (default)]
For binary classification, a dictionary mapping the two target labels to
either 0 (negative) or 1 (positive). For multi-class classification, a
dictionary mapping potential target labels to the associated index into
the vectors in ``predictions``.
Returns
-------
out : float (for binary classification) or dict[float]
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
roc_curve, confusion_matrix
Examples
--------
.. sourcecode:: python
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
This metric also works when the targets are strings (Here "cat" is chosen
as the reference class).
.. sourcecode:: python
>>> targets = turicreate.SArray(["cat", "dog", "dog", "cat"])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
For the multi-class setting, the auc-score can be averaged.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ 1, 0, 2, 1])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
... [.9, .1, 0.0],
... [.8, .1, 0.1],
... [.3, .6, 0.1]])
# Macro average of the scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = 'macro')
0.8888888888888888
# Scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = None)
{0: 1.0, 1: 1.0, 2: 0.6666666666666666}
This metric also works for "string" targets in the multi-class setting
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ "dog", "cat", "foosa", "dog"])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
... [.9, .1, 0.0],
... [.8, .1, 0.1],
... [.3, .6, 0.1]])
# Macro average.
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.8888888888888888
# Score for each class.
>>> auc = turicreate.evaluation.auc(targets, predictions, average=None)
{'cat': 1.0, 'dog': 1.0, 'foosa': 0.6666666666666666}
""" |
_supervised_evaluation_error_checking(targets, predictions)
_check_categorical_option_type('average', average,
['macro', None])
_check_prob_and_prob_vector(predictions)
_check_target_not_float(targets)
_check_index_map(index_map)
opts = {"average": average,
"binary": predictions.dtype in [int, float]}
if index_map is not None:
opts['index_map'] = index_map
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "auc", opts) |
<SYSTEM_TASK:>
Convert a trained XGBoost model to Core ML format.
<END_TASK>
<USER_TASK:>
Description:
def convert(model, feature_names = None, target = 'target', force_32bit_float = True):
"""
Convert a trained XGBoost model to Core ML format.
Parameters
----------
model : Booster
A trained XGboost tree model.
feature_names: [str] | str
Names of input features that will be exposed in the Core ML model
interface.
Can be set to one of the following:
- None for using the feature names from the model.
- List of names of the input features that should be exposed in the
interface to the Core ML model. These input features are in the same
order as the XGboost model.
target: str
Name of the output feature name exposed to the Core ML model.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.xgboost.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
""" |
return _MLModel(_convert_tree_ensemble(model, feature_names, target, force_32bit_float = force_32bit_float)) |
<SYSTEM_TASK:>
Fit a transformer using the SFrame `data`.
<END_TASK>
<USER_TASK:>
Description:
def fit(self, data):
"""
Fit a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted version of the object)
See Also
--------
transform, fit_transform
Examples
--------
.. sourcecode:: python
{examples}
""" |
_raise_error_if_not_sframe(data, "data")
self.__proxy__.fit(data)
return self |
<SYSTEM_TASK:>
For each example in the dataset, extract the leaf indices of
<END_TASK>
<USER_TASK:>
Description:
def extract_features(self, dataset, missing_value_action='auto'):
"""
For each example in the dataset, extract the leaf indices of
each tree as features.
For multiclass classification, each leaf index contains #num_class
numbers.
The returned feature vectors can be used as input to train another
supervised learning model such as a
:py:class:`~turicreate.logistic_classifier.LogisticClassifier`,
an :py:class:`~turicreate.svm_classifier.SVMClassifier`.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SArray
An SArray of dtype array.array containing extracted features.
Examples
--------
>>> data = turicreate.SFrame(
'https://static.turi.com/datasets/regression/houses.csv')
>>> # Regression Tree Models
>>> data['regression_tree_features'] = model.extract_features(data)
>>> # Classification Tree Models
>>> data['classification_tree_features'] = model.extract_features(data)
""" |
_raise_error_if_not_sframe(dataset, "dataset")
if missing_value_action == 'auto':
missing_value_action = select_default_missing_value_policy(self,
'extract_features')
return self.__proxy__.extract_features(dataset, missing_value_action) |
<SYSTEM_TASK:>
Extract features along with all the missing features associated with
<END_TASK>
<USER_TASK:>
Description:
def _extract_features_with_missing(self, dataset, tree_id = 0,
missing_value_action = 'auto'):
"""
Extract features along with all the missing features associated with
a dataset.
Parameters
----------
dataset : SFrame
Dataset on which to make predictions.
tree_id : int, optional
The index of the tree whose leaf indices and missing features are extracted.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
A table with two columns:
- leaf_id : Leaf id of the corresponding tree.
- missing_features : A list of missing feature, index pairs
""" |
# Extract the features from only one tree.
sf = dataset
sf['leaf_id'] = self.extract_features(dataset, missing_value_action)\
.vector_slice(tree_id)\
.astype(int)
tree = self._get_tree(tree_id)
type_map = dict(zip(dataset.column_names(), dataset.column_types()))
def get_missing_features(row):
x = row['leaf_id']
path = tree.get_prediction_path(x)
missing_id = [] # List of "missing_id" children.
# For each node in the prediction path.
for p in path:
fname = p['feature']
idx = p['index']
f = row[fname]
if type_map[fname] in [int, float]:
if f is None:
missing_id.append(p['child_id'])
elif type_map[fname] in [dict]:
if f is None:
missing_id.append(p['child_id'])
if idx not in f:
missing_id.append(p['child_id'])
else:
pass
return missing_id
sf['missing_id'] = sf.apply(get_missing_features, list)
return sf[['leaf_id', 'missing_id']] |
<SYSTEM_TASK:>
Sort a dictionary of classes and corresponding vote totals according to the
<END_TASK>
<USER_TASK:>
Description:
def _sort_topk_votes(x, k):
"""
Sort a dictionary of classes and corresponding vote totals according to the
votes, then truncate to the highest 'k' classes.
""" |
y = sorted(x.items(), key=lambda x: x[1], reverse=True)[:k]
return [{'class': i[0], 'votes': i[1]} for i in y] |
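A quick worked example of the helper above with hypothetical vote tallies:

def sort_topk_votes(x, k):
    y = sorted(x.items(), key=lambda kv: kv[1], reverse=True)[:k]
    return [{'class': c, 'votes': v} for c, v in y]

print(sort_topk_votes({'cat': 3, 'dog': 5, 'fossa': 1}, 2))
# [{'class': 'dog', 'votes': 5}, {'class': 'cat', 'votes': 3}]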
<SYSTEM_TASK:>
Construct a composite distance function for a set of features, based on the
<END_TASK>
<USER_TASK:>
Description:
def _construct_auto_distance(features, column_types):
"""
Construct a composite distance function for a set of features, based on the
types of those features.
NOTE: This function is very similar to
:func:`_nearest_neighbors.choose_auto_distance`. The function is separate
because the auto-distance logic differs for each nearest
neighbors-based toolkit.
Parameters
----------
features : list[str]
Names of the features for which to construct a distance function.
column_types : dict(string, type)
Names and types of all columns.
Returns
-------
dist : list[list]
A composite distance function. Each element of the inner list has three
elements: a list of feature names (strings), a distance function name
(string), and a weight (float).
""" |
## Put input features into buckets based on type.
numeric_ftrs = []
string_ftrs = []
dict_ftrs = []
for ftr in features:
try:
ftr_type = column_types[ftr]
except:
raise ValueError("The specified feature does not exist in the " +
"input data.")
if ftr_type == str:
string_ftrs.append(ftr)
elif ftr_type == dict:
dict_ftrs.append(ftr)
elif ftr_type in [int, float, _array.array]:
numeric_ftrs.append(ftr)
else:
raise TypeError("Unable to automatically construct a distance " +
"function for feature '{}'. ".format(ftr) +
"For the nearest neighbor classifier, features " +
"must be of type integer, float, string, dictionary, " +
"or array.array.")
## Construct the distance function
dist = []
for ftr in string_ftrs:
dist.append([[ftr], 'levenshtein', 1])
if len(dict_ftrs) > 0:
dist.append([dict_ftrs, 'weighted_jaccard', len(dict_ftrs)])
if len(numeric_ftrs) > 0:
dist.append([numeric_ftrs, 'euclidean', len(numeric_ftrs)])
return dist |
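The bucketing above is easy to see on a hypothetical feature set with one string, one dictionary, and two numeric columns; the following sketch reproduces only the grouping logic, not the toolkit function itself:

import array
column_types = {'name': str, 'tags': dict, 'height': int, 'weight': float}
features = ['name', 'tags', 'height', 'weight']
numeric = [f for f in features if column_types[f] in (int, float, array.array)]
strings = [f for f in features if column_types[f] is str]
dicts = [f for f in features if column_types[f] is dict]
dist = ([[[f], 'levenshtein', 1] for f in strings]
        + ([[dicts, 'weighted_jaccard', len(dicts)]] if dicts else [])
        + ([[numeric, 'euclidean', len(numeric)]] if numeric else []))
print(dist)
# [[['name'], 'levenshtein', 1], [['tags'], 'weighted_jaccard', 1],
#  [['height', 'weight'], 'euclidean', 2]]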
<SYSTEM_TASK:>
A function to load a previously saved NearestNeighborClassifier model.
<END_TASK>
<USER_TASK:>
Description:
def _load_version(cls, state, version):
"""
A function to load a previously saved NearestNeighborClassifier model.
Parameters
----------
state : dict
The model state, as saved previously by this class.
version : int
Version number maintained by the class writer.
""" |
assert(version == cls._PYTHON_NN_CLASSIFIER_MODEL_VERSION)
knn_model = _tc.nearest_neighbors.NearestNeighborsModel(state['knn_model'])
del state['knn_model']
state['_target_type'] = eval(state['_target_type'])
return cls(knn_model, state) |
<SYSTEM_TASK:>
Evaluate the model's predictive accuracy. This is done by predicting the
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, dataset, metric='auto', max_neighbors=10, radius=None):
"""
Evaluate the model's predictive accuracy. This is done by predicting the
target class for instances in a new dataset and comparing to known
target values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Returns all available metrics.
- 'accuracy': Classification accuracy.
- 'confusion_matrix': An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve': An SFrame containing information needed for an roc
curve (binary classification only).
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
Returns
-------
out : dict
Evaluation results. The dictionary keys are *accuracy* and
*confusion_matrix* and *roc_curve* (if applicable).
See also
--------
create, predict, predict_topk, classify
Notes
-----
- Because the model randomly breaks ties between predicted classes, the
results of repeated calls to `evaluate` method may differ.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ans = m.evaluate(sf_train, max_neighbors=2,
... metric='confusion_matrix')
>>> print ans['confusion_matrix']
+--------------+-----------------+-------+
| target_label | predicted_label | count |
+--------------+-----------------+-------+
| cat | dog | 1 |
| dog | dog | 2 |
| fossa | dog | 1 |
+--------------+-----------------+-------+
""" |
## Validate the metric name
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'accuracy', 'confusion_matrix', 'roc_curve'])
## Make sure the input dataset has a target column with an appropriate
# type.
target = self.target
_raise_error_if_column_exists(dataset, target, 'dataset', target)
if not dataset[target].dtype == str and not dataset[target].dtype == int:
raise TypeError("The target column of the evaluation dataset must "
"contain integers or strings.")
if self.num_classes != 2:
if (metric == 'roc_curve') or (metric == ['roc_curve']):
err_msg = "Currently, ROC curve is not supported for "
err_msg += "multi-class classification in this model."
raise _ToolkitError(err_msg)
else:
warn_msg = "WARNING: Ignoring `roc_curve`. "
warn_msg += "Not supported for multi-class classification."
print(warn_msg)
## Compute predictions with the input dataset.
ystar = self.predict(dataset, output_type='class',
max_neighbors=max_neighbors, radius=radius)
ystar_prob = self.predict(dataset, output_type='probability',
max_neighbors=max_neighbors, radius=radius)
## Compile accuracy metrics
results = {}
if metric in ['accuracy', 'auto']:
results['accuracy'] = _evaluation.accuracy(targets=dataset[target],
predictions=ystar)
if metric in ['confusion_matrix', 'auto']:
results['confusion_matrix'] = \
_evaluation.confusion_matrix(targets=dataset[target],
predictions=ystar)
if self.num_classes == 2:
if metric in ['roc_curve', 'auto']:
results['roc_curve'] = \
_evaluation.roc_curve(targets=dataset[target],
predictions=ystar_prob)
return results |
<SYSTEM_TASK:>
First fit a transformer using the SFrame `data` and then return a transformed
<END_TASK>
<USER_TASK:>
Description:
def fit_transform(self, data):
"""
First fit a transformer using the SFrame `data` and then return a transformed
version of `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer. The same data is then also
transformed.
Returns
-------
Transformed SFrame.
See Also
--------
transform, fit_transform
Notes
-----
- The default implementation calls fit() and then calls transform().
You may override this function with a more efficient implementation.
Examples
--------
.. sourcecode:: python
>>> transformed_sf = chain.fit_transform(sf)
""" |
if not self._transformers:
return self._preprocess(data)
transformed_data = self._preprocess(data)
final_step = self._transformers[-1]
return final_step[1].fit_transform(transformed_data) |
<SYSTEM_TASK:>
A function to load an object with a specific version of the class.
<END_TASK>
<USER_TASK:>
Description:
def _load_version(cls, unpickler, version):
"""
A function to load an object with a specific version of the class.
Parameters
----------
unpickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer.
""" |
obj = unpickler.load()
return TransformerChain(obj._state["steps"]) |
<SYSTEM_TASK:>
Compute the PageRank for each vertex in the graph. Return a model object
<END_TASK>
<USER_TASK:>
Description:
def create(graph, reset_probability=0.15,
threshold=1e-2,
max_iterations=20,
_single_precision=False,
_distributed='auto',
verbose=True):
"""
Compute the PageRank for each vertex in the graph. Return a model object
with total PageRank as well as the PageRank value for each vertex in the
graph.
Parameters
----------
graph : SGraph
The graph on which to compute the pagerank value.
reset_probability : float, optional
Probability that a random surfer jumps to an arbitrary page.
threshold : float, optional
Threshold for convergence, measured in the L1 norm
(the sum of absolute value) of the delta of each vertex's
pagerank value.
max_iterations : int, optional
The maximum number of iterations to run.
_single_precision : bool, optional
If true, running pagerank in single precision. The resulting
pagerank values may not be accurate for large graph, but
should run faster and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : PagerankModel
References
----------
- `Wikipedia - PageRank <http://en.wikipedia.org/wiki/PageRank>`_
- Page, L., et al. (1998) `The PageRank Citation Ranking: Bringing Order to
the Web <http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.pagerank.PageRankModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap')
>>> pr = turicreate.pagerank.create(g)
We can obtain the page rank corresponding to each vertex in the graph ``g``
using:
>>> pr_out = pr['pagerank'] # SFrame
We can add the new pagerank field to the original graph g using:
>>> g.vertices['pagerank'] = pr['graph'].vertices['pagerank']
Note that the task above does not require a join because the vertex
ordering is preserved through ``create()``.
See Also
--------
PagerankModel
""" |
from turicreate._cython.cy_server import QuietProgress
if not isinstance(graph, _SGraph):
raise TypeError('graph input must be a SGraph object.')
opts = {'threshold': threshold, 'reset_probability': reset_probability,
'max_iterations': max_iterations,
'single_precision': _single_precision,
'graph': graph.__proxy__}
with QuietProgress(verbose):
params = _tc.extensions._toolkits.graph.pagerank.create(opts)
model = params['model']
return PagerankModel(model) |
<SYSTEM_TASK:>
Adds a dependency from 'targets' to 'sources'
<END_TASK>
<USER_TASK:>
Description:
def add_dependency (self, targets, sources):
"""Adds a dependency from 'targets' to 'sources'
Both 'targets' and 'sources' can be either list
of target names, or a single target name.
""" |
if isinstance (targets, str):
targets = [targets]
if isinstance (sources, str):
sources = [sources]
assert is_iterable(targets)
assert is_iterable(sources)
for target in targets:
for source in sources:
self.do_add_dependency (target, source) |
<SYSTEM_TASK:>
Gets the value of `variable` set on the first target in `targets`.
<END_TASK>
<USER_TASK:>
Description:
def get_target_variable(self, targets, variable):
Gets the value of `variable` set on the first target in `targets`.
Args:
targets (str or list): one or more targets to get the variable from.
variable (str): the name of the variable
Returns:
the value of `variable` set on `targets` (list)
Example:
>>> ENGINE = get_manager().engine()
>>> ENGINE.set_target_variable(targets, 'MY-VAR', 'Hello World')
>>> ENGINE.get_target_variable(targets, 'MY-VAR')
['Hello World']
Equivalent Jam code:
MY-VAR on $(targets) = "Hello World" ;
echo [ on $(targets) return $(MY-VAR) ] ;
"Hello World"
""" |
if isinstance(targets, str):
targets = [targets]
assert is_iterable(targets)
assert isinstance(variable, basestring)
return bjam_interface.call('get-target-variable', targets, variable) |
<SYSTEM_TASK:>
Sets a target variable.
<END_TASK>
<USER_TASK:>
Description:
def set_target_variable (self, targets, variable, value, append=0):
""" Sets a target variable.
The 'variable' will be available to bjam when it decides
where to generate targets, and will also be available to
the updating rule for that 'target'.
""" |
if isinstance (targets, str):
targets = [targets]
if isinstance(value, str):
value = [value]
assert is_iterable(targets)
assert isinstance(variable, basestring)
assert is_iterable(value)
if targets:
if append:
bjam_interface.call("set-target-variable", targets, variable, value, "true")
else:
bjam_interface.call("set-target-variable", targets, variable, value) |
<SYSTEM_TASK:>
Binds a target to the corresponding update action.
<END_TASK>
<USER_TASK:>
Description:
def set_update_action (self, action_name, targets, sources, properties=None):
""" Binds a target to the corresponding update action.
If target needs to be updated, the action registered
with action_name will be used.
The 'action_name' must be previously registered by
either 'register_action' or 'register_bjam_action'
method.
""" |
if isinstance(targets, str):
targets = [targets]
if isinstance(sources, str):
sources = [sources]
if properties is None:
properties = property_set.empty()
assert isinstance(action_name, basestring)
assert is_iterable(targets)
assert is_iterable(sources)
assert(isinstance(properties, property_set.PropertySet))
self.do_set_update_action (action_name, targets, sources, properties) |
<SYSTEM_TASK:>
Creates a new build engine action.
<END_TASK>
<USER_TASK:>
Description:
def register_action (self, action_name, command='', bound_list = [], flags = [],
function = None):
"""Creates a new build engine action.
Creates on bjam side an action named 'action_name', with
'command' as the command to be executed, 'bound_list'
naming the list of variables bound when the command is executed
and specified flag.
If 'function' is not None, it should be a callable taking three
parameters:
- targets
- sources
- instance of the property_set class
This function will be called by set_update_action, and can
set additional target variables.
""" |
assert isinstance(action_name, basestring)
assert isinstance(command, basestring)
assert is_iterable(bound_list)
assert is_iterable(flags)
assert function is None or callable(function)
bjam_flags = reduce(operator.or_,
(action_modifiers[flag] for flag in flags), 0)
# We allow command to be empty so that we can define 'action' as pure
# python function that would do some conditional logic and then relay
# to other actions.
assert command or function
if command:
bjam_interface.define_action(action_name, command, bound_list, bjam_flags)
self.actions[action_name] = BjamAction(
action_name, function, has_command=bool(command)) |
<SYSTEM_TASK:>
Informs self that 'action_name' is declared in bjam.
<END_TASK>
<USER_TASK:>
Description:
def register_bjam_action (self, action_name, function=None):
"""Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam.
""" |
# We allow duplicate calls to this rule for the same
# action name. This way, jamfile rules that take action names
# can just register them without specially checking if
# action is already registered.
assert isinstance(action_name, basestring)
assert function is None or callable(function)
if action_name not in self.actions:
self.actions[action_name] = BjamNativeAction(action_name, function) |
<SYSTEM_TASK:>
Returns the pixel data stored in the Image object.
<END_TASK>
<USER_TASK:>
Description:
def pixel_data(self):
"""
Returns the pixel data stored in the Image object.
Returns
-------
out : numpy.array
The pixel data of the Image object. It returns a multi-dimensional
numpy array, where the shape of the array represents the shape of
the image (height, width, channels).
See Also
--------
width, channels, height
Examples
--------
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> image_array = img.pixel_data
""" |
from .. import extensions as _extensions
data = _np.zeros((self.height, self.width, self.channels), dtype=_np.uint8)
_extensions.image_load_to_numpy(self, data.ctypes.data, data.strides)
if self.channels == 1:
data = data.squeeze(2)
return data |
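Because pixel_data is a plain numpy array of shape (height, width, channels), standard numpy operations apply directly. A sketch assuming a 3-channel RGB image, with a random stand-in array in place of img.pixel_data:

import numpy as np
rgb = np.random.randint(0, 256, size=(4, 6, 3), dtype=np.uint8)  # stand-in for img.pixel_data
gray = rgb.mean(axis=2).astype(np.uint8)                         # naive grayscale conversion
print(gray.shape)   # (4, 6)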
<SYSTEM_TASK:>
Return predictions for the model. The kwargs gets passed into the
<END_TASK>
<USER_TASK:>
Description:
def predict(self, data, useCPUOnly=False, **kwargs):
"""
Return predictions for the model. The kwargs gets passed into the
model as a dictionary.
Parameters
----------
data : dict[str, value]
Dictionary of data to make predictions from where the keys are
the names of the input features.
useCPUOnly : bool
Set to true to restrict computation to use only the CPU. Defaults to False.
Returns
-------
out : dict[str, value]
Predictions as a dictionary where each key is the output feature
name.
Examples
--------
>>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240}
>>> predictions = model.predict(data)
""" |
if self.__proxy__:
return self.__proxy__.predict(data,useCPUOnly)
else:
if _macos_version() < (10, 13):
raise Exception('Model prediction is only supported on macOS version 10.13 or later.')
try:
from ..libcoremlpython import _MLModelProxy
except:
_MLModelProxy = None
if not _MLModelProxy:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.')
elif _MLModelProxy.maximum_supported_specification_version() < self._spec.specificationVersion:
engineVersion = _MLModelProxy.maximum_supported_specification_version()
raise Exception('The specification has version ' + str(self._spec.specificationVersion)
+ ' but the Core ML framework version installed only supports Core ML model specification version '
+ str(engineVersion) + ' or older.')
elif _has_custom_layer(self._spec):
raise Exception('This model contains a custom neural network layer, so predict is not supported.')
else:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.') |