<SYSTEM_TASK:>
Delete attributes from a given item.
<END_TASK>
<USER_TASK:>
Description:
def delete_attributes(self, item_name, attributes=None,
expected_values=None):
"""
Delete attributes from a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
:type attributes: dict, list or :class:`boto.sdb.item.Item`
:param attributes: Either a list of attribute names, in which case
all values associated with each named attribute will be deleted, or
a dict or Item mapping each attribute name to the list of values
to delete. If no value is supplied, all attribute name/values for
the item will be deleted.
:type expected_values: list
:param expected_values: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
""" |
return self.connection.delete_attributes(self, item_name, attributes,
expected_values) |
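A minimal usage sketch; the connection call, domain name, item name, and attribute values below are placeholders, assuming configured boto credentials:

import boto

sdb = boto.connect_sdb()                     # credentials come from the environment
domain = sdb.get_domain('my_domain')         # placeholder domain name
# Delete all values of 'color' from 'item1', but only if the item's
# 'status' attribute currently has the value 'inactive'.
domain.delete_attributes('item1', attributes=['color'],
                         expected_values=['status', 'inactive'])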
<SYSTEM_TASK:>
Returns a set of Attributes for item names within domain_name that match the query.
<END_TASK>
<USER_TASK:>
Description:
def select(self, query='', next_token=None, consistent_read=False, max_items=None):
"""
Returns a set of Attributes for item names within domain_name that match the query.
The query must be expressed using the SELECT-style syntax rather than the
original SimpleDB query language.
:type query: string
:param query: The SimpleDB query to be performed.
:rtype: iter
:return: An iterator containing the results. This is actually a generator
function that will iterate across all search results, not just the
first page.
""" |
return SelectResultSet(self, query, max_items=max_items, next_token=next_token,
consistent_read=consistent_read) |
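A usage sketch, reusing the placeholder domain from the previous example; the returned iterator follows NextToken pagination transparently:

query = 'select * from `my_domain` where status = "active"'
for item in domain.select(query, consistent_read=True):
    print(item.name, dict(item))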
<SYSTEM_TASK:>
Load this domain based on an XML document
<END_TASK>
<USER_TASK:>
Description:
def from_xml(self, doc):
"""Load this domain based on an XML document""" |
import xml.sax
handler = DomainDumpParser(self)
xml.sax.parse(doc, handler)
return handler |
<SYSTEM_TASK:>
Check if either the datastream content or profile fields have changed
<END_TASK>
<USER_TASK:>
Description:
def isModified(self):
"""Check if either the datastream content or profile fields have changed
and should be saved to Fedora.
:rtype: boolean
""" |
# NOTE: only check content digest if locally cached content is set
# (content already pulled or new content set); otherwise this
# results in pulling content down just to checksum it!
return bool(self.info_modified or
            (self._content and self._content_digest() != self.digest)) |
<SYSTEM_TASK:>
Save datastream content and any changed datastream profile
<END_TASK>
<USER_TASK:>
Description:
def save(self, logmessage=None):
"""Save datastream content and any changed datastream profile
information to Fedora.
:rtype: boolean for success
""" |
if self.as_of_date is not None:
raise RuntimeError('Saving is not implemented for datastream versions')
save_opts = {}
if self.info_modified:
if self.label:
save_opts['dsLabel'] = self.label
if self.mimetype:
save_opts['mimeType'] = self.mimetype
if self.versionable is not None:
save_opts['versionable'] = self.versionable
if self.state:
save_opts['dsState'] = self.state
if self.format:
save_opts['formatURI'] = self.format
if self.checksum:
if self.checksum_modified:
save_opts['checksum'] = self.checksum
if self.checksum_type:
save_opts['checksumType'] = self.checksum_type
# FIXME: should be able to handle checksums
# NOTE: as of Fedora 3.2, updating content without specifying mimetype fails (Fedora bug?)
if 'mimeType' not in save_opts:
# if datastreamProfile has not been pulled from fedora, use configured default mimetype
if self._info is not None:
save_opts['mimeType'] = self.mimetype
else:
save_opts['mimeType'] = self.defaults['mimetype']
# if datastream location has been set, use that for content
# otherwise, use local content (if any)
if self.ds_location is not None:
save_opts['dsLocation'] = self.ds_location
else:
save_opts['content'] = self._raw_content()
if self.exists:
# if not versionable, make a backup to back out changes if object save fails
if not self.versionable:
self._backup()
# if this datastream already exists, use modifyDatastream API call
r = self.obj.api.modifyDatastream(self.obj.pid, self.id,
logMessage=logmessage, **save_opts)
# expects 200 ok
success = (r.status_code == requests.codes.ok)
else:
# if this datastream does not yet exist, add it
r = self.obj.api.addDatastream(self.obj.pid, self.id,
controlGroup=self.defaults['control_group'],
logMessage=logmessage, **save_opts)
# expects 201 created
success = (r.status_code == requests.codes.created)
# clean-up required for object info after adding a new datastream
if success:
# update exists flag - if add succeeded, the datastream exists now
self.exists = True
# if the datastream content is a file-like object, clear it out
# (we don't want to attempt to save the current file contents again,
# particularly since the file is not guaranteed to still be open)
if 'content' in save_opts and hasattr(save_opts['content'], 'read'):
self._content = None
self._content_modified = False
if success:
# update modification indicators
self.info_modified = False
self.checksum_modified = False
self.digest = self._content_digest()
# clear out ds location
self.ds_location = None
return success |
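A hypothetical sketch of the save workflow, assuming ds is an existing DatastreamObject whose content and label have been modified locally:

ds.label = 'Updated text'                  # marks profile info as modified
ds.content = 'replacement content'
if ds.isModified():
    saved = ds.save(logmessage='replaced content and updated label')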
<SYSTEM_TASK:>
Undo the last change made to the datastream content and profile, effectively
<END_TASK>
<USER_TASK:>
Description:
def undo_last_save(self, logMessage=None):
"""Undo the last change made to the datastream content and profile, effectively
reverting to the object state in Fedora as of the specified timestamp.
For a versioned datastream, this will purge the most recent datastream.
For an unversioned datastream, this will overwrite the last changes with
a cached version of any content and/or info pulled from Fedora.
""" |
# NOTE: currently not clearing any of the object caches and backups
# of fedora content and datastream info, as it is unclear what (if anything)
# should be cleared
if self.versionable:
# if this is a versioned datastream, get datastream history
# and purge the most recent version
last_save = self.history().versions[0].created # fedora returns most recent first
r = self.obj.api.purgeDatastream(self.obj.pid, self.id,
datetime_to_fedoratime(last_save),
logMessage=logMessage)
return r.status_code == requests.codes.ok
else:
# for an unversioned datastream, update with any content and info
# backups that were pulled from Fedora before any modifications were made
args = {}
if self._content_backup is not None:
args['content'] = self._content_backup
if self._info_backup is not None:
args.update(self._info_backup)
r = self.obj.api.modifyDatastream(self.obj.pid, self.id,
logMessage=logMessage, **args)
return r.status_code == requests.codes.ok |
<SYSTEM_TASK:>
Replace a uri reference everywhere it appears in the graph with
<END_TASK>
<USER_TASK:>
Description:
def replace_uri(self, src, dest):
"""Replace a uri reference everywhere it appears in the graph with
another one. It could appear as the subject, predicate, or object of
a statement, so for each position loop through each statement that
uses the reference in that position, remove the old statement, and
add the replacement. """ |
# NB: The hypothetical statement <src> <src> <src> will be removed
# and re-added several times. The subject block will remove it and
# add <dest> <src> <src>. The predicate block will remove that and
# add <dest> <dest> <src>. The object block will then remove that
# and add <dest> <dest> <dest>.
# NB2: The list() call here is necessary. .triples() is a generator:
# It calculates its matches as it progressively iterates through the
# graph. Actively changing the graph inside the for loop while the
# generator is in the middle of examining it risks invalidating the
# generator and could conceivably make it Just Break, depending on
# the implementation of .triples(). Wrapping .triples() in a list()
# forces it to exhaust the generator, running through the entire
# graph to calculate the list of matches before continuing to the
# for loop.
subject_triples = list(self.content.triples((src, None, None)))
for s, p, o in subject_triples:
self.content.remove((src, p, o))
self.content.add((dest, p, o))
predicate_triples = list(self.content.triples((None, src, None)))
for s, p, o in predicate_triples:
self.content.remove((s, src, o))
self.content.add((s, dest, o))
object_triples = list(self.content.triples((None, None, src)))
for s, p, o in object_triples:
self.content.remove((s, p, src))
self.content.add((s, p, dest)) |
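The same remove-then-add pattern, shown standalone on a plain rdflib Graph (the URIs are illustrative):

from rdflib import Graph, URIRef

g = Graph()
src, dest = URIRef('info:fedora/new:1'), URIRef('info:fedora/demo:1')
g.add((src, URIRef('urn:rel'), URIRef('urn:obj')))
# exhaust the generator with list() before mutating the graph, as noted above
for s, p, o in list(g.triples((src, None, None))):
    g.remove((s, p, o))
    g.add((dest, p, o))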
<SYSTEM_TASK:>
If the RDF datastream refers to the object by the default dummy
<END_TASK>
<USER_TASK:>
Description:
def _prepare_ingest(self):
"""If the RDF datastream refers to the object by the default dummy
uriref then we need to replace that dummy reference with a real one
before we ingest the object.""" |
# see also commentary on DigitalObject.DUMMY_URIREF
self.replace_uri(self.obj.DUMMY_URIREF, self.obj.uriref) |
<SYSTEM_TASK:>
Get information about a particular datastream belonging to this object.
<END_TASK>
<USER_TASK:>
Description:
def getDatastreamProfile(self, dsid, date=None):
"""Get information about a particular datastream belonging to this object.
:param dsid: datastream id
:param date: optional datetime; if specified, return the profile of the datastream as of that date
:rtype: :class:`DatastreamProfile`
""" |
# NOTE: used by DatastreamObject
if self._create:
return None
r = self.api.getDatastream(self.pid, dsid, asOfDateTime=date)
return parse_xml_object(DatastreamProfile, r.content, r.url) |
<SYSTEM_TASK:>
Takes a list of datastreams, runs undo save on all of them,
<END_TASK>
<USER_TASK:>
Description:
def _undo_save(self, datastreams, logMessage=None):
Takes a list of datastreams, runs undo_last_save on all of them,
and returns a list of the datastreams where the undo succeeded.
:param datastreams: list of datastream ids (should be in self.dscache)
:param logMessage: optional log message
""" |
return [ds for ds in datastreams if self.dscache[ds].undo_last_save(logMessage)] |
<SYSTEM_TASK:>
Get all datastreams that belong to this object.
<END_TASK>
<USER_TASK:>
Description:
def _get_datastreams(self):
"""
Get all datastreams that belong to this object.
Returns a dictionary; key is datastream id, value is an :class:`ObjectDatastream`
for that datastream.
:rtype: dictionary
""" |
if self._create:
# FIXME: should we default to the datastreams defined in code?
return {}
else:
# NOTE: can be accessed as a cached class property via ds_list
r = self.api.listDatastreams(self.pid)
dsobj = parse_xml_object(ObjectDatastreams, r.content, r.url)
return dict([(ds.dsid, ds) for ds in dsobj.datastreams]) |
<SYSTEM_TASK:>
Check if this object subscribes to the specified content model.
<END_TASK>
<USER_TASK:>
Description:
def has_model(self, model):
"""
Check if this object subscribes to the specified content model.
:param model: URI for the content model, as a string
(currently only accepted in ``info:fedora/foo:###`` format)
:rtype: boolean
""" |
# TODO:
# - accept DigitalObject for model?
# - convert model pid to info:fedora/ form if not passed in that way?
try:
rels = self.rels_ext.content
except RequestFailed:
# if rels-ext can't be retrieved, confirm this object does not have a RELS-EXT
# (in which case, it does not subscribe to the specified content model)
if "RELS-EXT" not in self.ds_list.keys():
return False
else:
raise
st = (self.uriref, modelns.hasModel, URIRef(model))
return st in rels |
<SYSTEM_TASK:>
Get a list of content models the object subscribes to.
<END_TASK>
<USER_TASK:>
Description:
def get_models(self):
"""
Get a list of content models the object subscribes to.
""" |
try:
rels = self.rels_ext.content
except RequestFailed:
# if rels-ext can't be retrieved, confirm this object does not have a RELS-EXT
# (in which case, it does not have any content models)
if "RELS-EXT" not in self.ds_list.keys():
return []
else:
raise
return list(rels.objects(self.uriref, modelns.hasModel)) |
<SYSTEM_TASK:>
Deletes the specified auto scaling group if the group has no instances
<END_TASK>
<USER_TASK:>
Description:
def delete_auto_scaling_group(self, name, force_delete=False):
"""
Deletes the specified auto scaling group if the group has no instances
and no scaling activities in progress.
""" |
params = {'AutoScalingGroupName': name}
if force_delete:
    params['ForceDelete'] = 'true'
return self.get_object('DeleteAutoScalingGroup', params, Request) |
<SYSTEM_TASK:>
Creates a new Scaling Policy.
<END_TASK>
<USER_TASK:>
Description:
def create_scaling_policy(self, scaling_policy):
"""
Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
""" |
params = {'AdjustmentType': scaling_policy.adjustment_type,
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName': scaling_policy.name,
'ScalingAdjustment': scaling_policy.scaling_adjustment}
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
return self.get_object('PutScalingPolicy', params, Request) |
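A usage sketch; the region, group, and policy names are placeholders:

import boto.ec2.autoscale
from boto.ec2.autoscale import ScalingPolicy

conn = boto.ec2.autoscale.connect_to_region('us-east-1')
policy = ScalingPolicy(name='scale-out', as_name='my-group',
                       adjustment_type='ChangeInCapacity',
                       scaling_adjustment=2, cooldown=300)
conn.create_scaling_policy(policy)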
<SYSTEM_TASK:>
Returns a full description of each Auto Scaling group in the given
<END_TASK>
<USER_TASK:>
Description:
def get_all_groups(self, names=None, max_records=None, next_token=None):
"""
Returns a full description of each Auto Scaling group in the given
list. This includes all Amazon EC2 instances that are members of the
group. If a list of names is not provided, the service returns the full
details of all Auto Scaling groups.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type names: list
:param names: List of group names which should be searched for.
:type max_records: int
:param max_records: Maximum number of groups to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup`
instances.
""" |
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if names:
self.build_list_params(params, names, 'AutoScalingGroupNames')
return self.get_list('DescribeAutoScalingGroups', params,
[('member', AutoScalingGroup)]) |
<SYSTEM_TASK:>
Returns a full description of the launch configurations given the
<END_TASK>
<USER_TASK:>
Description:
def get_all_launch_configurations(self, **kwargs):
"""
Returns a full description of the launch configurations given the
specified names.
If no names are specified, then the full details of all launch
configurations are returned.
:type names: list
:param names: List of configuration names which should be searched for.
:type max_records: int
:param max_records: Maximum number of configurations to return.
:type next_token: str
:param next_token: If you have more results than can be returned
at once, pass in this parameter to page through all results.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
instances.
""" |
params = {}
max_records = kwargs.get('max_records', None)
names = kwargs.get('names', None)
if max_records is not None:
params['MaxRecords'] = max_records
if names:
self.build_list_params(params, names, 'LaunchConfigurationNames')
next_token = kwargs.get('next_token')
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeLaunchConfigurations', params,
[('member', LaunchConfiguration)]) |
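A hedged pagination sketch; it assumes, as boto list results generally do, that the returned list carries a next_token attribute when more pages remain:

configs, token = [], None
while True:
    page = conn.get_all_launch_configurations(max_records=50, next_token=token)
    configs.extend(page)
    token = getattr(page, 'next_token', None)   # None once the last page is reached
    if token is None:
        break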
<SYSTEM_TASK:>
Get all activities for the given autoscaling group.
<END_TASK>
<USER_TASK:>
Description:
def get_all_activities(self, autoscale_group, activity_ids=None,
max_records=None, next_token=None):
"""
Get all activities for the given autoscaling group.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The auto scaling group to get activities on.
:type max_records: int
:param max_records: Maximum number of activities to return.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.activity.Activity` instances.
""" |
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName': name}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if activity_ids:
self.build_list_params(params, activity_ids, 'ActivityIds')
return self.get_list('DescribeScalingActivities',
params, [('member', Activity)]) |
<SYSTEM_TASK:>
Deletes a previously scheduled action.
<END_TASK>
<USER_TASK:>
Description:
def delete_scheduled_action(self, scheduled_action_name,
autoscale_group=None):
"""
Deletes a previously scheduled action.
:type scheduled_action_name: str
:param scheduled_action_name: The name of the action you want
to delete.
:type autoscale_group: str
:param autoscale_group: The name of the autoscale group.
""" |
params = {'ScheduledActionName': scheduled_action_name}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeleteScheduledAction', params) |
<SYSTEM_TASK:>
Terminates the specified instance. The desired group size can
<END_TASK>
<USER_TASK:>
Description:
def terminate_instance(self, instance_id, decrement_capacity=True):
"""
Terminates the specified instance. The desired group size can
also be adjusted, if desired.
:type instance_id: str
:param instance_id: The ID of the instance to be terminated.
:type decrement_capacity: bool
:param decrement_capacity: Whether to decrement the size of the
autoscaling group or not.
""" |
params = {'InstanceId': instance_id}
if decrement_capacity:
params['ShouldDecrementDesiredCapacity'] = 'true'
else:
params['ShouldDecrementDesiredCapacity'] = 'false'
return self.get_object('TerminateInstanceInAutoScalingGroup', params,
Activity) |
<SYSTEM_TASK:>
Delete a policy.
<END_TASK>
<USER_TASK:>
Description:
def delete_policy(self, policy_name, autoscale_group=None):
"""
Delete a policy.
:type policy_name: str
:param policy_name: The name or ARN of the policy to delete.
:type autoscale_group: str
:param autoscale_group: The name of the autoscale group.
""" |
params = {'PolicyName': policy_name}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeletePolicy', params) |
<SYSTEM_TASK:>
Returns a description of each Auto Scaling instance in the instance_ids
<END_TASK>
<USER_TASK:>
Description:
def get_all_autoscaling_instances(self, instance_ids=None,
max_records=None, next_token=None):
"""
Returns a description of each Auto Scaling instance in the instance_ids
list. If a list is not provided, the service returns the full details
of all instances up to a maximum of fifty.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type instance_ids: list
:param instance_ids: List of Autoscaling Instance IDs which should be
searched for.
:type max_records: int
:param max_records: Maximum number of results to return.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.activity.Activity` objects.
""" |
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceIds')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeAutoScalingInstances',
params, [('member', Instance)]) |
<SYSTEM_TASK:>
Returns descriptions of what each policy does. This action supports
<END_TASK>
<USER_TASK:>
Description:
def get_all_policies(self, as_group=None, policy_names=None,
max_records=None, next_token=None):
"""
Returns descriptions of what each policy does. This action supports
pagination. If the response includes a token, there are more records
available. To get the additional records, repeat the request with the
response token as the NextToken parameter.
If no group name or list of policy names is provided, all
available policies are returned.
:type as_group: str
:param as_group: The name of the
:class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for.
:type policy_names: list
:param policy_names: List of policy names which should be searched for.
:type max_records: int
:param max_records: Maximum number of policies to return.
""" |
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if policy_names:
self.build_list_params(params, policy_names, 'PolicyNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribePolicies', params,
[('member', ScalingPolicy)]) |
<SYSTEM_TASK:>
Suspends Auto Scaling processes for an Auto Scaling group.
<END_TASK>
<USER_TASK:>
Description:
def suspend_processes(self, as_group, scaling_processes=None):
"""
Suspends Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to suspend processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to suspend. If omitted, all
processes will be suspended.
""" |
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
self.build_list_params(params, scaling_processes, 'ScalingProcesses')
return self.get_status('SuspendProcesses', params) |
<SYSTEM_TASK:>
Resumes Auto Scaling processes for an Auto Scaling group.
<END_TASK>
<USER_TASK:>
Description:
def resume_processes(self, as_group, scaling_processes=None):
"""
Resumes Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to resume processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to resume. If omitted, all
processes will be resumed.
""" |
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
self.build_list_params(params, scaling_processes, 'ScalingProcesses')
return self.get_status('ResumeProcesses', params) |
<SYSTEM_TASK:>
Creates a scheduled scaling action for an Auto Scaling group. If you
<END_TASK>
<USER_TASK:>
Description:
def create_scheduled_group_action(self, as_group, name, time,
desired_capacity=None,
min_size=None, max_size=None):
"""
Creates a scheduled scaling action for an Auto Scaling group. If you
leave a parameter unspecified, the corresponding value remains
unchanged in the affected Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to add the scheduled action to.
:type name: string
:param name: Scheduled action name.
:type time: datetime.datetime
:param time: The time for this action to start.
:type desired_capacity: int
:param desired_capacity: The number of EC2 instances that should
be running in this group.
:type min_size: int
:param min_size: The minimum size for the auto scaling group.
:type max_size: int
:param max_size: The maximum size for the auto scaling group.
""" |
params = {'AutoScalingGroupName': as_group,
'ScheduledActionName': name,
'Time': time.isoformat()}
if desired_capacity is not None:
params['DesiredCapacity'] = desired_capacity
if min_size is not None:
params['MinSize'] = min_size
if max_size is not None:
params['MaxSize'] = max_size
return self.get_status('PutScheduledUpdateGroupAction', params) |
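A usage sketch scheduling a one-off scale-up an hour from now (the group and action names are placeholders):

from datetime import datetime, timedelta

start = datetime.utcnow() + timedelta(hours=1)
conn.create_scheduled_group_action('my-group', 'scale-up-later', start,
                                   desired_capacity=10, min_size=2, max_size=12)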
<SYSTEM_TASK:>
Disables monitoring of group metrics for the Auto Scaling group
<END_TASK>
<USER_TASK:>
Description:
def disable_metrics_collection(self, as_group, metrics=None):
"""
Disables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of affected
metrics with the Metrics parameter.
""" |
params = {'AutoScalingGroupName': as_group}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('DisableMetricsCollection', params) |
<SYSTEM_TASK:>
Enables monitoring of group metrics for the Auto Scaling group
<END_TASK>
<USER_TASK:>
Description:
def enable_metrics_collection(self, as_group, granularity, metrics=None):
"""
Enables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of enabled
metrics with the Metrics parameter.
Auto scaling metrics collection can be turned on only if the
InstanceMonitoring.Enabled flag, in the Auto Scaling group's launch
configuration, is set to true.
:type as_group: string
:param as_group: The auto scaling group to enable metrics collection on.
:type granularity: string
:param granularity: The granularity to associate with the metrics to
collect. Currently, the only legal granularity is "1Minute".
:type metrics: string list
:param metrics: The list of metrics to collect. If no metrics are
specified, all metrics are enabled.
""" |
params = {'AutoScalingGroupName': as_group,
'Granularity': granularity}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('EnableMetricsCollection', params) |
<SYSTEM_TASK:>
Explicitly set the health status of an instance.
<END_TASK>
<USER_TASK:>
Description:
def set_instance_health(self, instance_id, health_status,
should_respect_grace_period=True):
"""
Explicitly set the health status of an instance.
:type instance_id: str
:param instance_id: The identifier of the EC2 instance.
:type health_status: str
:param health_status: The health status of the instance.
"Healthy" means that the instance is healthy and should remain
in service. "Unhealthy" means that the instance is unhealthy.
Auto Scaling should terminate and replace it.
:type should_respect_grace_period: bool
:param should_respect_grace_period: If True, this call should
respect the grace period associated with the group.
""" |
params = {'InstanceId': instance_id,
'HealthStatus': health_status}
if should_respect_grace_period:
params['ShouldRespectGracePeriod'] = 'true'
else:
params['ShouldRespectGracePeriod'] = 'false'
return self.get_status('SetInstanceHealth', params) |
<SYSTEM_TASK:>
Lists the Auto Scaling group tags.
<END_TASK>
<USER_TASK:>
Description:
def get_all_tags(self, filters=None, max_records=None, next_token=None):
"""
Lists the Auto Scaling group tags.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type filters: dict
:param filters: The value of the filter type used to identify
the tags to be returned. NOT IMPLEMENTED YET.
:type max_records: int
:param max_records: Maximum number of tags to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.tag.Tag`
instances.
""" |
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeTags', params,
[('member', Tag)]) |
<SYSTEM_TASK:>
Creates new tags or updates existing tags for an Auto Scaling group.
<END_TASK>
<USER_TASK:>
Description:
def create_or_update_tags(self, tags):
"""
Creates new tags or updates existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
:param tags: The new or updated tags.
""" |
params = {}
for i, tag in enumerate(tags):
tag.build_params(params, i+1)
return self.get_status('CreateOrUpdateTags', params, verb='POST') |
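A usage sketch; the group name and tag values are placeholders:

from boto.ec2.autoscale.tag import Tag

tag = Tag(key='env', value='production', propagate_at_launch=True,
          resource_id='my-group')
conn.create_or_update_tags([tag])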
<SYSTEM_TASK:>
Open this key for reading
<END_TASK>
<USER_TASK:>
Description:
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string (e.g., 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
""" |
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 200 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() == 'etag':
self.etag = value
elif name.lower() == 'content-type':
self.content_type = value
elif name.lower() == 'content-encoding':
self.content_encoding = value
elif name.lower() == 'last-modified':
self.last_modified = value
elif name.lower() == 'cache-control':
self.cache_control = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp) |
<SYSTEM_TASK:>
Change the storage class of an existing key.
<END_TASK>
<USER_TASK:>
Description:
def change_storage_class(self, new_storage_class, dst_bucket=None):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
all metadata and ACL info while changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key
will be used.
""" |
if new_storage_class == 'STANDARD':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=False, preserve_acl=True)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=True, preserve_acl=True)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class) |
<SYSTEM_TASK:>
Copy this Key to another bucket.
<END_TASK>
<USER_TASK:>
Description:
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key.
If metadata is supplied, it will replace the
metadata of the source key being copied.
If no metadata is supplied, the source key's
metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the storage
class of the new Key to be
REDUCED_REDUNDANCY regardless of the
storage class of the key being copied.
The Reduced Redundancy Storage (RRS)
feature of S3, provides lower
redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key
will be copied to the destination
key. If False, the destination key
will have the default ACL.
Note that preserving the ACL in the
new key object will require two
additional API calls to S3, one to
retrieve the current ACL and one to
set that ACL on the new object. If
you don't care about the ACL, a value
of False will be significantly more
efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
""" |
dst_bucket = self.bucket.connection.lookup(dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key) |
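A usage sketch; the bucket and key names are placeholders:

import boto

s3 = boto.connect_s3()
key = s3.get_bucket('src-bucket').get_key('path/to/object')
new_key = key.copy('dst-bucket', 'path/to/object', preserve_acl=True)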
<SYSTEM_TASK:>
Delete this key from S3
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
"""
Delete this key from S3
""" |
return self.bucket.delete_key(self.name, version_id=self.version_id) |
<SYSTEM_TASK:>
Generate a URL to access this key.
<END_TASK>
<USER_TASK:>
Description:
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
:param query_auth: If True (the default), the URL is signed with
query-string authentication.
:rtype: string
:return: The URL to access the key
""" |
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute) |
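A usage sketch: a signed URL valid for one hour, and an unsigned HTTP URL for a publicly readable object:

signed_url = key.generate_url(3600)                # query-string auth by default
public_url = key.generate_url(0, query_auth=False, force_http=True)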
<SYSTEM_TASK:>
Store an object in S3 using the name of the Key object as the
<END_TASK>
<USER_TASK:>
Description:
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
total size of the object to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
""" |
with open(filename, 'rb') as fp:
    self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                policy, md5, reduced_redundancy,
                                encrypt_key=encrypt_key) |
<SYSTEM_TASK:>
Retrieve an object from S3 using the name of the Key object as the
<END_TASK>
<USER_TASK:>
Description:
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: file-like object
:param fp: The file object to write the key's contents to.
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
received from S3 and the second representing the
total size of the object being retrieved.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
""" |
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers) |
<SYSTEM_TASK:>
Convenience method that provides a quick way to add an email grant
<END_TASK>
<USER_TASK:>
Description:
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
account you are granting the permission to.
:type headers: dict
:param headers: Any headers to pass along with the request to AWS.
""" |
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers) |
<SYSTEM_TASK:>
Convenience method that provides a quick way to add a canonical
<END_TASK>
<USER_TASK:>
Description:
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
account you are granting the permission to.
:type display_name: string
:param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
""" |
policy = self.get_acl()
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers) |
<SYSTEM_TASK:>
Returns the AWS object associated with a given option.
<END_TASK>
<USER_TASK:>
Description:
def get_obj(self, name):
"""
Returns the AWS object associated with a given option.
The heuristics used are a bit lame. If the option name contains
the word 'bucket' it is assumed to be an S3 bucket, if the name
contains the word 'queue' it is assumed to be an SQS queue and
if it contains the word 'domain' it is assumed to be a SimpleDB
domain. If the option name specified does not exist in the
config file or if the AWS object cannot be retrieved this
returns None.
""" |
val = self.get(name)
if not val:
return None
if name.find('queue') >= 0:
obj = boto.lookup('sqs', val)
if obj:
obj.set_message_class(ServiceMessage)
elif name.find('bucket') >= 0:
obj = boto.lookup('s3', val)
elif name.find('domain') >= 0:
obj = boto.lookup('sdb', val)
else:
obj = None
return obj |
<SYSTEM_TASK:>
Creates a JSON response
<END_TASK>
<USER_TASK:>
Description:
def export_json(data, status, headers):
"""
Creates a JSON response
JSON content is encoded as UTF-8 rather than with Unicode escapes.
Args:
data: any type object that can dump to json
status (int): http status code
headers (dict): http headers
""" |
dumped = json.dumps(data, ensure_ascii=False)
resp = current_app.response_class(
dumped, status=status, headers=headers,
content_type='application/json; charset=utf-8')
return resp |
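A hypothetical Flask usage sketch, assuming an application object app is in scope:

@app.route('/items')
def list_items():
    return export_json({'items': ['a', 'b']}, 200, {'X-Total-Count': '2'})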
<SYSTEM_TASK:>
Add a new rule to this DBSecurity group.
<END_TASK>
<USER_TASK:>
Description:
def authorize(self, cidr_ip=None, ec2_group=None):
"""
Add a new rule to this DBSecurity group.
You need to pass in either a CIDR block to authorize or
an EC2 SecurityGroup.
@type cidr_ip: string
@param cidr_ip: A valid CIDR IP range to authorize
@type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
@rtype: bool
@return: True if successful.
""" |
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
else:
group_name = None
group_owner_id = None
return self.connection.authorize_dbsecurity_group(self.name,
cidr_ip,
group_name,
group_owner_id) |
<SYSTEM_TASK:>
Revoke access to a CIDR range or EC2 SecurityGroup.
<END_TASK>
<USER_TASK:>
Description:
def revoke(self, cidr_ip=None, ec2_group=None):
"""
Revoke access to a CIDR range or EC2 SecurityGroup.
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
@type cidr_ip: string
@param cidr_ip: A valid CIDR IP range to revoke
@type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
@rtype: bool
@return: True if successful.
""" |
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
return self.connection.revoke_dbsecurity_group(
self.name,
ec2_security_group_name=group_name,
ec2_security_group_owner_id=group_owner_id)
# Revoking by CIDR IP range
return self.connection.revoke_dbsecurity_group(
self.name, cidr_ip=cidr_ip) |
<SYSTEM_TASK:>
A generator function for listing parts of a multipart upload.
<END_TASK>
<USER_TASK:>
Description:
def part_lister(mpupload, part_number_marker=None):
"""
A generator function for listing parts of a multipart upload.
""" |
more_results = True
part = None
while more_results:
parts = mpupload.get_all_parts(None, part_number_marker)
for part in parts:
yield part
part_number_marker = mpupload.next_part_number_marker
more_results = mpupload.is_truncated |
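A usage sketch, assuming bucket is a boto S3 bucket with at least one in-progress multipart upload:

for upload in bucket.get_all_multipart_uploads():
    for part in part_lister(upload):
        print(part.part_number, part.size)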
<SYSTEM_TASK:>
Return the uploaded parts of this MultiPart Upload. This is
<END_TASK>
<USER_TASK:>
Description:
def get_all_parts(self, max_parts=None, part_number_marker=None):
"""
Return the uploaded parts of this MultiPart Upload. This is
a lower-level method that requires you to manually page through
results. To simplify this process, you can just use the
object itself as an iterator and it will automatically handle
all of the paging with S3.
""" |
self._parts = []
query_args = 'uploadId=%s' % self.id
if max_parts:
query_args += '&max-parts=%d' % max_parts
if part_number_marker:
query_args += '&part-number-marker=%s' % part_number_marker
response = self.bucket.connection.make_request('GET', self.bucket.name,
self.key_name,
query_args=query_args)
body = response.read()
if response.status == 200:
h = handler.XmlHandler(self, self)
xml.sax.parseString(body, h)
return self._parts |
<SYSTEM_TASK:>
Copy another part of this MultiPart Upload.
<END_TASK>
<USER_TASK:>
Description:
def copy_part_from_key(self, src_bucket_name, src_key_name, part_num,
start=None, end=None):
"""
Copy another part of this MultiPart Upload.
:type src_bucket_name: string
:param src_bucket_name: Name of the bucket containing the source key
:type src_key_name: string
:param src_key_name: Name of the source key
:type part_num: int
:param part_num: The number of this part.
:type start: int
:param start: Zero-based byte offset to start copying from
:type end: int
:param end: Zero-based byte offset to copy to
""" |
if part_num < 1:
raise ValueError('Part numbers must be greater than zero')
query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
if start is not None and end is not None:
rng = 'bytes=%s-%s' % (start, end)
provider = self.bucket.connection.provider
headers = {provider.copy_source_range_header: rng}
else:
headers = None
return self.bucket.copy_key(self.key_name, src_bucket_name,
src_key_name, storage_class=None,
headers=headers,
query_args=query_args) |
<SYSTEM_TASK:>
Complete the MultiPart Upload operation. This method should
<END_TASK>
<USER_TASK:>
Description:
def complete_upload(self):
"""
Complete the MultiPart Upload operation. This method should
be called when all parts of the file have been successfully
uploaded to S3.
:rtype: :class:`boto.s3.multipart.CompletedMultiPartUpload`
:returns: An object representing the completed upload.
""" |
xml = self.to_xml()
return self.bucket.complete_multipart_upload(self.key_name,
self.id, xml) |
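A hedged end-to-end sketch combining the two methods above: a server-side copy of a byte range as part 1, followed by completion. All names are placeholders, and S3 requires every part except the last to be at least 5 MB:

mp = bucket.initiate_multipart_upload('big-copy')
mp.copy_part_from_key('src-bucket', 'src-key', 1,
                      start=0, end=10 * 1024 * 1024 - 1)   # first 10 MB
completed = mp.complete_upload()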
<SYSTEM_TASK:>
Return the callback url for this provider.
<END_TASK>
<USER_TASK:>
Description:
def get_callback_url(self, provider):
"""Return the callback url for this provider.""" |
info = self.model._meta.app_label, self.model._meta.model_name
return reverse('admin:%s_%s_callback' % info, kwargs={'provider': provider.id}) |
<SYSTEM_TASK:>
Return url to redirect authenticated users.
<END_TASK>
<USER_TASK:>
Description:
def get_login_redirect(self, provider, account):
"""Return url to redirect authenticated users.""" |
info = self.model._meta.app_label, self.model._meta.model_name
# inline import to prevent circular imports.
from .admin import PRESERVED_FILTERS_SESSION_KEY
preserved_filters = self.request.session.get(PRESERVED_FILTERS_SESSION_KEY, None)
redirect_url = reverse('admin:%s_%s_changelist' % info)
if preserved_filters:
redirect_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': self.model._meta}, redirect_url)
return redirect_url |
<SYSTEM_TASK:>
Return url to redirect on login failure.
<END_TASK>
<USER_TASK:>
Description:
def get_error_redirect(self, provider, reason):
"""Return url to redirect on login failure.""" |
info = self.model._meta.app_label, self.model._meta.model_name
return reverse('admin:%s_%s_changelist' % info) |
<SYSTEM_TASK:>
Message user and redirect on error.
<END_TASK>
<USER_TASK:>
Description:
def handle_login_failure(self, provider, reason):
"""Message user and redirect on error.""" |
logger.error('Authentication Failure: {0}'.format(reason))
messages.error(self.request, 'Authentication Failed. Please try again.')
return redirect(self.get_error_redirect(provider, reason)) |
<SYSTEM_TASK:>
Create and return an SSHClient object given an
<END_TASK>
<USER_TASK:>
Description:
def sshclient_from_instance(instance, ssh_key_file,
host_key_file='~/.ssh/known_hosts',
user_name='root', ssh_pwd=None):
"""
Create and return an SSHClient object given an
instance object.
:type instance: :class`boto.ec2.instance.Instance` object
:param instance: The instance object.
:type ssh_key_file: str
:param ssh_key_file: A path to the private key file used
to log into instance.
:type host_key_file: str
:param host_key_file: A path to the known_hosts file used
by the SSH client.
Defaults to ~/.ssh/known_hosts
:type user_name: str
:param user_name: The username to use when logging into
the instance. Defaults to root.
:type ssh_pwd: str
:param ssh_pwd: The passphrase, if any, associated with
private key.
""" |
s = FakeServer(instance, ssh_key_file)
return SSHClient(s, host_key_file, user_name, ssh_pwd) |
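A usage sketch; the region, instance id, key path, and user name are placeholders:

import boto.ec2

ec2 = boto.ec2.connect_to_region('us-east-1')
reservation = ec2.get_all_instances(instance_ids=['i-12345678'])[0]
ssh = sshclient_from_instance(reservation.instances[0], '~/.ssh/mykey.pem',
                              user_name='ec2-user')
status, stdout, stderr = ssh.run('uptime')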
<SYSTEM_TASK:>
Open a file on the remote system and return a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def open(self, filename, mode='r', bufsize=-1):
"""
Open a file on the remote system and return a file-like object.
""" |
sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize) |
<SYSTEM_TASK:>
Execute a command on the remote host. Return a tuple containing
<END_TASK>
<USER_TASK:>
Description:
def run(self, command):
"""
Execute a command on the remote host. Return a tuple containing
an integer status and two strings, the first containing stdout
and the second containing stderr from the command.
""" |
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
status = 0
std_out = ''
std_err = ''
try:
    t = self._ssh_client.exec_command(command)
    std_out = t[1].read()
    std_err = t[2].read()
    t[0].close()
    t[1].close()
    t[2].close()
except paramiko.SSHException:
    # if exec_command itself fails, t is never bound, so the channel
    # reads and closes must stay inside the try block
    status = 1
boto.log.debug('stdout: %s' % std_out)
boto.log.debug('stderr: %s' % std_err)
return (status, std_out, std_err) |
<SYSTEM_TASK:>
Execute a command on the remote host with a pseudo-terminal.
<END_TASK>
<USER_TASK:>
Description:
def run_pty(self, command):
"""
Execute a command on the remote host with a pseudo-terminal.
Returns a string containing the output of the command.
""" |
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel.recv(1024) |
<SYSTEM_TASK:>
Factory to choose feature extractor
<END_TASK>
<USER_TASK:>
Description:
def factory(feature):
"""
Factory to choose feature extractor
:param feature: name of the feature
:return: Feature extractor function
""" |
if feature == 'hog':
return hog
elif feature == 'deep':
return deep
elif feature == 'gray':
return gray
elif feature == 'lab':
return lab
elif feature == 'luv':
return luv
elif feature == 'hsv':
return hsv
elif feature == 'hls':
return hls
else:
return rgb |
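A usage sketch; loading the image with OpenCV is an assumption, any HxWx3 image array would do:

import cv2

img = cv2.imread('sample.jpg')        # placeholder path
extract = factory('hog')
features = extract(img)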
<SYSTEM_TASK:>
HOG feature extractor.
<END_TASK>
<USER_TASK:>
Description:
def hog(img, options=None):
"""
HOG feature extractor.
:param img:
:param options:
:return: HOG Feature for given image
The output will have channels same as number of orientations.
Height and Width will be reduced based on block-size and cell-size
""" |
op = _DEF_HOG_OPTS.copy()
if options is not None:
op.update(options)
img = gray(img)
img_fd = skimage.feature.hog(img,
orientations=op['orientations'],
pixels_per_cell=op['cell_size'],
cells_per_block=op['block_size'],
visualise=False)
h, w = img.shape
cx, cy = op['cell_size']
n_cellsx, n_cellsy = w // cx, h // cy
bx, by = op['block_size']
n_blksx, n_blksy = (n_cellsx - bx) + 1, (n_cellsy - by) + 1
hog_shape = n_blksy * by, n_blksx * bx, op['orientations']
image_hog = np.reshape(img_fd, hog_shape)
return image_hog |
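A worked shape check under illustrative options: a 64x64 input with 8x8 cells gives an 8x8 cell grid; 2x2 blocks give (8-2+1)=7 blocks per side, so the reshaped output is (7*2, 7*2, 9) = (14, 14, 9) for 9 orientations:

features = hog(img, options={'orientations': 9,
                             'cell_size': (8, 8),
                             'block_size': (2, 2)})
# for a 64x64 input image, features.shape == (14, 14, 9)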
<SYSTEM_TASK:>
Retrieves a file from a Key
<END_TASK>
<USER_TASK:>
Description:
def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False):
"""
Retrieves a file from a Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: dict
:param headers: ignored in this subclass.
:type cb: function
:param cb: ignored in this subclass.
:type num_cb: int
:param num_cb: ignored in this subclass.
""" |
# a write-only stream cannot be read back out
if self.key_type & self.KEY_STREAM_WRITABLE:
    raise BotoClientError('Stream is not readable')
elif self.key_type & self.KEY_STREAM_READABLE:
key_file = self.fp
else:
key_file = open(self.full_path, 'rb')
try:
shutil.copyfileobj(key_file, fp)
finally:
key_file.close() |
<SYSTEM_TASK:>
Store an object in a file using the name of the Key object as the
<END_TASK>
<USER_TASK:>
Description:
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
num_cb=10, policy=None, md5=None):
"""
Store an object in a file using the name of the Key object as the
key in file URI and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: ignored in this subclass.
:type replace: bool
:param replace: If this parameter is False, the method
will first check to see if an object exists in the
bucket with the same key. If it does, it won't
overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: ignored in this subclass.
:type num_cb: int
:param num_cb: ignored in this subclass.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: ignored in this subclass.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded
version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.
:param md5: ignored in this subclass.
""" |
# a read-only stream cannot be used as a write target
if self.key_type & self.KEY_STREAM_READABLE:
    raise BotoClientError('Stream is not writable')
elif self.key_type & self.KEY_STREAM_WRITABLE:
key_file = self.fp
else:
if not replace and os.path.exists(self.full_path):
return
key_file = open(self.full_path, 'wb')
try:
shutil.copyfileobj(fp, key_file)
finally:
key_file.close() |
<SYSTEM_TASK:>
Retrieve file data from the Key, and return contents as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
torrent=False):
"""
Retrieve file data from the Key, and return contents as a string.
:type headers: dict
:param headers: ignored in this subclass.
:type cb: function
:param cb: ignored in this subclass.
:type num_cb: int
:param num_cb: ignored in this subclass.
:type torrent: bool
:param torrent: ignored in this subclass.
:rtype: string
:returns: The contents of the file as a string
""" |
fp = StringIO.StringIO()
self.get_contents_to_file(fp)
return fp.getvalue() |
<SYSTEM_TASK:>
Finds a new box that exactly encloses all the given boxes.
<END_TASK>
<USER_TASK:>
Description:
def enclosing_box(boxes):
"""
Finds a new box that exactly encloses all the given boxes.
:param boxes: Array of Box objects
:return: Box object that encloses all boxes
""" |
x = max(0, min([box.x for box in boxes]))
y = max(0, min([box.y for box in boxes]))
x2 = max([box.bottom_right()[0] for box in boxes])
y2 = max([box.bottom_right()[1] for box in boxes])
return Box.from_xy(x, y, x2, y2) |
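A usage sketch, assuming the Box(x, y, width, height) constructor used elsewhere in this module:

boxes = [Box(10, 10, 20, 20), Box(25, 5, 10, 40)]
outer = enclosing_box(boxes)    # spans (10, 5) to (35, 45)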
<SYSTEM_TASK:>
Finds the left most box out of the given boxes.
<END_TASK>
<USER_TASK:>
Description:
def left_most(boxes):
"""
Finds the left most box out of the given boxes.
:param boxes: Array of Box objects
:return: The left-most Box object
""" |
# sort on the x coordinate only; Box objects themselves are not orderable
x_list = [(box.x, box) for box in boxes]
x_list.sort(key=lambda pair: pair[0])
return x_list[0][1] |
<SYSTEM_TASK:>
Finds the right most box out of the given boxes.
<END_TASK>
<USER_TASK:>
Description:
def right_most(boxes):
"""
Finds the right most box out of the given boxes.
:param boxes: Array of Box objects
:return: The right-most Box object
""" |
# sort on the x coordinate only; Box objects themselves are not orderable
x_list = [(box.x, box) for box in boxes]
x_list.sort(key=lambda pair: pair[0])
return x_list[-1][1] |
<SYSTEM_TASK:>
Finds an intersection box that is common to both given boxes.
<END_TASK>
<USER_TASK:>
Description:
def intersection_box(box1, box2):
"""
Finds an intersection box that is common to both given boxes.
:param box1: Box object 1
:param box2: Box object 2
:return: The intersection Box; if the boxes do not overlap, the returned box has zero width or height
""" |
b1_x2, b1_y2 = box1.bottom_right()
b2_x2, b2_y2 = box2.bottom_right()
x, y = max(box1.x, box2.x), max(box1.y, box2.y)
x2, y2 = min(b1_x2, b2_x2), min(b1_y2, b2_y2)
w, h = max(0, x2-x), max(0, y2-y)
return Box(x, y, w, h) |
<SYSTEM_TASK:>
Check whether this box and given box overlaps at least by given threshold.
<END_TASK>
<USER_TASK:>
Description:
def overlaps(self, box, th=0.0001):
"""
Check whether this box and given box overlaps at least by given threshold.
:param box: Box to compare with
:param th: Threshold above which overlapping should be considered
:returns: True if overlaps
""" |
int_box = Box.intersection_box(self, box)
small_box = self if self.smaller(box) else box
return int_box.area() / small_box.area() >= th |
<SYSTEM_TASK:>
Expands the box co-ordinates by given percentage on four sides. Ignores negative values.
<END_TASK>
<USER_TASK:>
Description:
def expand(self, percentage):
"""
Expands the box coordinates by the given percentage on all four sides; the resulting x and y are clamped at zero.
:param percentage: Percentage to expand
:return: New expanded Box
""" |
ex_h = math.ceil(self.height * percentage / 100)
ex_w = math.ceil(self.width * percentage / 100)
x = max(0, self.x - ex_w)
y = max(0, self.y - ex_h)
x2 = self.x + self.width + ex_w
y2 = self.y + self.height + ex_h
return Box.from_xy(x, y, x2, y2) |
<SYSTEM_TASK:>
Add padding around four sides of box
<END_TASK>
<USER_TASK:>
Description:
def padding(self, px):
"""
Add padding around four sides of box
:param px: padding value in pixels.
Can be an array in the format of [top right bottom left] or single value.
:return: New padding added box
""" |
# if px is not an array, have equal padding all sides
if not isinstance(px, list):
px = [px] * 4
x = max(0, self.x - px[3])
y = max(0, self.y - px[0])
x2 = self.x + self.width + px[1]
y2 = self.y + self.height + px[2]
return Box.from_xy(x, y, x2, y2) |
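A short sketch contrasting uniform and per-side padding; the box and values are illustrative:

box = Box(50, 50, 100, 100)
uniform = box.padding(10)                # 10px on every side
per_side = box.padding([5, 10, 15, 20])  # top=5, right=10, bottom=15, left=20
print(uniform.x, uniform.y)              # 40 40
print(per_side.width, per_side.height)   # 130 120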
<SYSTEM_TASK:>
Finds a point inside the box that is exactly at the given percentage place.
<END_TASK>
<USER_TASK:>
Description:
def pos_by_percent(self, x_percent, y_percent):
"""
Finds a point inside the box that is exactly at the given percentage place.
:param x_percent: fraction of the width from the left edge (0.0 to 1.0)
:param y_percent: fraction of the height from the top edge (0.0 to 1.0)
:return: A point inside the box
""" |
x = round(x_percent * self.width)
y = round(y_percent * self.height)
return int(x), int(y) |
<SYSTEM_TASK:>
Version of installed pip.
<END_TASK>
<USER_TASK:>
Description:
def pip_version(self):
"""Version of installed pip.""" |
if not self._pip_exists:
return None
if not hasattr(self, '_pip_version'):
# don't call `self._execute_pip` here as that method calls this one
output = self._execute(self._pip + ['-V'], log=False).split()[1]
self._pip_version = tuple([int(n) for n in output.split('.')])
return self._pip_version |
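Since the version comes back as a tuple of ints, callers can compare it directly; a hedged sketch using virtualenv-api's public class, with an illustrative path:

from virtualenvapi.manage import VirtualEnvironment

env = VirtualEnvironment('/tmp/demo-env')  # created lazily on first use
if env.pip_version and env.pip_version >= (6, 0):
    print('pip supports --disable-pip-version-check')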
<SYSTEM_TASK:>
Executes `virtualenv` to create a new environment.
<END_TASK>
<USER_TASK:>
Description:
def _create(self):
"""Executes `virtualenv` to create a new environment.""" |
if self.readonly:
raise VirtualenvReadonlyException()
args = ['virtualenv']
if self.system_site_packages:
args.append('--system-site-packages')
if self.python is None:
args.append(self.name)
else:
args.extend(['-p', self.python, self.name])
proc = subprocess.Popen(args, cwd=self.root, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = proc.communicate()
returncode = proc.returncode
if returncode:
raise VirtualenvCreationException((returncode, output, self.name))
self._write_to_log(output, truncate=True)
self._write_to_error(error, truncate=True) |
<SYSTEM_TASK:>
Executes pip commands.
<END_TASK>
<USER_TASK:>
Description:
def _execute_pip(self, args, log=True):
"""
Executes pip commands.
:param args: Arguments to pass to pip (list[str])
:param log: Log the output to a file [default: True] (boolean)
:return: See _execute
""" |
# Copy the pip calling arguments so they can be extended
exec_args = list(self._pip)
# Older versions of pip don't support the version check argument.
# Fixes https://github.com/sjkingo/virtualenv-api/issues/35
if self.pip_version[0] >= 6:
exec_args.append('--disable-pip-version-check')
exec_args.extend(args)
return self._execute(exec_args, log=log) |
<SYSTEM_TASK:>
Executes the given command inside the environment and returns the output.
<END_TASK>
<USER_TASK:>
Description:
def _execute(self, args, log=True):
"""Executes the given command inside the environment and returns the output.""" |
if not self._ready:
self.open_or_create()
output = ''
error = ''
try:
proc = subprocess.Popen(args, cwd=self.path, env=self.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = proc.communicate()
returncode = proc.returncode
if returncode:
raise subprocess.CalledProcessError(returncode, proc, (output, error))
return to_text(output)
except OSError as e:
# raise a more meaningful error with the program name
prog = args[0]
if prog[0] != os.sep:
prog = os.path.join(self.path, prog)
raise OSError('%s: %s' % (prog, six.u(str(e))))
except subprocess.CalledProcessError as e:
output, error = e.output
e.output = output
raise e
finally:
if log:
try:
self._write_to_log(to_text(output))
self._write_to_error(to_text(error))
except NameError:
pass |
<SYSTEM_TASK:>
Writes the given output to the log file, appending unless `truncate` is True.
<END_TASK>
<USER_TASK:>
Description:
def _write_to_log(self, s, truncate=False):
"""Writes the given output to the log file, appending unless `truncate` is True.""" |
# if truncate is True, set write mode to truncate
with open(self._logfile, 'w' if truncate else 'a') as fp:
fp.writelines((to_text(s),)) |
<SYSTEM_TASK:>
Writes the given output to the error file, appending unless `truncate` is True.
<END_TASK>
<USER_TASK:>
Description:
def _write_to_error(self, s, truncate=False):
"""Writes the given output to the error file, appending unless `truncate` is True.""" |
# if truncate is True, set write mode to truncate
with open(self._errorfile, 'w' if truncate else 'a') as fp:
fp.writelines((to_text(s),)) |
<SYSTEM_TASK:>
Returns True if pip exists inside the virtual environment. Can be
<END_TASK>
<USER_TASK:>
Description:
def _pip_exists(self):
"""Returns True if pip exists inside the virtual environment. Can be
used as a naive way to verify that the environment is installed.""" |
return os.path.isfile(os.path.join(self.path, 'bin', 'pip')) |
<SYSTEM_TASK:>
Shortcut method to upgrade a package. If `force` is set to True,
<END_TASK>
<USER_TASK:>
Description:
def upgrade(self, package, force=False):
"""Shortcut method to upgrade a package. If `force` is set to True,
the package and all of its dependencies will be reinstalled, otherwise
if the package is up to date, this command is a no-op.""" |
self.install(package, upgrade=True, force=force) |
<SYSTEM_TASK:>
Upgrades all installed packages to their latest versions.
<END_TASK>
<USER_TASK:>
Description:
def upgrade_all(self):
"""
Upgrades all installed packages to their latest versions.
""" |
for pkg in self.installed_package_names:
self.install(pkg, upgrade=True) |
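A typical package-management flow with these shortcuts, sketched under the assumption of virtualenv-api's usual install signature:

from virtualenvapi.manage import VirtualEnvironment

env = VirtualEnvironment('/tmp/demo-env')  # path is illustrative
env.install('requests')                    # no-op if already satisfied
env.upgrade('requests')                    # no-op if already up to date
env.upgrade_all()                          # refresh every installed package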
<SYSTEM_TASK:>
Searches the PyPi repository for the given `term` and returns a
<END_TASK>
<USER_TASK:>
Description:
def search(self, term):
"""
Searches the PyPI repository for the given `term` and returns a
dictionary of results.
New in 2.1.5: returns a dictionary instead of list of tuples
""" |
packages = {}
results = self._execute_pip(['search', term], log=False) # Don't want to log searches
for result in results.split(linesep):
try:
name, description = result.split(six.u(' - '), 1)
except ValueError:
# '-' not in result so unable to split into tuple;
# this could be from a multi-line description
continue
else:
name = name.strip()
if len(name) == 0:
continue
packages[name] = description.split(six.u('<br'), 1)[0].strip()
return packages |
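A sketch of consuming the results, continuing the env object from the sketch above; note that PyPI later disabled the server side of `pip search`, so this only works against indexes that still support it:

results = env.search('requests')  # {package_name: short description}
for name, description in sorted(results.items()):
    print('%s - %s' % (name, description))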
<SYSTEM_TASK:>
Enable availability zones to this Access Point.
<END_TASK>
<USER_TASK:>
Description:
def enable_zones(self, zones):
"""
Enable availability zones to this Access Point.
All zones must be in the same region as the Access Point.
:type zones: string or List of strings
:param zones: The name of the zone(s) to add.
""" |
if isinstance(zones, (str, unicode)):
zones = [zones]
new_zones = self.connection.enable_availability_zones(self.name, zones)
self.availability_zones = new_zones |
<SYSTEM_TASK:>
Disable availability zones from this Access Point.
<END_TASK>
<USER_TASK:>
Description:
def disable_zones(self, zones):
"""
Disable availability zones from this Access Point.
:type zones: string or List of strings
:param zones: The name of the zone(s) to remove.
""" |
if isinstance(zones, (str, unicode)):
zones = [zones]
new_zones = self.connection.disable_availability_zones(self.name, zones)
self.availability_zones = new_zones |
<SYSTEM_TASK:>
Adds instances to this load balancer. All instances must be in the same
<END_TASK>
<USER_TASK:>
Description:
def register_instances(self, instances):
"""
Adds instances to this load balancer. All instances must be in the same
region as the load balancer. Adding endpoints that are already
registered with the load balancer has no effect.
:param list instances: List of instance IDs (strings) that you'd like
to add to this load balancer.
""" |
if isinstance(instances, (str, unicode)):
instances = [instances]
new_instances = self.connection.register_instances(self.name, instances)
self.instances = new_instances |
<SYSTEM_TASK:>
Register cls as a class representing an ID3 frame.
<END_TASK>
<USER_TASK:>
Description:
def frameclass(cls):
"""Register cls as a class representing an ID3 frame.
Sets cls.frameid and cls._version if not present, and registers the
new frame in Tag's known_frames dictionary.
To be used as a decorator on the class definition:
@frameclass
class UFID(Frame):
_framespec = (NullTerminatedStringSpec("owner"), BinaryDataSpec("data"))
""" |
assert issubclass(cls, Frames.Frame)
# Register v2.2 versions of v2.3/v2.4 frames when defined via inheritance.
if len(cls.__name__) == 3:
base = cls.__bases__[0]
if issubclass(base, Frames.Frame) and base._in_version(3, 4):
assert not hasattr(base, "_v2_frame")
base._v2_frame = cls
# Override frameid from base with v2.2 name
if base.frameid == cls.frameid:
cls.frameid = cls.__name__
# Add frameid.
if not hasattr(cls, "frameid"):
cls.frameid = cls.__name__
assert Tag._is_frame_id(cls.frameid)
# Supply _version attribute if missing.
if len(cls.frameid) == 3:
cls._version = 2
if len(cls.frameid) == 4 and not cls._version:
cls._version = (3, 4)
# Register cls as a known frame.
assert cls.frameid not in Tag.known_frames
Tag.known_frames[cls.frameid] = cls
return cls |
<SYSTEM_TASK:>
Returns a list of frames in this tag.
<END_TASK>
<USER_TASK:>
Description:
def frames(self, key=None, orig_order=False):
"""Returns a list of frames in this tag.
If KEY is None, returns all frames in the tag; otherwise returns all frames
whose frameid matches KEY.
If ORIG_ORDER is True, then the frames are returned in their original order.
Otherwise the frames are sorted in canonical order according to the frame_order
field of this tag.
""" |
if key is not None:
# If there are multiple frames, then they are already in original order.
key = self._normalize_key(key)
if len(self._frames[key]) == 0:
raise KeyError("Key not found: " + repr(key))
return self._frames[key]
frames = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
frames.append(frame)
if orig_order:
key = (lambda frame:
(0, frame.frameno)
if frame.frameno is not None
else (1,))
else:
key = self.frame_order.key
frames.sort(key=key)
return frames |
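A usage sketch against stagger's public entry point; the filename is illustrative:

import stagger

tag = stagger.read_tag('song.mp3')
for frame in tag.frames(orig_order=True):  # file order, not canonical order
    print(frame.frameid)
print(tag.frames('TIT2'))                  # raises KeyError if no title frame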
<SYSTEM_TASK:>
Read an ID3v2 tag from a file.
<END_TASK>
<USER_TASK:>
Description:
def read(cls, filename, offset=0):
"""Read an ID3v2 tag from a file.""" |
i = 0
with fileutil.opened(filename, "rb") as file:
file.seek(offset)
tag = cls()
tag._read_header(file)
for (frameid, bflags, data) in tag._read_frames(file):
if len(data) == 0:
warn("{0}: Ignoring empty frame".format(frameid),
EmptyFrameWarning)
else:
frame = tag._decode_frame(frameid, bflags, data, i)
if frame is not None:
l = tag._frames.setdefault(frame.frameid, [])
l.append(frame)
if file.tell() > tag.offset + tag.size:
break
i += 1
try:
tag._filename = file.name
except AttributeError:
pass
return tag |
<SYSTEM_TASK:>
Convenience method that provides a quick way to add an email grant to a
<END_TASK>
<USER_TASK:>
Description:
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
""" |
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl) |
<SYSTEM_TASK:>
Convenience method that provides a quick way to add a canonical user
<END_TASK>
<USER_TASK:>
Description:
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
""" |
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl) |
<SYSTEM_TASK:>
Convenience method that provides a quick way to add an email group
<END_TASK>
<USER_TASK:>
Description:
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
""" |
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers) |
<SYSTEM_TASK:>
Convenience method that provides a quick way to add a canonical group
<END_TASK>
<USER_TASK:>
Description:
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
""" |
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl) |
<SYSTEM_TASK:>
Store an object in GS using the name of the Key object as the
<END_TASK>
<USER_TASK:>
Description:
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Fewer bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
""" |
provider = self.bucket.connection.provider
if res_upload_handler and size:
# The resumable handler always transfers to the end of the file, so a partial size is not supported.
raise BotoClientError('"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5:
# compute_md5() also sets self.size to the actual
# number of bytes read while computing the digest.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
# If md5 is provided, we still need the size, so
# calculate it from here to the end of the content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size) |
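A hedged upload sketch; the bucket, object, and file names are illustrative, and the tracker file is what lets a failed transfer resume where it left off:

import boto
from boto.gs.resumable_upload_handler import ResumableUploadHandler

conn = boto.connect_gs()  # assumes GS credentials in the boto config
key = conn.get_bucket('my-bucket').new_key('backup.tar.gz')
handler = ResumableUploadHandler(tracker_file_name='/tmp/upload.tracker')
with open('backup.tar.gz', 'rb') as fp:
    key.set_contents_from_file(fp, res_upload_handler=handler)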
<SYSTEM_TASK:>
Store an object in S3 using the name of the Key object as the
<END_TASK>
<USER_TASK:>
Description:
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
""" |
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5)
fp.close()
return r |
<SYSTEM_TASK:>
Describes a single Elastic MapReduce job flow
<END_TASK>
<USER_TASK:>
Description:
def describe_jobflow(self, jobflow_id):
"""
Describes a single Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: The job flow id of interest
""" |
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
return jobflows[0] |
<SYSTEM_TASK:>
Retrieve all the Elastic MapReduce job flows on your account
<END_TASK>
<USER_TASK:>
Description:
def describe_jobflows(self, states=None, jobflow_ids=None,
created_after=None, created_before=None):
"""
Retrieve all the Elastic MapReduce job flows on your account
:type states: list
:param states: A list of strings with job flow states wanted
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
:type created_after: datetime
:param created_after: Lower bound on job flow creation time
:type created_before: datetime
:param created_before: Upper bound on job flow creation time
""" |
params = {}
if states:
self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)]) |
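A hedged sketch listing recent flows; the attribute names follow boto's JobFlow result object:

from datetime import datetime, timedelta
import boto

emr = boto.connect_emr()  # assumes AWS credentials in the boto config
recent = emr.describe_jobflows(
    states=['RUNNING', 'WAITING'],
    created_after=datetime.utcnow() - timedelta(days=7))
for flow in recent:
    print(flow.jobflowid, flow.state)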
<SYSTEM_TASK:>
Terminate an Elastic MapReduce job flow
<END_TASK>
<USER_TASK:>
Description:
def terminate_jobflows(self, jobflow_ids):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
""" |
params = {}
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
return self.get_status('TerminateJobFlows', params, verb='POST') |
<SYSTEM_TASK:>
Adds steps to a jobflow
<END_TASK>
<USER_TASK:>
Description:
def add_jobflow_steps(self, jobflow_id, steps):
"""
Adds steps to a jobflow
:type jobflow_id: str
:param jobflow_id: The job flow id
:type steps: list(boto.emr.Step)
:param steps: A list of steps to add to the job
""" |
if not isinstance(steps, list):
steps = [steps]
params = {}
params['JobFlowId'] = jobflow_id
# Step args
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
return self.get_object(
'AddJobFlowSteps', params, RunJobFlowResponse, verb='POST') |
<SYSTEM_TASK:>
Adds instance groups to a running cluster.
<END_TASK>
<USER_TASK:>
Description:
def add_instance_groups(self, jobflow_id, instance_groups):
"""
Adds instance groups to a running cluster.
:type jobflow_id: str
:param jobflow_id: The id of the jobflow which will take the
new instance groups
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: A list of instance groups to add to the job
""" |
if not isinstance(instance_groups, list):
instance_groups = [instance_groups]
params = {}
params['JobFlowId'] = jobflow_id
params.update(self._build_instance_group_list_args(instance_groups))
return self.get_object('AddInstanceGroups', params,
AddInstanceGroupsResponse, verb='POST') |
<SYSTEM_TASK:>
Modify the number of nodes and configuration settings in an
<END_TASK>
<USER_TASK:>
Description:
def modify_instance_groups(self, instance_group_ids, new_sizes):
"""
Modify the number of nodes and configuration settings in an
instance group.
:type instance_group_ids: list(str)
:param instance_group_ids: A list of the IDs of the instance
groups to be modified
:type new_sizes: list(int)
:param new_sizes: A list of the new sizes for each instance group
""" |
if not isinstance(instance_group_ids, list):
instance_group_ids = [instance_group_ids]
if not isinstance(new_sizes, list):
new_sizes = [new_sizes]
instance_groups = zip(instance_group_ids, new_sizes)
params = {}
for k, ig in enumerate(instance_groups):
# Note: Amazon's example uses InstanceRequestCount, while the
# API documentation says InstanceCount; the latter is used here.
params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0]
params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1]
return self.get_object('ModifyInstanceGroups', params,
ModifyInstanceGroupsResponse, verb='POST') |
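Resizing two groups in one call; the group IDs below are illustrative, and emr is the connection from the sketch above:

# Grow the core group to 10 nodes and shrink the task group to 2.
emr.modify_instance_groups(['ig-CORE123', 'ig-TASK456'], [10, 2])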
<SYSTEM_TASK:>
Set termination protection on specified Elastic MapReduce job flows
<END_TASK>
<USER_TASK:>
Description:
def set_termination_protection(self, jobflow_id,
termination_protection_status):
"""
Set termination protection on specified Elastic MapReduce job flows
:type jobflow_id: str
:param jobflow_id: The ID of the job flow to protect
:type termination_protection_status: bool
:param termination_protection_status: Termination protection status
""" |
assert termination_protection_status in (True, False)
params = {}
params['TerminationProtected'] = 'true' if termination_protection_status else 'false'
self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
return self.get_status('SetTerminationProtection', params, verb='POST') |
<SYSTEM_TASK:>
Takes an InstanceGroup; returns a dict that, when its keys are
<END_TASK>
<USER_TASK:>
Description:
def _build_instance_group_args(self, instance_group):
"""
Takes an InstanceGroup; returns a dict that, when its keys are
properly prefixed, can be used for describing InstanceGroups in
RunJobFlow or AddInstanceGroups requests.
""" |
params = {
'InstanceCount' : instance_group.num_instances,
'InstanceRole' : instance_group.role,
'InstanceType' : instance_group.type,
'Name' : instance_group.name,
'Market' : instance_group.market
}
if instance_group.market == 'SPOT':
params['BidPrice'] = instance_group.bidprice
return params |
<SYSTEM_TASK:>
Takes a list of InstanceGroups, or a single InstanceGroup. Returns
<END_TASK>
<USER_TASK:>
Description:
def _build_instance_group_list_args(self, instance_groups):
"""
Takes a list of InstanceGroups, or a single InstanceGroup. Returns
a comparable dict for use in making a RunJobFlow or AddInstanceGroups
request.
""" |
if not isinstance(instance_groups, list):
instance_groups = [instance_groups]
params = {}
for i, instance_group in enumerate(instance_groups):
ig_dict = self._build_instance_group_args(instance_group)
for key, value in ig_dict.iteritems():
params['InstanceGroups.member.%d.%s' % (i+1, key)] = value
return params |
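To illustrate the flattened parameter layout, a sketch assuming boto's InstanceGroup constructor takes (num_instances, role, type, market, name), with emr the connection from the earlier sketch:

from boto.emr.instance_group import InstanceGroup

group = InstanceGroup(2, 'CORE', 'm1.small', 'ON_DEMAND', 'workers')
params = emr._build_instance_group_list_args(group)  # single group is wrapped in a list
# e.g. params['InstanceGroups.member.1.InstanceRole'] == 'CORE'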