text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Get all parameter groups associated with your account in a region.
<END_TASK>
<USER_TASK:>
Description:
def get_all_dbparameter_groups(self, groupname=None, max_records=None,
                               marker=None):
    """
    Retrieve the DBParameter groups associated with this account
    in a region.

    :type groupname: str
    :param groupname: If given, only the named DBParameter group is
        retrieved; otherwise all DBParameter groups are returned.

    :type max_records: int
    :param max_records: The maximum number of records to be returned
        per page (default is 100). When more results are available,
        a marker is included in the response for fetching the rest.

    :type marker: str
    :param marker: The pagination marker provided by a previous request.

    :rtype: list
    :return: A list of :class:`boto.rds.parametergroup.ParameterGroup`
    """
    # Include only the options the caller actually supplied.
    params = {}
    for key, value in (('DBParameterGroupName', groupname),
                       ('MaxRecords', max_records),
                       ('Marker', marker)):
        if value:
            params[key] = value
    return self.get_list('DescribeDBParameterGroups', params,
                         [('DBParameterGroup', ParameterGroup)])
<SYSTEM_TASK:>
Get all parameters associated with a ParameterGroup
<END_TASK>
<USER_TASK:>
Description:
def get_all_dbparameters(self, groupname, source=None,
                         max_records=None, marker=None):
    """
    Fetch all parameters associated with a ParameterGroup.

    :type groupname: str
    :param groupname: The name of the DBParameter group to retrieve.

    :type source: str
    :param source: Specifies which parameters to return. If not
        specified, all parameters will be returned.
        Valid values are: user|system|engine-default

    :type max_records: int
    :param max_records: The maximum number of records to be returned
        per page (default is 100). When more results are available,
        a marker is included in the response for fetching the rest.

    :type marker: str
    :param marker: The pagination marker provided by a previous request.

    :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
    :return: The ParameterGroup
    """
    params = {'DBParameterGroupName': groupname}
    for key, value in (('Source', source),
                       ('MaxRecords', max_records),
                       ('Marker', marker)):
        if value:
            params[key] = value
    group = self.get_object('DescribeDBParameters', params, ParameterGroup)
    # The Describe call does not echo the group name back, so stamp it
    # on the result for the caller's convenience.
    group.name = groupname
    return group
<SYSTEM_TASK:>
Create a new dbparameter group for your account.
<END_TASK>
<USER_TASK:>
Description:
def create_parameter_group(self, name, engine='MySQL5.1', description=''):
    """
    Create a new dbparameter group for your account.

    :type name: string
    :param name: The name of the new dbparameter group

    :type engine: str
    :param engine: Name of database engine.

    :type description: string
    :param description: The description of the new dbparameter group

    :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
    :return: The newly created ParameterGroup
    """
    params = {}
    params['DBParameterGroupName'] = name
    params['DBParameterGroupFamily'] = engine
    params['Description'] = description
    return self.get_object('CreateDBParameterGroup', params, ParameterGroup)
<SYSTEM_TASK:>
Modify a parameter group for your account.
<END_TASK>
<USER_TASK:>
Description:
def modify_parameter_group(self, name, parameters=None):
    """
    Modify a parameter group for your account.

    :type name: string
    :param name: The name of the parameter group to modify

    :type parameters: list of :class:`boto.rds.parametergroup.Parameter`
    :param parameters: The new parameter values. May be omitted, in
        which case no parameter changes are sent with the request.

    :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
    :return: The modified ParameterGroup
    """
    params = {'DBParameterGroupName': name}
    # Guard the default: the original unconditionally called
    # len(parameters), which raised TypeError when parameters was None.
    if parameters:
        for i, parameter in enumerate(parameters):
            # Parameter indices in the request are 1-based.
            parameter.merge(params, i + 1)
    return self.get_list('ModifyDBParameterGroup', params,
                         ParameterGroup, verb='POST')
<SYSTEM_TASK:>
Resets some or all of the parameters of a ParameterGroup to the
<END_TASK>
<USER_TASK:>
Description:
def reset_parameter_group(self, name, reset_all_params=False,
                          parameters=None):
    """
    Reset some or all of the parameters of a ParameterGroup to their
    default values.

    :type name: string
    :param name: The name of the ParameterGroup to reset

    :type reset_all_params: bool
    :param reset_all_params: If True, reset every parameter in the group.

    :type parameters: list of :class:`boto.rds.parametergroup.Parameter`
    :param parameters: The specific parameters to reset. If not supplied,
        all parameters will be reset.
    """
    params = {'DBParameterGroupName': name}
    params['ResetAllParameters'] = 'true' if reset_all_params else 'false'
    # Guard the default: the original unconditionally called
    # len(parameters), which raised TypeError when parameters was
    # omitted (e.g. together with reset_all_params=True, the use case
    # the docstring promises to support).
    if parameters:
        for i, parameter in enumerate(parameters):
            # Parameter indices in the request are 1-based.
            parameter.merge(params, i + 1)
    return self.get_status('ResetDBParameterGroup', params)
<SYSTEM_TASK:>
Add a new rule to an existing security group.
<END_TASK>
<USER_TASK:>
Description:
def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
                               ec2_security_group_name=None,
                               ec2_security_group_owner_id=None):
    """
    Add a new rule to an existing security group.
    You need to pass in either ec2_security_group_name and
    ec2_security_group_owner_id OR a CIDR block but not both.

    :type group_name: string
    :param group_name: The name of the security group you are adding
        the rule to.

    :type ec2_security_group_name: string
    :param ec2_security_group_name: The name of the EC2 security group
        you are granting access to.

    :type ec2_security_group_owner_id: string
    :param ec2_security_group_owner_id: The ID of the owner of the EC2
        security group you are granting access to.

    :type cidr_ip: string
    :param cidr_ip: The CIDR block you are providing access to.
        See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing

    :rtype: bool
    :return: True if successful.
    """
    params = {'DBSecurityGroupName': group_name}
    if ec2_security_group_name:
        params['EC2SecurityGroupName'] = ec2_security_group_name
    if ec2_security_group_owner_id:
        params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
    if cidr_ip:
        # Pass the CIDR through unmodified. The request layer URL-encodes
        # all parameters, so pre-quoting with urllib.quote double-encoded
        # the '/' in the CIDR -- and was inconsistent with
        # revoke_dbsecurity_group, which sends the raw value.
        params['CIDRIP'] = cidr_ip
    return self.get_object('AuthorizeDBSecurityGroupIngress', params,
                           DBSecurityGroup)
<SYSTEM_TASK:>
Remove an existing rule from an existing security group.
<END_TASK>
<USER_TASK:>
Description:
def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None,
                            ec2_security_group_owner_id=None, cidr_ip=None):
    """
    Remove an existing rule from an existing security group.
    Pass either ec2_security_group_name plus
    ec2_security_group_owner_id, OR a CIDR block.

    :type group_name: string
    :param group_name: The name of the security group you are removing
        the rule from.

    :type ec2_security_group_name: string
    :param ec2_security_group_name: The name of the EC2 security group
        from which you are removing access.

    :type ec2_security_group_owner_id: string
    :param ec2_security_group_owner_id: The ID of the owner of the EC2
        security group from which you are removing access.

    :type cidr_ip: string
    :param cidr_ip: The CIDR block from which you are removing access.
        See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing

    :rtype: bool
    :return: True if successful.
    """
    # Include only the options the caller actually supplied.
    params = {'DBSecurityGroupName': group_name}
    for key, value in (('EC2SecurityGroupName', ec2_security_group_name),
                       ('EC2SecurityGroupOwnerId',
                        ec2_security_group_owner_id),
                       ('CIDRIP', cidr_ip)):
        if value:
            params[key] = value
    return self.get_object('RevokeDBSecurityGroupIngress', params,
                           DBSecurityGroup)
<SYSTEM_TASK:>
Get information about DB Snapshots.
<END_TASK>
<USER_TASK:>
Description:
def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None,
                        max_records=None, marker=None):
    """
    Get information about DB Snapshots.

    :type snapshot_id: str
    :param snapshot_id: The unique identifier of an RDS snapshot.
        If not provided, all RDS snapshots will be returned.

    :type instance_id: str
    :param instance_id: The identifier of a DBInstance. If provided,
        only the DBSnapshots related to that instance are returned;
        otherwise all RDS snapshots are returned.

    :type max_records: int
    :param max_records: The maximum number of records to be returned
        per page (default is 100). When more results are available,
        a marker is included in the response for fetching the rest.

    :type marker: str
    :param marker: The pagination marker provided by a previous request.

    :rtype: list
    :return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot`
    """
    # Include only the filters the caller actually supplied.
    params = {}
    for key, value in (('DBSnapshotIdentifier', snapshot_id),
                       ('DBInstanceIdentifier', instance_id),
                       ('MaxRecords', max_records),
                       ('Marker', marker)):
        if value:
            params[key] = value
    return self.get_list('DescribeDBSnapshots', params,
                         [('DBSnapshot', DBSnapshot)])
<SYSTEM_TASK:>
Create a new DB snapshot.
<END_TASK>
<USER_TASK:>
Description:
def create_dbsnapshot(self, snapshot_id, dbinstance_id):
    """
    Create a new DB snapshot.

    :type snapshot_id: string
    :param snapshot_id: The identifier for the new DBSnapshot

    :type dbinstance_id: string
    :param dbinstance_id: The identifier of the RDS instance from
        which the snapshot is created.

    :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
    :return: The newly created DBSnapshot
    """
    params = {
        'DBSnapshotIdentifier': snapshot_id,
        'DBInstanceIdentifier': dbinstance_id,
    }
    return self.get_object('CreateDBSnapshot', params, DBSnapshot)
<SYSTEM_TASK:>
Create a new DBInstance from a DB snapshot.
<END_TASK>
<USER_TASK:>
Description:
def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id,
                                       instance_class, port=None,
                                       availability_zone=None):
    """
    Create a new DBInstance from a DB snapshot.

    :type identifier: string
    :param identifier: The identifier for the DBSnapshot

    :type instance_id: string
    :param instance_id: The identifier for the new DBInstance being
        restored from the snapshot.

    :type instance_class: str
    :param instance_class: The compute and memory capacity of the
        DBInstance. Valid values are:
        db.m1.small | db.m1.large | db.m1.xlarge |
        db.m2.2xlarge | db.m2.4xlarge

    :type port: int
    :param port: Port number on which database accepts connections.
        Valid values [1115-65535]. Defaults to 3306.

    :type availability_zone: str
    :param availability_zone: Name of the availability zone to place
        DBInstance into.

    :rtype: :class:`boto.rds.dbinstance.DBInstance`
    :return: The newly created DBInstance
    """
    params = {'DBSnapshotIdentifier': identifier,
              'DBInstanceIdentifier': instance_id,
              'DBInstanceClass': instance_class}
    # Optional settings are only sent when supplied.
    for key, value in (('Port', port),
                       ('AvailabilityZone', availability_zone)):
        if value:
            params[key] = value
    return self.get_object('RestoreDBInstanceFromDBSnapshot',
                           params, DBInstance)
<SYSTEM_TASK:>
Create a new DBInstance from a point in time.
<END_TASK>
<USER_TASK:>
Description:
def restore_dbinstance_from_point_in_time(self, source_instance_id,
                                          target_instance_id,
                                          use_latest=False,
                                          restore_time=None,
                                          dbinstance_class=None,
                                          port=None,
                                          availability_zone=None):
    """
    Create a new DBInstance from a point in time.

    :type source_instance_id: string
    :param source_instance_id: The identifier for the source DBInstance.

    :type target_instance_id: string
    :param target_instance_id: The identifier of the new DBInstance.

    :type use_latest: bool
    :param use_latest: If True, the latest restorable snapshot
        available will be used.

    :type restore_time: datetime
    :param restore_time: The date and time to restore from. Only
        used if use_latest is False.

    :type dbinstance_class: str
    :param dbinstance_class: The compute and memory capacity of the
        DBInstance. Valid values are:
        db.m1.small | db.m1.large | db.m1.xlarge |
        db.m2.2xlarge | db.m2.4xlarge

    :type port: int
    :param port: Port number on which database accepts connections.
        Valid values [1115-65535]. Defaults to 3306.

    :type availability_zone: str
    :param availability_zone: Name of the availability zone to place
        DBInstance into.

    :rtype: :class:`boto.rds.dbinstance.DBInstance`
    :return: The newly created DBInstance
    """
    params = {'SourceDBInstanceIdentifier': source_instance_id,
              'TargetDBInstanceIdentifier': target_instance_id}
    # use_latest takes precedence over an explicit restore_time.
    if use_latest:
        params['UseLatestRestorableTime'] = 'true'
    elif restore_time:
        params['RestoreTime'] = restore_time.isoformat()
    for key, value in (('DBInstanceClass', dbinstance_class),
                       ('Port', port),
                       ('AvailabilityZone', availability_zone)):
        if value:
            params[key] = value
    return self.get_object('RestoreDBInstanceToPointInTime',
                           params, DBInstance)
<SYSTEM_TASK:>
Get information about events related to your DBInstances,
<END_TASK>
<USER_TASK:>
Description:
def get_all_events(self, source_identifier=None, source_type=None,
                   start_time=None, end_time=None,
                   max_records=None, marker=None):
    """
    Get information about events related to your DBInstances,
    DBSecurityGroups and DBParameterGroups.

    :type source_identifier: str
    :param source_identifier: If supplied, the events returned will be
        limited to those that apply to the identified source. The value
        of this parameter depends on the value of source_type. If
        neither parameter is specified, all events in the time span
        will be returned.

    :type source_type: str
    :param source_type: Specifies how the source_identifier should
        be interpreted. Valid values are:
        db-instance | db-security-group |
        db-parameter-group | db-snapshot

    :type start_time: datetime
    :param start_time: The beginning of the time interval for events.
        If not supplied, all available events will be returned.

    :type end_time: datetime
    :param end_time: The ending of the time interval for events.
        If not supplied, all available events will be returned.

    :type max_records: int
    :param max_records: The maximum number of records to be returned
        per page (default is 100). When more results are available,
        a marker is included in the response for fetching the rest.

    :type marker: str
    :param marker: The pagination marker provided by a previous request.

    :rtype: list
    :return: A list of class:`boto.rds.event.Event`
    """
    params = {}
    # Source filtering requires both pieces; a lone identifier or type
    # is silently ignored.
    if source_identifier and source_type:
        params['SourceIdentifier'] = source_identifier
        params['SourceType'] = source_type
    if start_time:
        params['StartTime'] = start_time.isoformat()
    if end_time:
        params['EndTime'] = end_time.isoformat()
    for key, value in (('MaxRecords', max_records), ('Marker', marker)):
        if value:
            params[key] = value
    return self.get_list('DescribeEvents', params, [('Event', Event)])
<SYSTEM_TASK:>
Read an ID3v1 tag from a file.
<END_TASK>
<USER_TASK:>
Description:
def read(cls, filename, offset=None, encoding="iso-8859-1"):
    """Read an ID3v1 tag from a file.

    If *offset* is None the tag is read from the last 128 bytes of the
    file (the standard ID3v1 location); otherwise it is read from that
    absolute offset.
    """
    with fileutil.opened(filename, "rb") as fileobj:
        if offset is None:
            # ID3v1 tags occupy the final 128 bytes of the file.
            fileobj.seek(-128, 2)
        else:
            fileobj.seek(offset)
        raw = fileobj.read(128)
    return cls.decode(raw, encoding=encoding)
<SYSTEM_TASK:>
sets or changes a bucket's acl. We include a version_id argument
<END_TASK>
<USER_TASK:>
Description:
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
    """sets or changes a bucket's acl. We include a version_id argument
    to support a polymorphic interface for callers, however,
    version_id is not relevant for Google Cloud Storage buckets
    and is therefore ignored here."""
    # S3 Policy objects cannot be applied to a GS ACL.
    if isinstance(acl_or_str, Policy):
        raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
    if isinstance(acl_or_str, ACL):
        self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers)
        return
    # Anything else is treated as the name of a canned ACL.
    self.set_canned_acl(acl_or_str, key_name, headers=headers)
<SYSTEM_TASK:>
sets or changes a bucket's default object acl
<END_TASK>
<USER_TASK:>
Description:
def set_def_acl(self, acl_or_str, key_name='', headers=None):
    """sets or changes a bucket's default object acl"""
    # S3 Policy objects cannot be applied to a GS ACL.
    if isinstance(acl_or_str, Policy):
        raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
    if isinstance(acl_or_str, ACL):
        self.set_def_xml_acl(acl_or_str.to_xml(), key_name, headers=headers)
        return
    # Anything else is treated as the name of a canned ACL.
    self.set_def_canned_acl(acl_or_str, key_name, headers=headers)
<SYSTEM_TASK:>
returns a bucket's acl. We include a version_id argument
<END_TASK>
<USER_TASK:>
Description:
def get_acl(self, key_name='', headers=None, version_id=None):
    """returns a bucket's acl. We include a version_id argument
    to support a polymorphic interface for callers, however,
    version_id is not relevant for Google Cloud Storage buckets
    and is therefore ignored here."""
    # Delegate to the shared helper, requesting the standard ACL
    # (as opposed to the default-object ACL).
    acl = self.get_acl_helper(key_name, headers, STANDARD_ACL)
    return acl
<SYSTEM_TASK:>
sets or changes a bucket's default object
<END_TASK>
<USER_TASK:>
Description:
def set_def_xml_acl(self, acl_str, key_name='', headers=None):
    """sets or changes a bucket's default object acl"""
    # Reuse set_xml_acl, targeting the default-object-ACL query arg.
    return self.set_xml_acl(acl_str, key_name, headers,
                            query_args=DEF_OBJ_ACL)
<SYSTEM_TASK:>
Returns a list of all completed snapshots for this volume ID.
<END_TASK>
<USER_TASK:>
Description:
def get_snapshots(self):
    """
    Return all completed snapshots for this volume ID (including any
    past volume IDs this volume has had), sorted oldest-first by
    snapshot start time.

    Each completed snapshot (progress == '100%') is annotated with a
    parsed ``date`` attribute and ``keep = True`` so a later trimming
    pass can mark which snapshots to discard.
    """
    ec2 = self.get_ec2_connection()
    rs = ec2.get_all_snapshots()
    all_vols = [self.volume_id] + self.past_volume_ids
    snaps = []
    for snapshot in rs:
        if snapshot.volume_id in all_vols:
            if snapshot.progress == '100%':
                snapshot.date = boto.utils.parse_ts(snapshot.start_time)
                snapshot.keep = True
                snaps.append(snapshot)
    # sort(cmp=...) is deprecated (and removed in Python 3); a key
    # function is equivalent here and avoids per-comparison callbacks.
    snaps.sort(key=lambda snap: snap.date)
    return snaps
<SYSTEM_TASK:>
Trim the number of snapshots for this volume. This method always
<END_TASK>
<USER_TASK:>
Description:
def trim_snapshots(self, delete=False):
    """
    Trim the number of snapshots for this volume. This method always
    keeps the oldest snapshot. It then uses the parameters passed in
    to determine how many others should be kept.

    The algorithm is to keep all snapshots from the current day. Then
    it will keep the first snapshot of the day for the previous seven days.
    Then, it will keep the first snapshot of the week for the previous
    four weeks. After that, it will keep the first snapshot of the month
    for as many months as there are.

    :type delete: bool
    :param delete: If True, snapshots not marked ``keep`` are actually
        deleted; otherwise they are only flagged via their ``keep``
        attribute.
    """
    snaps = self.get_snapshots()
    # Always keep the oldest and the newest
    if len(snaps) <= 2:
        return snaps
    snaps = snaps[1:-1]
    now = datetime.datetime.now(snaps[0].date.tzinfo)
    midnight = datetime.datetime(year=now.year, month=now.month,
                                 day=now.day, tzinfo=now.tzinfo)
    # Keep the first snapshot from each day of the previous week.
    # The extra hour of slack tolerates slightly-late daily snapshots.
    one_week = datetime.timedelta(days=7, seconds=60*60)
    # (leftover debug print statements removed here; they polluted stdout)
    previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight)
    if not previous_week:
        return snaps
    current_day = None
    for snap in previous_week:
        if current_day and current_day == snap.date.day:
            snap.keep = False
        else:
            current_day = snap.date.day
    # Get ourselves onto the next full week boundary
    if previous_week:
        week_boundary = previous_week[0].date
        if week_boundary.weekday() != 0:
            delta = datetime.timedelta(days=week_boundary.weekday())
            week_boundary = week_boundary - delta
        # Keep one within this partial week
        partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date)
        if len(partial_week) > 1:
            for snap in partial_week[1:]:
                snap.keep = False
    # Keep the first snapshot of each week for the previous 4 weeks
    for i in range(0, 4):
        weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary)
        if len(weeks_worth) > 1:
            for snap in weeks_worth[1:]:
                snap.keep = False
        week_boundary = week_boundary - one_week
    # Now look through all remaining snaps and keep one per month
    remainder = self.get_snapshot_range(snaps, end_date=week_boundary)
    current_month = None
    for snap in remainder:
        if current_month and current_month == snap.date.month:
            snap.keep = False
        else:
            current_month = snap.date.month
    if delete:
        for snap in snaps:
            if not snap.keep:
                boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name))
                snap.delete()
    return snaps
<SYSTEM_TASK:>
Calculate token expiration
<END_TASK>
<USER_TASK:>
Description:
def calculate_expiration(self, token):
"""
Calculate token expiration
return expiration if the token need to set expiration or refresh,
otherwise return None.
Args:
token (dict): a decoded token
""" |
if not token:
return None
now = datetime.utcnow()
time_to_live = self.config["expiration"]
if "exp" not in token:
return now + timedelta(seconds=time_to_live)
elif self.config["refresh"]:
exp = datetime.utcfromtimestamp(token["exp"])
# 0.5: reduce refresh frequent
if exp - now < timedelta(seconds=0.5 * time_to_live):
return now + timedelta(seconds=time_to_live)
return None |
<SYSTEM_TASK:>
Checks for the existence of an AWS credential file.
<END_TASK>
<USER_TASK:>
Description:
def check_for_credential_file(self):
    """
    Check for the existence of an AWS credential file.

    If the environment variable AWS_CREDENTIAL_FILE is set and points
    to a readable file, that file is parsed for AWSAccessKeyId and
    AWSSecretKey entries, which are stored into self.args.

    Note that if credentials have been explicitly passed into the
    class constructor, those values always take precedence.
    """
    if 'AWS_CREDENTIAL_FILE' not in os.environ:
        return
    path = os.environ['AWS_CREDENTIAL_FILE']
    path = os.path.expanduser(path)
    path = os.path.expandvars(path)
    if not os.path.isfile(path):
        # Written as a call so the code also parses on Python 3;
        # output is unchanged on Python 2 for a single argument.
        print('Warning: unable to read AWS_CREDENTIAL_FILE')
        return
    # Context manager ensures the handle is closed even if parsing
    # raises (the original leaked the file object on error).
    with open(path) as fp:
        lines = fp.readlines()
    for line in lines:
        # Skip comment lines and lines without a key=value separator.
        if line[0] == '#' or '=' not in line:
            continue
        name, value = line.split('=', 1)
        name = name.strip()
        # Explicit constructor arguments always win over the file.
        if name == 'AWSAccessKeyId':
            if 'aws_access_key_id' not in self.args:
                self.args['aws_access_key_id'] = value.strip()
        elif name == 'AWSSecretKey':
            if 'aws_secret_access_key' not in self.args:
                self.args['aws_secret_access_key'] = value.strip()
<SYSTEM_TASK:>
First checks to see if a url argument was explicitly passed
<END_TASK>
<USER_TASK:>
Description:
def check_for_env_url(self):
    """
    First check whether a url argument was explicitly passed in. If
    so, that will be used. If not, check for the existence of the
    environment variable specified in ENV_URL. If this is set, it
    should contain a fully qualified URL to the service you want to use.

    Note that any values passed explicitly to the class constructor
    will take precedence.
    """
    url = self.args.get('url', None)
    if url:
        del self.args['url']
    if not url and self.EnvURL in os.environ:
        url = os.environ[self.EnvURL]
    if not url:
        return
    parsed = urlparse.urlparse(url)
    # Infer SSL from the scheme unless the caller already chose.
    self.args.setdefault('is_secure', parsed.scheme == 'https')
    pieces = parsed.netloc.split(':')
    host = pieces[0]
    port = int(pieces[1]) if len(pieces) > 1 else None
    if 'host' not in self.args:
        self.args['host'] = host
    if port and 'port' not in self.args:
        self.args['port'] = port
    if parsed.path and 'path' not in self.args:
        self.args['path'] = parsed.path
<SYSTEM_TASK:>
Find objects in Fedora with the specified content model.
<END_TASK>
<USER_TASK:>
Description:
def get_objects_with_cmodel(self, cmodel_uri, type=None):
    """
    Find objects in Fedora with the specified content model.

    :param cmodel_uri: content model URI (should be full URI in
        info:fedora/pid:### format)
    :param type: type of object to return (e.g., class:`DigitalObject`)
    :rtype: list of objects
    """
    # Ask the resource index for every subject bearing this model,
    # then hydrate each URI into the requested object type.
    subject_uris = self.risearch.get_subjects(modelns.hasModel, cmodel_uri)
    return [self.get_object(subject_uri, type)
            for subject_uri in subject_uris]
<SYSTEM_TASK:>
Initialize a single object from Fedora, or create a new one, with the
<END_TASK>
<USER_TASK:>
Description:
def get_object(self, pid=None, type=None, create=None):
    """
    Initialize a single object from Fedora, or create a new one, with the
    same Fedora configuration and credentials.

    :param pid: pid of the object to request, or a function that can be
        called to get one. if not specified, :meth:`get_next_pid`
        will be called if a pid is needed
    :param type: type of object to return; defaults to :class:`DigitalObject`
    :param create: boolean: create a new object? (if not specified,
        defaults to False when pid is specified, and True when it is not)
    :rtype: single object of the type specified
    """
    objtype = type or self.default_object_type
    # Unless the caller said otherwise, create only when no pid was given.
    if create is None:
        create = pid is None
    return objtype(self.api, pid, create,
                   default_pidspace=self.default_pidspace)
<SYSTEM_TASK:>
Create the write buffer and cache directory.
<END_TASK>
<USER_TASK:>
Description:
def create(self):
    """Create the write buffer and the on-disk cache directory."""
    buffered = not self._sync
    if buffered and not hasattr(self, '_buffer'):
        # Survive repeated create() calls without clobbering an
        # existing buffer.
        self._buffer = {}
    if not os.path.exists(self.cache_dir):
        os.makedirs(self.cache_dir)
<SYSTEM_TASK:>
Delete the write buffer and cache directory.
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
    """Delete the write buffer and cache directory."""
    if not self._sync:
        # Drop the in-memory write buffer entirely (attribute removed,
        # not just cleared, mirroring create()'s hasattr check).
        del self._buffer
    # Remove the cache directory and every file inside it.
    shutil.rmtree(self.cache_dir)
<SYSTEM_TASK:>
Sync the write buffer, then close the cache.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Sync the write buffer, then close the cache.

    If a closed :class:`FileCache` object's methods are called, a
    :exc:`ValueError` will be raised.
    """
    self.sync()
    # Swap every entry point for the _closed stub so later use raises.
    # NOTE(review): assigning dunders on the *instance* does not change
    # special-method lookup (Python resolves those on the type), so
    # operator syntax like cache[key] may bypass these stubs — confirm
    # this matches the intended behavior.
    for attr in ('sync', 'create', 'delete',
                 '_write_to_file', '_read_to_file',
                 '_key_to_filename', '_filename_to_key',
                 '__getitem__', '__setitem__', '__delitem__',
                 '__iter__', '__len__', '__contains__'):
        setattr(self, attr, self._closed)
<SYSTEM_TASK:>
Sync the write buffer with the cache files and clear the buffer.
<END_TASK>
<USER_TASK:>
Description:
def sync(self):
    """Sync the write buffer with the cache files and clear the buffer.

    If the :class:`FileCache` object was opened with the optional ``'s'``
    *flag* argument, then calling :meth:`sync` will do nothing.
    """
    if self._sync:
        # Opened in sync mode: writes already went straight to disk.
        return
    # Temporarily flip into sync mode while flushing.
    self._sync = True
    for ekey, value in self._buffer.items():
        self._write_to_file(self._key_to_filename(ekey), value)
    self._buffer.clear()
    self._sync = False
<SYSTEM_TASK:>
Decode key using hex_codec to retrieve the original key.
<END_TASK>
<USER_TASK:>
Description:
def _decode_key(self, key):
"""Decode key using hex_codec to retrieve the original key.
Keys are returned as :class:`str` if serialization is enabled.
Keys are returned as :class:`bytes` if serialization is disabled.
""" |
bkey = codecs.decode(key.encode(self._keyencoding), 'hex_codec')
return bkey.decode(self._keyencoding) if self._serialize else bkey |
<SYSTEM_TASK:>
Return a list of absolute cache filenames
<END_TASK>
<USER_TASK:>
Description:
def _all_filenames(self):
"""Return a list of absolute cache filenames""" |
try:
return [os.path.join(self.cache_dir, filename) for filename in
os.listdir(self.cache_dir)]
except (FileNotFoundError, OSError):
return [] |
<SYSTEM_TASK:>
Return a list of all encoded key names.
<END_TASK>
<USER_TASK:>
Description:
def _all_keys(self):
"""Return a list of all encoded key names.""" |
file_keys = [self._filename_to_key(fn) for fn in self._all_filenames()]
if self._sync:
return set(file_keys)
else:
return set(file_keys + list(self._buffer)) |
<SYSTEM_TASK:>
Get a single datastream on a Fedora object; optionally, get the version
<END_TASK>
<USER_TASK:>
Description:
def getDatastreamDissemination(self, pid, dsID, asOfDateTime=None, stream=False,
                               head=False, rqst_headers=None):
    """Get a single datastream on a Fedora object; optionally, get the version
    as of a particular date time.

    :param pid: object pid
    :param dsID: datastream id
    :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
        so it can be converted to a date-time format Fedora can understand
    :param stream: return a streaming response (default: False); use
        is recommended for large datastreams
    :param head: return a HEAD request instead of GET (default: False)
    :param rqst_headers: request headers to be passed through to Fedora,
        such as http range requests
    :rtype: :class:`requests.models.Response`
    """
    # REST API: /objects/{pid}/datastreams/{dsID}/content ? [asOfDateTime]
    if rqst_headers is None:
        rqst_headers = {}
    http_args = {}
    if asOfDateTime:
        http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
    url = 'objects/%(pid)s/datastreams/%(dsid)s/content' % \
        {'pid': pid, 'dsid': dsID}
    # HEAD reuses the same URL and arguments as GET.
    reqmethod = self.head if head else self.get
    return reqmethod(url, params=http_args, stream=stream, headers=rqst_headers)
<SYSTEM_TASK:>
Get top-level information about a single Fedora object; optionally,
<END_TASK>
<USER_TASK:>
Description:
def getObjectProfile(self, pid, asOfDateTime=None):
    """Get top-level information about a single Fedora object; optionally,
    retrieve information as of a particular date-time.

    :param pid: object pid
    :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
        so it can be converted to a date-time format Fedora can understand
    :rtype: :class:`requests.models.Response`
    """
    # REST API: /objects/{pid} ? [format] [asOfDateTime]
    http_args = {}
    if asOfDateTime:
        http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
    # Always request the XML response format.
    http_args.update(self.format_xml)
    profile_url = 'objects/%(pid)s' % {'pid': pid}
    return self.get(profile_url, params=http_args)
<SYSTEM_TASK:>
Get information about a single datastream on a Fedora object; optionally,
<END_TASK>
<USER_TASK:>
Description:
def getDatastream(self, pid, dsID, asOfDateTime=None, validateChecksum=False):
    """Get information about a single datastream on a Fedora object; optionally,
    get information for the version of the datastream as of a particular date time.

    :param pid: object pid
    :param dsID: datastream id
    :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
        so it can be converted to a date-time format Fedora can understand
    :param validateChecksum: boolean; if True, request Fedora to recalculate
        and verify the stored checksum against actual data
    :rtype: :class:`requests.models.Response`
    """
    # REST API: /objects/{pid}/datastreams/{dsID}
    #           ? [asOfDateTime] [format] [validateChecksum]
    http_args = {}
    if validateChecksum:
        # fedora only responds to the lower-case string form
        http_args['validateChecksum'] = str(validateChecksum).lower()
    if asOfDateTime:
        http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
    # Always request the XML response format.
    http_args.update(self.format_xml)
    ds_url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
    return self.get(ds_url, params=http_args)
<SYSTEM_TASK:>
Ingest a new object into Fedora. Returns the pid of the new object on success.
<END_TASK>
<USER_TASK:>
Description:
def ingest(self, text, logMessage=None):
    """Ingest a new object into Fedora. Returns the pid of the new object on success.

    Return response should have a status of 201 Created on success, and
    the content of the response will be the newly created pid.

    Wrapper function for `Fedora REST API ingest <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-ingest>`_

    :param text: full text content of the object to be ingested
    :param logMessage: optional log message
    :rtype: :class:`requests.models.Response`
    """
    # NOTE: ingest supports additional options (label/format/namespace/
    # ownerId, etc.) but we generally set those in the foxml passed in.
    http_args = {'logMessage': logMessage} if logMessage else {}
    # If text is unicode, it needs to be encoded so we can send the
    # data as bytes; otherwise, we get ascii encode errors in httplib/ssl.
    if isinstance(text, six.text_type):
        text = bytes(text.encode('utf-8'))
    return self.post('objects/new', data=text, params=http_args,
                     headers={'Content-Type': 'text/xml'})
<SYSTEM_TASK:>
Purge a datastream, or specific versions of a datastream, from
<END_TASK>
<USER_TASK:>
Description:
def purgeDatastream(self, pid, dsID, startDT=None, endDT=None, logMessage=None,
                    force=False):
    """Purge a datastream, or specific versions of a datastream, from
    a Fedora object. On success, response content will include
    a list of timestamps for the purged datastream versions; on failure,
    response content may contain an error message.

    :param pid: object pid
    :param dsID: datastream ID
    :param startDT: optional start datetime (when purging specific versions)
    :param endDT: optional end datetime (when purging specific versions)
    :param logMessage: optional log message
    :param force: optional; when truthy, passed through to Fedora to
        force the purge
    :rtype: :class:`requests.models.Response`
    """
    # REST API: /objects/{pid}/datastreams/{dsID}
    #           ? [startDT] [endDT] [logMessage] [force]
    http_args = {}
    for arg_name, arg_value in (('logMessage', logMessage),
                                ('startDT', startDT),
                                ('endDT', endDT),
                                ('force', force)):
        if arg_value:
            http_args[arg_name] = arg_value
    url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
    return self.delete(url, params=http_args)
<SYSTEM_TASK:>
Purge an object from Fedora.
<END_TASK>
<USER_TASK:>
Description:
def purgeObject(self, pid, logMessage=None):
    """Purge an object from Fedora.

    Returned response should have a status of 200 on success; response
    content is a timestamp.

    Wrapper function for
    `REST API purgeObject <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-purgeObject>`_

    :param pid: pid of the object to be purged
    :param logMessage: optional log message
    :rtype: :class:`requests.models.Response`
    """
    # pass the log message through only when one was provided
    http_args = {'logMessage': logMessage} if logMessage else {}
    return self.delete('objects/%(pid)s' % {'pid': pid}, params=http_args)
<SYSTEM_TASK:>
Search for all subjects related to the specified predicate and object.
<END_TASK>
<USER_TASK:>
Description:
def get_subjects(self, predicate, object):
    """
    Search for all subjects related to the specified predicate and object.

    :param predicate: predicate to match
    :param object: object to match
    :rtype: generator of subject strings (first member of each matching
        RDF statement)
    """
    for spo_triple in self.spo_search(predicate=predicate, object=object):
        yield str(spo_triple[0])
<SYSTEM_TASK:>
Search for all subjects related to the specified subject and object.
<END_TASK>
<USER_TASK:>
Description:
def get_predicates(self, subject, object):
    """
    Search for all predicates related to the specified subject and object.

    :param subject: subject to match
    :param object: object to match
    :rtype: generator of predicate strings (second member of each
        matching RDF statement)
    """
    # delegate to spo_search and pull the predicate (index 1) out of each
    # matching (subject, predicate, object) statement
    for statement in self.spo_search(subject=subject, object=object):
        yield str(statement[1])
<SYSTEM_TASK:>
Search for all subjects related to the specified subject and predicate.
<END_TASK>
<USER_TASK:>
Description:
def get_objects(self, subject, predicate):
    """
    Search for all objects related to the specified subject and predicate.

    :param subject: subject to match
    :param predicate: predicate to match
    :rtype: generator of object strings (third member of each matching
        RDF statement)
    """
    # delegate to spo_search and pull the object (index 2) out of each
    # matching (subject, predicate, object) statement
    for statement in self.spo_search(subject=subject, predicate=predicate):
        yield str(statement[2])
<SYSTEM_TASK:>
Creates a CloudFormation Stack as specified by the template.
<END_TASK>
<USER_TASK:>
Description:
def create_stack(self, stack_name, template_body=None, template_url=None,
                 parameters=None, notification_arns=None, disable_rollback=False,
                 timeout_in_minutes=None, capabilities=None):
    """
    Creates a CloudFormation Stack as specified by the template.

    :type stack_name: string
    :param stack_name: The name of the Stack, must be unique amoung running
                       Stacks

    :type template_body: string
    :param template_body: The template body (JSON string)

    :type template_url: string
    :param template_url: An S3 URL of a stored template JSON document. If
                         both the template_body and template_url are
                         specified, the template_body takes precedence

    :type parameters: list of tuples
    :param parameters: A list of (key, value) pairs for template input
                       parameters.

    :type notification_arns: list of strings
    :param notification_arns: A list of SNS topics to send Stack event
                              notifications to.

    :type disable_rollback: bool
    :param disable_rollback: Indicates whether or not to rollback on
                             failure.

    :type timeout_in_minutes: int
    :param timeout_in_minutes: Maximum amount of time to let the Stack
                               spend creating itself. If this timeout is exceeded,
                               the Stack will enter the CREATE_FAILED state.

    :type capabilities: list
    :param capabilities: The list of capabilities you want to allow in
                         the stack. Currently, the only valid capability is
                         'CAPABILITY_IAM'.

    :rtype: string
    :return: The unique Stack ID.
    """
    # normalize formerly-mutable default arguments (sharing a mutable
    # default list across calls is a well-known Python pitfall)
    parameters = parameters or []
    notification_arns = notification_arns or []
    params = {'ContentType': "JSON", 'StackName': stack_name,
              'DisableRollback': self.encode_bool(disable_rollback)}
    if template_body:
        params['TemplateBody'] = template_body
    if template_url:
        params['TemplateURL'] = template_url
    if template_body and template_url:
        boto.log.warning("If both TemplateBody and TemplateURL are"
            " specified, only TemplateBody will be honored by the API")
    if parameters:
        for i, (key, value) in enumerate(parameters):
            params['Parameters.member.%d.ParameterKey' % (i+1)] = key
            params['Parameters.member.%d.ParameterValue' % (i+1)] = value
    if capabilities:
        for i, value in enumerate(capabilities):
            params['Capabilities.member.%d' % (i+1)] = value
    if notification_arns:
        self.build_list_params(params, notification_arns,
                               "NotificationARNs.member")
    if timeout_in_minutes:
        params['TimeoutInMinutes'] = int(timeout_in_minutes)
    response = self.make_request('CreateStack', params, '/', 'POST')
    body = response.read()
    if response.status == 200:
        body = json.loads(body)
        return body['CreateStackResponse']['CreateStackResult']['StackId']
    else:
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
<SYSTEM_TASK:>
Add a change request
<END_TASK>
<USER_TASK:>
Description:
def add_change(self, action, name, type, ttl=600,
    alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
    weight=None):
    """Add a change request to this batch.

    Builds a :class:`Record` from the supplied arguments, queues it with
    the requested action, and returns it so the caller can attach
    resource record values.
    """
    record = Record(name, type, ttl,
                    alias_hosted_zone_id=alias_hosted_zone_id,
                    alias_dns_name=alias_dns_name,
                    identifier=identifier,
                    weight=weight)
    self.changes.append([action, record])
    return record
<SYSTEM_TASK:>
Overwritten to also add the NextRecordName and
<END_TASK>
<USER_TASK:>
Description:
def endElement(self, name, value, connection):
    """Overwritten to also capture the NextRecordName and
    NextRecordType values on this result set."""
    # intercept the pagination markers; everything else is delegated to
    # the base ResultSet handling
    if name == 'NextRecordName':
        self.next_record_name = value
        return
    if name == 'NextRecordType':
        self.next_record_type = value
        return
    return ResultSet.endElement(self, name, value, connection)
<SYSTEM_TASK:>
Make this an alias resource record set
<END_TASK>
<USER_TASK:>
Description:
def set_alias(self, alias_hosted_zone_id, alias_dns_name):
    """Turn this resource record set into an alias record pointing at
    the given hosted zone and DNS name."""
    self.alias_hosted_zone_id, self.alias_dns_name = (
        alias_hosted_zone_id, alias_dns_name)
<SYSTEM_TASK:>
Update the state of the Table object based on the response
<END_TASK>
<USER_TASK:>
Description:
def update_from_response(self, response):
    """
    Update the state of the Table object based on the response
    data received from Amazon DynamoDB.
    """
    # DescribeTable returns 'Table'; Create/Update/DeleteTable return
    # 'TableDescription' -- prefer 'Table' when both are present
    for response_key in ('Table', 'TableDescription'):
        if response_key in response:
            self._dict.update(response[response_key])
            break
    if 'KeySchema' in self._dict:
        self._schema = Schema(self._dict['KeySchema'])
<SYSTEM_TASK:>
Refresh all of the fields of the Table object by calling
<END_TASK>
<USER_TASK:>
Description:
def refresh(self, wait_for_active=False, retry_seconds=5):
    """
    Refresh all of the fields of the Table object by calling
    the underlying DescribeTable request.

    :type wait_for_active: bool
    :param wait_for_active: If True, this command will not return
        until the table status, as returned from Amazon DynamoDB, is
        'ACTIVE'.

    :type retry_seconds: int
    :param retry_seconds: If wait_for_active is True, this
        parameter controls the number of seconds of delay between
        calls to update_table in Amazon DynamoDB. Default is 5 seconds.
    """
    while True:
        response = self.layer2.describe_table(self.name)
        self.update_from_response(response)
        # stop immediately when not waiting, or once the table is ACTIVE
        if not wait_for_active or self.status == 'ACTIVE':
            break
        time.sleep(retry_seconds)
<SYSTEM_TASK:>
Return an new, unsaved Item which can later be PUT to
<END_TASK>
<USER_TASK:>
Description:
def new_item(self, hash_key, range_key=None, attrs=None):
    """
    Return a new, unsaved Item which can later be PUT to
    Amazon DynamoDB.

    :param hash_key: hash key value for the new item
    :param range_key: optional range key value (for tables with a
        composite primary key)
    :param attrs: optional dict of initial attribute values
    :rtype: :class:`boto.dynamodb.item.Item`
    """
    return Item(self, hash_key, range_key, attrs)
<SYSTEM_TASK:>
Scan through this table, this is a very long
<END_TASK>
<USER_TASK:>
Description:
def scan(self, scan_filter=None,
         attributes_to_get=None, request_limit=None, max_results=None,
         count=False, exclusive_start_key=None, item_class=Item):
    """
    Scan through this table; this is a very long and expensive
    operation, and should be avoided if at all possible.

    :type scan_filter: A list of tuples
    :param scan_filter: A list of tuples where each tuple consists
        of an attribute name, a comparison operator, and either
        a scalar or tuple consisting of the values to compare
        the attribute to. Valid comparison operators are shown below
        along with the expected number of values that should be supplied.

        * EQ - equal (1)
        * NE - not equal (1)
        * LE - less than or equal (1)
        * LT - less than (1)
        * GE - greater than or equal (1)
        * GT - greater than (1)
        * NOT_NULL - attribute exists (0, use None)
        * NULL - attribute does not exist (0, use None)
        * CONTAINS - substring or value in list (1)
        * NOT_CONTAINS - absence of substring or value in list (1)
        * BEGINS_WITH - substring prefix (1)
        * IN - exact match in list (N)
        * BETWEEN - >= first value, <= second value (2)

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yeild 100 results max.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Scan operation, even if the
        operation has no matching items for the assigned filter.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`

    :rtype: generator
    """
    # NOTE(review): the `count` parameter is accepted and documented
    # above but is not forwarded to layer2.scan below -- confirm whether
    # it should be passed through.
    return self.layer2.scan(self, scan_filter, attributes_to_get,
                            request_limit, max_results,
                            exclusive_start_key, item_class=item_class)
<SYSTEM_TASK:>
Retrieve a language by a code.
<END_TASK>
<USER_TASK:>
Description:
def get_by_code(self, code):
    """
    Retrieve a language by a code.

    :param code: iso code (any of the three) or its culture code
    :return: a Language object
    """
    # culture codes contain a separator ('en-US' or 'en_US')
    if '_' in code or '-' in code:
        culture = CultureCode.objects.get(code=code.replace('_', '-'))
        return culture.language
    if len(code) == 2:
        return self.get(iso_639_1=code)
    if len(code) == 3:
        # a three-letter code may match any of the iso-639 variants
        return self.get(Q(iso_639_2T=code) |
                        Q(iso_639_2B=code) |
                        Q(iso_639_3=code))
    raise ValueError(
        'Code must be either 2, or 3 characters: "%s" is %s' % (code, len(code)))
<SYSTEM_TASK:>
Returns a list of Server instances, one for each Server object
<END_TASK>
<USER_TASK:>
Description:
def Inventory(cls):
    """
    Returns a list of Server instances, one for each Server object
    persisted in the db
    """
    servers = ServerSet()
    for persisted_server in cls.find():
        servers.append(persisted_server)
    return servers
<SYSTEM_TASK:>
Attach an EBS volume to this server
<END_TASK>
<USER_TASK:>
Description:
def attach_volume(self, volume, device="/dev/sdp"):
    """
    Attach an EBS volume to this server

    :param volume: EBS Volume to attach
    :type volume: boto.ec2.volume.Volume

    :param device: Device to attach to (default to /dev/sdp)
    :type device: string
    """
    # accept either a Volume object or a raw volume-id string
    volume_id = volume.id if hasattr(volume, "id") else volume
    return self.ec2.attach_volume(volume_id=volume_id,
                                  instance_id=self.instance_id,
                                  device=device)
<SYSTEM_TASK:>
Saves URI to tracker file if one was passed to constructor.
<END_TASK>
<USER_TASK:>
Description:
def _save_tracker_uri_to_file(self):
"""
Saves URI to tracker file if one was passed to constructor.
""" |
if not self.tracker_file_name:
return
f = None
try:
f = open(self.tracker_file_name, 'w')
f.write(self.tracker_uri)
except IOError, e:
raise ResumableUploadException(
'Couldn\'t write URI tracker file (%s): %s.\nThis can happen'
'if you\'re using an incorrectly configured upload tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
finally:
if f:
f.close() |
<SYSTEM_TASK:>
Called when we start a new resumable upload or get a new tracker
<END_TASK>
<USER_TASK:>
Description:
def _set_tracker_uri(self, uri):
    """
    Called when we start a new resumable upload or get a new tracker
    URI for the upload. Saves URI and resets upload state.

    Raises InvalidUriError if URI is syntactically invalid.
    """
    parse_result = urlparse.urlparse(uri)
    scheme_ok = parse_result.scheme.lower() in ['http', 'https']
    # a valid tracker URI must be http(s) with a host and a query string
    if not (scheme_ok and parse_result.netloc and parse_result.query):
        raise InvalidUriError('Invalid tracker URI (%s)' % uri)
    qdict = cgi.parse_qs(parse_result.query)
    if not qdict or 'upload_id' not in qdict:
        raise InvalidUriError('Invalid tracker URI (%s)' % uri)
    self.tracker_uri = uri
    self.tracker_uri_host = parse_result.netloc
    self.tracker_uri_path = '%s/?%s' % (parse_result.netloc,
                                        parse_result.query)
    # new tracker URI means no bytes have been confirmed on the server yet
    self.server_has_bytes = 0
<SYSTEM_TASK:>
Queries server to find out state of given upload.
<END_TASK>
<USER_TASK:>
Description:
def _query_server_state(self, conn, file_length):
    """
    Queries server to find out state of given upload.

    Note that this method really just makes special case use of the
    fact that the upload server always returns the current start/end
    state whenever a PUT doesn't complete.

    Returns HTTP response from sending request.

    Raises ResumableUploadException if problem querying server.
    """
    # An empty PUT with a '*' Content-Range prompts the server to reply
    # with the current state of this resumable transfer.
    put_headers = {
        'Content-Range': self._build_content_range_header('*', file_length),
        'Content-Length': '0',
    }
    return AWSAuthConnection.make_request(conn, 'PUT',
                                          path=self.tracker_uri_path,
                                          auth_path=self.tracker_uri_path,
                                          headers=put_headers,
                                          host=self.tracker_uri_host)
<SYSTEM_TASK:>
Queries server to find out what bytes it currently has.
<END_TASK>
<USER_TASK:>
Description:
def _query_server_pos(self, conn, file_length):
"""
Queries server to find out what bytes it currently has.
Returns (server_start, server_end), where the values are inclusive.
For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2.
Raises ResumableUploadException if problem querying server.
""" |
resp = self._query_server_state(conn, file_length)
if resp.status == 200:
return (0, file_length) # Completed upload.
if resp.status != 308:
# This means the server didn't have any state for the given
# upload ID, which can happen (for example) if the caller saved
# the tracker URI to a file and then tried to restart the transfer
# after that upload ID has gone stale. In that case we need to
# start a new transfer (and the caller will then save the new
# tracker URI to the tracker file).
raise ResumableUploadException(
'Got non-308 response (%s) from server state query' %
resp.status, ResumableTransferDisposition.START_OVER)
got_valid_response = False
range_spec = resp.getheader('range')
if range_spec:
# Parse 'bytes=<from>-<to>' range_spec.
m = re.search('bytes=(\d+)-(\d+)', range_spec)
if m:
server_start = long(m.group(1))
server_end = long(m.group(2))
got_valid_response = True
else:
# No Range header, which means the server does not yet have
# any bytes. Note that the Range header uses inclusive 'from'
# and 'to' values. Since Range 0-0 would mean that the server
# has byte 0, omitting the Range header is used to indicate that
# the server doesn't have any bytes.
return self.SERVER_HAS_NOTHING
if not got_valid_response:
raise ResumableUploadException(
'Couldn\'t parse upload server state query response (%s)' %
str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
if conn.debug >= 1:
print 'Server has: Range: %d - %d.' % (server_start, server_end)
return (server_start, server_end) |
<SYSTEM_TASK:>
Makes one attempt to upload file bytes, using an existing resumable
<END_TASK>
<USER_TASK:>
Description:
def _upload_file_bytes(self, conn, http_conn, fp, file_length,
                       total_bytes_uploaded, cb, num_cb):
    """
    Makes one attempt to upload file bytes, using an existing resumable
    upload connection.

    :param conn: the AWSAuthConnection used to build the request
    :param http_conn: a dedicated HTTP connection for this transfer
    :param fp: open file object positioned at total_bytes_uploaded
    :param file_length: total size of the file in bytes
    :param total_bytes_uploaded: number of bytes the server already has
    :param cb: optional progress callback(bytes_uploaded, file_length)
    :param num_cb: approximate number of callback invocations desired
        (negative means call back on every buffer)

    Returns etag from server upon success.

    Raises ResumableUploadException if any problems occur.
    """
    buf = fp.read(self.BUFFER_SIZE)
    if cb:
        # Determine how many buffers to send between callbacks; the
        # first and last callbacks are issued unconditionally below.
        # NOTE(review): this division is integer division on Python 2
        # but float division on Python 3 -- confirm intended semantics
        # before porting, since `i == cb_count` below compares exactly.
        if num_cb > 2:
            cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
        elif num_cb < 0:
            cb_count = -1
        else:
            cb_count = 0
        i = 0
        cb(total_bytes_uploaded, file_length)
    # Build resumable upload headers for the transfer. Don't send a
    # Content-Range header if the file is 0 bytes long, because the
    # resumable upload protocol uses an *inclusive* end-range (so, sending
    # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
    put_headers = {}
    if file_length:
        range_header = self._build_content_range_header(
            '%d-%d' % (total_bytes_uploaded, file_length - 1),
            file_length)
        put_headers['Content-Range'] = range_header
    # Set Content-Length to the total bytes we'll send with this PUT.
    put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
    http_request = AWSAuthConnection.build_base_http_request(
        conn, 'PUT', path=self.tracker_uri_path, auth_path=None,
        headers=put_headers, host=self.tracker_uri_host)
    http_conn.putrequest('PUT', http_request.path)
    for k in put_headers:
        http_conn.putheader(k, put_headers[k])
    http_conn.endheaders()
    # Turn off debug on http connection so upload content isn't included
    # in debug stream.
    http_conn.set_debuglevel(0)
    while buf:
        http_conn.send(buf)
        total_bytes_uploaded += len(buf)
        if cb:
            i += 1
            if i == cb_count or cb_count == -1:
                cb(total_bytes_uploaded, file_length)
                i = 0
        buf = fp.read(self.BUFFER_SIZE)
    if cb:
        # final callback so the caller always sees 100% progress
        cb(total_bytes_uploaded, file_length)
    if total_bytes_uploaded != file_length:
        # Abort (and delete the tracker file) so if the user retries
        # they'll start a new resumable upload rather than potentially
        # attempting to pick back up later where we left off.
        raise ResumableUploadException(
            'File changed during upload: EOF at %d bytes of %d byte file.' %
            (total_bytes_uploaded, file_length),
            ResumableTransferDisposition.ABORT)
    resp = http_conn.getresponse()
    body = resp.read()
    # Restore http connection debug level.
    http_conn.set_debuglevel(conn.debug)
    if resp.status == 200:
        return resp.getheader('etag')  # Success
    # Retry timeout (408) and status 500 and 503 errors after a delay.
    elif resp.status in [408, 500, 503]:
        disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
    else:
        # Catch all for any other error codes.
        disposition = ResumableTransferDisposition.ABORT
    raise ResumableUploadException('Got response code %d while attempting '
                                   'upload (%s)' %
                                   (resp.status, resp.reason), disposition)
<SYSTEM_TASK:>
Attempts a resumable upload.
<END_TASK>
<USER_TASK:>
Description:
def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
                              num_cb):
    """
    Attempts a resumable upload.

    Resumes an existing transfer when a tracker URI is present,
    otherwise starts a new one, then uploads the remaining bytes.

    Returns etag from server upon success.

    Raises ResumableUploadException if any problems occur.
    """
    (server_start, server_end) = self.SERVER_HAS_NOTHING
    conn = key.bucket.connection
    if self.tracker_uri:
        # Try to resume existing resumable upload.
        try:
            (server_start, server_end) = (
                self._query_server_pos(conn, file_length))
            self.server_has_bytes = server_start
            # (removed dead `key=key` self-assignment that was here)
            if conn.debug >= 1:
                print('Resuming transfer.')
        except ResumableUploadException as e:
            if conn.debug >= 1:
                print('Unable to resume transfer (%s).' % e.message)
            self._start_new_resumable_upload(key, headers)
    else:
        self._start_new_resumable_upload(key, headers)
    # upload_start_point allows the code that instantiated the
    # ResumableUploadHandler to find out the point from which it started
    # uploading (e.g., so it can correctly compute throughput).
    if self.upload_start_point is None:
        self.upload_start_point = server_end
    if server_end == file_length:
        # Boundary condition: complete file was already uploaded (e.g.,
        # user interrupted a previous upload attempt after the upload
        # completed but before the gsutil tracker file was deleted). Set
        # total_bytes_uploaded to server_end so we'll attempt to upload
        # no more bytes but will still make final HTTP request and get
        # back the response (which contains the etag we need to compare
        # at the end).
        total_bytes_uploaded = server_end
    else:
        total_bytes_uploaded = server_end + 1
    fp.seek(total_bytes_uploaded)
    conn = key.bucket.connection
    # Get a new HTTP connection (vs conn.get_http_connection(), which reuses
    # pool connections) because httplib requires a new HTTP connection per
    # transaction. (Without this, calling http_conn.getresponse() would get
    # "ResponseNotReady".)
    http_conn = conn.new_http_connection(self.tracker_uri_host,
                                         conn.is_secure)
    http_conn.set_debuglevel(conn.debug)
    # Make sure to close http_conn at end so if a local file read
    # failure occurs partway through server will terminate current upload
    # and can report that progress on next attempt.
    try:
        return self._upload_file_bytes(conn, http_conn, fp, file_length,
                                       total_bytes_uploaded, cb, num_cb)
    except (ResumableUploadException, socket.error):
        resp = self._query_server_state(conn, file_length)
        if resp.status == 400:
            raise ResumableUploadException('Got 400 response from server '
                'state query after failed resumable upload attempt. This '
                'can happen for various reasons, including specifying an '
                'invalid request (e.g., an invalid canned ACL) or if the '
                'file size changed between upload attempts',
                ResumableTransferDisposition.ABORT)
        else:
            raise
    finally:
        http_conn.close()
<SYSTEM_TASK:>
Sets the S3 ACL grants for the given object to the appropriate
<END_TASK>
<USER_TASK:>
Description:
def set_permissions(self, object, replace=False):
    """
    Sets the S3 ACL grants for the given object to the appropriate
    value based on the type of Distribution. If the Distribution
    is serving private content the ACL will be set to include the
    Origin Access Identity associated with the Distribution. If
    the Distribution is serving public content the content will
    be set up with "public-read".

    :type object: :class:`boto.cloudfront.object.Object`
    :param object: The Object whose ACL is being set

    :type replace: bool
    :param replace: If False, the Origin Access Identity will be
        appended to the existing ACL for the object. If True, the ACL
        for the object will be completely replaced with one that grants
        READ permission to the Origin Access Identity.
    """
    # only S3-backed distributions carry ACL information we can manage
    if not isinstance(self.config.origin, S3Origin):
        return
    origin_access_identity = self.config.origin.origin_access_identity
    if not origin_access_identity:
        # public distribution: everything is world-readable
        object.set_canned_acl('public-read')
        return
    # private distribution: grant READ to the Origin Access Identity
    id = origin_access_identity.split('/')[-1]
    oai = self.connection.get_origin_access_identity_info(id)
    policy = object.get_acl()
    if replace:
        policy.acl = ACL()
    policy.acl.add_user_grant('READ', oai.s3_user_id)
    object.set_acl(policy)
<SYSTEM_TASK:>
Sets the S3 ACL grants for all objects in the Distribution
<END_TASK>
<USER_TASK:>
Description:
def set_permissions_all(self, replace=False):
    """
    Sets the S3 ACL grants for all objects in the Distribution
    to the appropriate value based on the type of Distribution.

    :type replace: bool
    :param replace: If False, the Origin Access Identity will be
        appended to the existing ACL for the object. If True, the ACL
        for the object will be completely replaced with one that grants
        READ permission to the Origin Access Identity.
    """
    for key in self._get_bucket():
        self.set_permissions(key, replace)
<SYSTEM_TASK:>
Adds a new content object to the Distribution. The content
<END_TASK>
<USER_TASK:>
Description:
def add_object(self, name, content, headers=None, replace=True):
    """
    Adds a new content object to the Distribution. The content
    for the object will be copied to a new Key in the S3 Bucket
    and the permissions will be set appropriately for the type
    of Distribution.

    :type name: str or unicode
    :param name: The name or key of the new object.

    :type content: file-like object
    :param content: A file-like object that contains the content
        for the new object.

    :type headers: dict
    :param headers: A dictionary containing additional headers
        you would like associated with the new object in S3.

    :rtype: :class:`boto.cloudfront.object.Object`
    :return: The newly created object.
    """
    # distributions with an Origin Access Identity serve private content
    private_content = bool(self.config.origin.origin_access_identity)
    policy = 'private' if private_content else 'public-read'
    bucket = self._get_bucket()
    object = bucket.new_key(name)
    object.set_contents_from_file(content, headers=headers, policy=policy)
    if private_content:
        self.set_permissions(object, replace)
    return object
<SYSTEM_TASK:>
Creates a signed CloudFront URL that is only valid within the specified
<END_TASK>
<USER_TASK:>
Description:
def create_signed_url(self, url, keypair_id,
                      expire_time=None, valid_after_time=None,
                      ip_address=None, policy_url=None,
                      private_key_file=None, private_key_string=None):
    """
    Creates a signed CloudFront URL that is only valid within the specified
    parameters.

    :type url: str
    :param url: The URL of the protected object.

    :type keypair_id: str
    :param keypair_id: The keypair ID of the Amazon KeyPair used to sign
        the URL. This ID MUST correspond to the private key
        specified with private_key_file or private_key_string.

    :type expire_time: int
    :param expire_time: The expiry time of the URL, as a unix epoch.
        If not provided the URL will never expire.

    :type valid_after_time: int
    :param valid_after_time: If provided, the URL will not be valid until
        after valid_after_time, as a unix epoch.

    :type ip_address: str
    :param ip_address: If provided, only allows access from the specified
        IP address (single IP or CIDR notation for a subnet).

    :type policy_url: str
    :param policy_url: If provided, allows the signature to contain
        wildcard globs in the URL (e.g. 'http://example.com/media/\*').
        If not specified, only the exact url provided in 'url' is allowed.

    :type private_key_file: str or file object.
    :param private_key_file: Filename of (or open file object containing)
        the private key used for signing. Only one of private_key_file
        or private_key_string can be provided.

    :type private_key_string: str
    :param private_key_string: The private key contents used for signing.
        Only one of private_key_file or private_key_string can be
        provided.

    :rtype: str
    :return: The signed URL.
    """
    # Get the required parameters
    params = self._create_signing_params(
        url=url, keypair_id=keypair_id, expire_time=expire_time,
        valid_after_time=valid_after_time, ip_address=ip_address,
        policy_url=policy_url, private_key_file=private_key_file,
        private_key_string=private_key_string)
    # Append the signing parameters to the URL, preserving any existing
    # query string; CloudFront expects them in this fixed order.
    sep = "&" if "?" in url else "?"
    signed_url_params = ["%s=%s" % (key, params[key])
                         for key in ("Expires", "Policy", "Signature",
                                     "Key-Pair-Id")
                         if key in params]
    return url + sep + "&".join(signed_url_params)
<SYSTEM_TASK:>
Creates the required URL parameters for a signed URL.
<END_TASK>
<USER_TASK:>
Description:
def _create_signing_params(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates the required URL parameters for a signed URL.
""" |
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
# we manually construct this policy string to ensure formatting
# matches signature
policy = self._canned_policy(url, expire_time)
params["Expires"] = str(expire_time)
else:
# If no policy_url is specified, default to the full url.
if policy_url is None:
policy_url = url
# Can't use canned policy
policy = self._custom_policy(policy_url, expires=expire_time,
valid_after=valid_after_time,
ip_address=ip_address)
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
#sign the policy
signature = self._sign_string(policy, private_key_file, private_key_string)
#now base64 encode the signature (URL safe as well)
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params |
<SYSTEM_TASK:>
Creates a custom policy string based on the supplied parameters.
<END_TASK>
<USER_TASK:>
Description:
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
"""
Creates a custom policy string based on the supplied parameters.
""" |
condition = {}
if expires:
condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
if '/' not in ip_address:
ip_address += "/32"
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
"Resource": resource,
"Condition": condition}]}
return json.dumps(policy, separators=(",", ":")) |
<SYSTEM_TASK:>
Signs a string for use with Amazon CloudFront. Requires the M2Crypto
<END_TASK>
<USER_TASK:>
Description:
def _sign_string(message, private_key_file=None, private_key_string=None):
    """
    Signs a string for use with Amazon CloudFront. Requires the M2Crypto
    library be installed.

    :param message: the policy string to sign
    :param private_key_file: filename of (or open file-like object
        containing) the RSA private key; mutually exclusive with
        private_key_string
    :param private_key_string: the private key contents as a string;
        mutually exclusive with private_key_file
    :return: the raw RSA-SHA1 signature
    """
    try:
        from M2Crypto import EVP
    except ImportError:
        raise NotImplementedError("Boto depends on the python M2Crypto "
                                  "library to generate signed URLs for "
                                  "CloudFront")
    # Make sure only one of private_key_file and private_key_string is set
    if private_key_file and private_key_string:
        raise ValueError("Only specify the private_key_file or the private_key_string not both")
    if not private_key_file and not private_key_string:
        raise ValueError("You must specify one of private_key_file or private_key_string")
    # If private_key_file is a file-like object, read the key contents
    # from it. Duck-typing on `read` (instead of the Python-2-only
    # `isinstance(..., file)`) also accepts StringIO-style objects and
    # works on Python 3, where the `file` builtin no longer exists.
    if hasattr(private_key_file, 'read'):
        private_key_string = private_key_file.read()
    # Now load key and calculate signature
    if private_key_string:
        key = EVP.load_key_string(private_key_string)
    else:
        key = EVP.load_key(private_key_file)
    key.reset_context(md='sha1')
    key.sign_init()
    key.sign_update(str(message))
    signature = key.sign_final()
    return signature
<SYSTEM_TASK:>
Base64 encodes a string using the URL-safe characters specified by
<END_TASK>
<USER_TASK:>
Description:
def _url_base64_encode(msg):
    """
    Base64 encodes a string using the URL-safe characters specified by
    Amazon.
    """
    # Amazon's URL-safe alphabet differs from RFC 4648:
    # '+' -> '-', '=' -> '_' and '/' -> '~'
    encoded = base64.b64encode(msg)
    for standard_char, amazon_char in (('+', '-'), ('=', '_'), ('/', '~')):
        encoded = encoded.replace(standard_char, amazon_char)
    return encoded
<SYSTEM_TASK:>
Re-save any content models referencing the just-modified
<END_TASK>
<USER_TASK:>
Description:
def _update_content(sender, instance, created=None, **kwargs):
    """
    Re-save any content models referencing the just-modified
    ``FileUpload``.

    We don't do anything special to the content model, we just re-save
    it. If signals are in use, we assume that the content model has
    incorporated ``render_uploads`` into some kind of rendering that
    happens automatically at save-time.
    """
    if created:  # a brand new FileUpload won't be referenced
        return
    for reference in FileUploadReference.objects.filter(upload=instance):
        try:
            # generic FK target may have been deleted or be unresolvable
            content_object = reference.content_object
            if content_object:
                content_object.save()
        except AttributeError:
            pass
<SYSTEM_TASK:>
Returns a dictionary containing the value of of all of the keyword
<END_TASK>
<USER_TASK:>
Description:
def get_params(self):
    """
    Returns a dictionary containing the value of all of the keyword
    arguments passed when constructing this connection.
    """
    param_names = ['aws_access_key_id', 'aws_secret_access_key',
                   'is_secure', 'port', 'proxy', 'proxy_port',
                   'proxy_user', 'proxy_pass',
                   'debug', 'https_connection_factory']
    # each constructor kwarg is stored as a same-named attribute
    return dict((name, getattr(self, name)) for name in param_names)
<SYSTEM_TASK:>
Retrieve all the EC2 images available on your account.
<END_TASK>
<USER_TASK:>
Description:
def get_all_images(self, image_ids=None, owners=None,
                   executable_by=None, filters=None):
    """
    Retrieve all the EC2 images available on your account.

    :type image_ids: list
    :param image_ids: A list of strings with the image IDs wanted

    :type owners: list
    :param owners: A list of owner IDs

    :type executable_by: list
    :param executable_by: Returns AMIs for which the specified
                          user ID has explicit launch permissions

    :type filters: dict
    :param filters: Optional filters limiting the results; keys are
                    filter names and values are filter values.  See the
                    EC2 API guide for the allowable names/values.

    :rtype: list
    :return: A list of :class:`boto.ec2.image.Image`
    """
    params = {}
    # Each optional list argument maps onto a repeated query parameter.
    for values, label in ((image_ids, 'ImageId'),
                          (owners, 'Owner'),
                          (executable_by, 'ExecutableBy')):
        if values:
            self.build_list_params(params, values, label)
    if filters:
        self.build_filter_params(params, filters)
    return self.get_list('DescribeImages', params,
                         [('item', Image)], verb='POST')
<SYSTEM_TASK:>
Retrieve all the EC2 kernels available on your account.
<END_TASK>
<USER_TASK:>
Description:
def get_all_kernels(self, kernel_ids=None, owners=None):
    """
    Retrieve all the EC2 kernels available on your account.

    An ``image-type=kernel`` filter is attached so the narrowing
    happens server side.

    :type kernel_ids: list
    :param kernel_ids: A list of strings with the image IDs wanted

    :type owners: list
    :param owners: A list of owner IDs

    :rtype: list
    :return: A list of :class:`boto.ec2.image.Image`
    """
    params = {}
    if kernel_ids:
        self.build_list_params(params, kernel_ids, 'ImageId')
    if owners:
        self.build_list_params(params, owners, 'Owner')
    # Restrict results to kernel images on the server side.
    self.build_filter_params(params, {'image-type': 'kernel'})
    return self.get_list('DescribeImages', params,
                         [('item', Image)], verb='POST')
<SYSTEM_TASK:>
Retrieve all the EC2 ramdisks available on your account.
<END_TASK>
<USER_TASK:>
Description:
def get_all_ramdisks(self, ramdisk_ids=None, owners=None):
    """
    Retrieve all the EC2 ramdisks available on your account.

    An ``image-type=ramdisk`` filter is attached so the narrowing
    happens server side.

    :type ramdisk_ids: list
    :param ramdisk_ids: A list of strings with the image IDs wanted

    :type owners: list
    :param owners: A list of owner IDs

    :rtype: list
    :return: A list of :class:`boto.ec2.image.Image`
    """
    params = {}
    if ramdisk_ids:
        self.build_list_params(params, ramdisk_ids, 'ImageId')
    if owners:
        self.build_list_params(params, owners, 'Owner')
    # Restrict results to ramdisk images on the server side.
    self.build_filter_params(params, {'image-type': 'ramdisk'})
    return self.get_list('DescribeImages', params,
                         [('item', Image)], verb='POST')
<SYSTEM_TASK:>
Register an image.
<END_TASK>
<USER_TASK:>
Description:
def register_image(self, name=None, description=None, image_location=None,
                   architecture=None, kernel_id=None, ramdisk_id=None,
                   root_device_name=None, block_device_map=None):
    """
    Register an image.

    :type name: string
    :param name: The name of the AMI.  Valid only for EBS-based images.

    :type description: string
    :param description: The description of the AMI.

    :type image_location: string
    :param image_location: Full path to your AMI manifest in Amazon S3
                           storage.  Only used for S3-based AMIs.

    :type architecture: string
    :param architecture: The architecture of the AMI: i386 | x86_64

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch instances

    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch instances

    :type root_device_name: string
    :param root_device_name: The root device name (e.g. /dev/sdh)

    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure describing
                             the EBS volumes associated with the Image.

    :rtype: string
    :return: The new image id
    """
    params = {}
    # Fold every provided scalar option into the request.
    optional = (('Name', name),
                ('Description', description),
                ('Architecture', architecture),
                ('KernelId', kernel_id),
                ('RamdiskId', ramdisk_id),
                ('ImageLocation', image_location),
                ('RootDeviceName', root_device_name))
    for key, value in optional:
        if value:
            params[key] = value
    if block_device_map:
        block_device_map.build_list_params(params)
    rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
    return getattr(rs, 'imageId', None)
<SYSTEM_TASK:>
Unregister an AMI.
<END_TASK>
<USER_TASK:>
Description:
def deregister_image(self, image_id, delete_snapshot=False):
    """
    Unregister an AMI.

    :type image_id: string
    :param image_id: the ID of the Image to unregister

    :type delete_snapshot: bool
    :param delete_snapshot: Set to True if we should delete the snapshot
                            associated with an EBS volume mounted at
                            /dev/sda1

    :rtype: bool
    :return: True if successful
    """
    snapshot_id = None
    if delete_snapshot:
        # Find the snapshot backing the root device before deregistering.
        mapping = self.get_image(image_id).block_device_mapping
        for device in mapping:
            if device == "/dev/sda1":
                snapshot_id = mapping[device].snapshot_id
                break
    result = self.get_status('DeregisterImage',
                             {'ImageId': image_id}, verb='POST')
    if result and snapshot_id:
        # Only clean up the snapshot when the deregister succeeded.
        return self.delete_snapshot(snapshot_id)
    return result
<SYSTEM_TASK:>
Will create an AMI from the instance in the running or stopped
<END_TASK>
<USER_TASK:>
Description:
def create_image(self, instance_id, name,
                 description=None, no_reboot=False):
    """
    Create an AMI from an instance in the running or stopped state.

    :type instance_id: string
    :param instance_id: the ID of the instance to image.

    :type name: string
    :param name: The name of the new image

    :type description: string
    :param description: An optional human-readable string describing the
                        contents and purpose of the AMI.

    :type no_reboot: bool
    :param no_reboot: If True, the bundling process will not attempt to
                      shut down the instance first; file system integrity
                      is then the instance owner's responsibility.

    :rtype: string
    :return: The new image id
    """
    request = {'InstanceId': instance_id, 'Name': name}
    if description:
        request['Description'] = description
    if no_reboot:
        request['NoReboot'] = 'true'
    image = self.get_object('CreateImage', request, Image, verb='POST')
    return image.id
<SYSTEM_TASK:>
Gets an attribute from an image.
<END_TASK>
<USER_TASK:>
Description:
def get_image_attribute(self, image_id, attribute='launchPermission'):
    """
    Get an attribute from an image.

    :type image_id: string
    :param image_id: The Amazon image id for which you want info about

    :type attribute: string
    :param attribute: The attribute you need information about.
                      Valid choices are:
                      * launchPermission
                      * productCodes
                      * blockDeviceMapping

    :rtype: :class:`boto.ec2.image.ImageAttribute`
    :return: An ImageAttribute object representing the value of the
             attribute requested
    """
    query = {'ImageId': image_id, 'Attribute': attribute}
    return self.get_object('DescribeImageAttribute', query,
                           ImageAttribute, verb='POST')
<SYSTEM_TASK:>
Resets an attribute of an AMI to its default value.
<END_TASK>
<USER_TASK:>
Description:
def reset_image_attribute(self, image_id, attribute='launchPermission'):
    """
    Reset an attribute of an AMI to its default value.

    :type image_id: string
    :param image_id: ID of the AMI for which an attribute will be reset

    :type attribute: string
    :param attribute: The attribute to reset

    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    return self.get_status('ResetImageAttribute',
                           {'ImageId': image_id, 'Attribute': attribute},
                           verb='POST')
<SYSTEM_TASK:>
Retrieve all the instances associated with your account.
<END_TASK>
<USER_TASK:>
Description:
def get_all_instances(self, instance_ids=None, filters=None):
    """
    Retrieve all the instances associated with your account.

    :type instance_ids: list
    :param instance_ids: A list of strings of instance IDs

    :type filters: dict
    :param filters: Optional filters limiting the results; keys are
                    filter names and values are filter values.  See the
                    EC2 API guide for the allowable names/values.

    :rtype: list
    :return: A list of :class:`boto.ec2.instance.Reservation`
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    if filters:
        if 'group-id' in filters:
            # Warn about the legacy group-name style value for this filter.
            gid = filters['group-id']
            if not gid.startswith('sg-') or len(gid) != 11:
                warnings.warn(
                    "The group-id filter now requires a security group "
                    "identifier (sg-*) instead of a group name. To filter "
                    "by group name use the 'group-name' filter instead.",
                    UserWarning)
        self.build_filter_params(params, filters)
    return self.get_list('DescribeInstances', params,
                         [('item', Reservation)], verb='POST')
<SYSTEM_TASK:>
Retrieve all the instances in your account scheduled for maintenance.
<END_TASK>
<USER_TASK:>
Description:
def get_all_instance_status(self, instance_ids=None,
                            max_results=None, next_token=None,
                            filters=None):
    """
    Retrieve all the instances in your account scheduled for maintenance.

    :type instance_ids: list
    :param instance_ids: A list of strings of instance IDs

    :type max_results: int
    :param max_results: The maximum number of paginated instance
                        items per response.

    :type next_token: str
    :param next_token: A string specifying the next paginated set
                       of results to return.

    :type filters: dict
    :param filters: Optional filters limiting the results; keys are
                    filter names and values are filter values.  See the
                    EC2 API guide for the allowable names/values.

    :rtype: list
    :return: A list of instances that have maintenance scheduled.
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    # Pagination controls are plain scalar parameters.
    for value, key in ((max_results, 'MaxResults'),
                       (next_token, 'NextToken')):
        if value:
            params[key] = value
    if filters:
        self.build_filter_params(params, filters)
    return self.get_object('DescribeInstanceStatus', params,
                           InstanceStatusSet, verb='POST')
<SYSTEM_TASK:>
Runs an image on EC2.
<END_TASK>
<USER_TASK:>
Description:
def run_instances(self, image_id, min_count=1, max_count=1,
                  key_name=None, security_groups=None,
                  user_data=None, addressing_type=None,
                  instance_type='m1.small', placement=None,
                  kernel_id=None, ramdisk_id=None,
                  monitoring_enabled=False, subnet_id=None,
                  block_device_map=None,
                  disable_api_termination=False,
                  instance_initiated_shutdown_behavior=None,
                  private_ip_address=None,
                  placement_group=None, client_token=None,
                  security_group_ids=None):
    """
    Run an image on EC2.

    :type image_id: string
    :param image_id: The ID of the image to run

    :type min_count: int
    :param min_count: The minimum number of instances to launch

    :type max_count: int
    :param max_count: The maximum number of instances to launch

    :type key_name: string
    :param key_name: The name of the key pair with which to launch instances

    :type security_groups: list of strings
    :param security_groups: The names of the security groups with which
                            to associate instances

    :type user_data: string
    :param user_data: The user data passed to the launched instances
                      (base64-encoded before transmission)

    :type instance_type: string
    :param instance_type: The type of instance to run, e.g. m1.small,
                          m1.large, c1.xlarge, m2.4xlarge, cc1.4xlarge,
                          t1.micro, ...

    :type placement: string
    :param placement: The availability zone in which to launch the instances

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the instances

    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch the
                       instances

    :type monitoring_enabled: bool
    :param monitoring_enabled: Enable CloudWatch monitoring on the instance.

    :type subnet_id: string
    :param subnet_id: The subnet ID within which to launch the instances
                      for VPC.

    :type private_ip_address: string
    :param private_ip_address: For VPC, optionally assign the instance a
                               specific available IP address from the
                               subnet (e.g., 10.0.0.25).

    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure describing
                             the EBS volumes associated with the Image.

    :type disable_api_termination: bool
    :param disable_api_termination: If True, the instances will be locked
                                    and cannot be terminated via the API.

    :type instance_initiated_shutdown_behavior: string
    :param instance_initiated_shutdown_behavior: Whether the instance
                                                 stops or terminates on
                                                 instance-initiated
                                                 shutdown: stop | terminate

    :type placement_group: string
    :param placement_group: If specified, the name of the placement group
                            in which the instance(s) will be launched.

    :type client_token: string
    :param client_token: Unique, case-sensitive identifier you provide to
                         ensure idempotency of the request.
                         Maximum 64 ASCII characters.

    :type security_group_ids: list of strings
    :param security_group_ids: The ID of the VPC security groups with
                               which to associate instances

    :rtype: Reservation
    :return: The :class:`boto.ec2.instance.Reservation` associated with
             the request for machines
    """
    params = {'ImageId': image_id,
              'MinCount': min_count,
              'MaxCount': max_count}
    if key_name:
        params['KeyName'] = key_name
    if security_group_ids:
        # Accept either SecurityGroup objects or plain id strings.
        ids = [g.id if isinstance(g, SecurityGroup) else g
               for g in security_group_ids]
        self.build_list_params(params, ids, 'SecurityGroupId')
    if security_groups:
        # Accept either SecurityGroup objects or plain name strings.
        names = [g.name if isinstance(g, SecurityGroup) else g
                 for g in security_groups]
        self.build_list_params(params, names, 'SecurityGroup')
    if user_data:
        params['UserData'] = base64.b64encode(user_data)
    # All remaining optional scalar arguments map directly onto request keys.
    scalar_options = ((addressing_type, 'AddressingType'),
                      (instance_type, 'InstanceType'),
                      (placement, 'Placement.AvailabilityZone'),
                      (placement_group, 'Placement.GroupName'),
                      (kernel_id, 'KernelId'),
                      (ramdisk_id, 'RamdiskId'),
                      (subnet_id, 'SubnetId'),
                      (private_ip_address, 'PrivateIpAddress'),
                      (instance_initiated_shutdown_behavior,
                       'InstanceInitiatedShutdownBehavior'),
                      (client_token, 'ClientToken'))
    for value, key in scalar_options:
        if value:
            params[key] = value
    if monitoring_enabled:
        params['Monitoring.Enabled'] = 'true'
    if block_device_map:
        block_device_map.build_list_params(params)
    if disable_api_termination:
        params['DisableApiTermination'] = 'true'
    return self.get_object('RunInstances', params, Reservation, verb='POST')
<SYSTEM_TASK:>
Terminate the instances specified
<END_TASK>
<USER_TASK:>
Description:
def terminate_instances(self, instance_ids=None):
    """
    Terminate the specified instances.

    :type instance_ids: list
    :param instance_ids: A list of strings of the Instance IDs to terminate

    :rtype: list
    :return: A list of the instances terminated
    """
    request_params = {}
    if instance_ids:
        self.build_list_params(request_params, instance_ids, 'InstanceId')
    return self.get_list('TerminateInstances', request_params,
                         [('item', Instance)], verb='POST')
<SYSTEM_TASK:>
Stop the instances specified
<END_TASK>
<USER_TASK:>
Description:
def stop_instances(self, instance_ids=None, force=False):
    """
    Stop the specified instances.

    :type instance_ids: list
    :param instance_ids: A list of strings of the Instance IDs to stop

    :type force: bool
    :param force: Forces the instance to stop

    :rtype: list
    :return: A list of the instances stopped
    """
    request_params = {}
    if instance_ids:
        self.build_list_params(request_params, instance_ids, 'InstanceId')
    if force:
        request_params['Force'] = 'true'
    return self.get_list('StopInstances', request_params,
                         [('item', Instance)], verb='POST')
<SYSTEM_TASK:>
Start the instances specified
<END_TASK>
<USER_TASK:>
Description:
def start_instances(self, instance_ids=None):
    """
    Start the specified instances.

    :type instance_ids: list
    :param instance_ids: A list of strings of the Instance IDs to start

    :rtype: list
    :return: A list of the instances started
    """
    request_params = {}
    if instance_ids:
        self.build_list_params(request_params, instance_ids, 'InstanceId')
    return self.get_list('StartInstances', request_params,
                         [('item', Instance)], verb='POST')
<SYSTEM_TASK:>
Retrieves the console output for the specified instance.
<END_TASK>
<USER_TASK:>
Description:
def get_console_output(self, instance_id):
    """
    Retrieve the console output for the specified instance.

    :type instance_id: string
    :param instance_id: The instance ID of a running instance on the cloud.

    :rtype: :class:`boto.ec2.instance.ConsoleOutput`
    :return: The console output as a ConsoleOutput object
    """
    query = {}
    # The API expects a one-element InstanceId list.
    self.build_list_params(query, [instance_id], 'InstanceId')
    return self.get_object('GetConsoleOutput', query,
                           ConsoleOutput, verb='POST')
<SYSTEM_TASK:>
Reboot the specified instances.
<END_TASK>
<USER_TASK:>
Description:
def reboot_instances(self, instance_ids=None):
    """
    Reboot the specified instances.

    :type instance_ids: list
    :param instance_ids: The instance IDs to reboot.

    :rtype: bool
    :return: Whether the request succeeded.
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    # Use POST like every other action in this module; the original call
    # omitted verb='POST' and so fell back to GET, which can exceed URL
    # length limits when many instance ids are supplied.
    return self.get_status('RebootInstances', params, verb='POST')
<SYSTEM_TASK:>
Gets an attribute from an instance.
<END_TASK>
<USER_TASK:>
Description:
def get_instance_attribute(self, instance_id, attribute):
    """
    Get an attribute from an instance.

    :type instance_id: string
    :param instance_id: The Amazon id of the instance

    :type attribute: string
    :param attribute: The attribute you need information about.
                      Valid choices are:
                      instanceType | kernel | ramdisk | userData |
                      disableApiTermination |
                      instanceInitiatedShutdownBehavior |
                      rootDeviceName | blockDeviceMapping

    :rtype: :class:`boto.ec2.image.InstanceAttribute`
    :return: An InstanceAttribute object representing the value of the
             attribute requested
    """
    query = {'InstanceId': instance_id}
    if attribute:
        query['Attribute'] = attribute
    return self.get_object('DescribeInstanceAttribute', query,
                           InstanceAttribute, verb='POST')
<SYSTEM_TASK:>
Changes an attribute of an instance
<END_TASK>
<USER_TASK:>
Description:
def modify_instance_attribute(self, instance_id, attribute, value):
    """
    Change an attribute of an instance.

    :type instance_id: string
    :param instance_id: The instance id you wish to change

    :type attribute: string
    :param attribute: The attribute you wish to change:
                      instanceType | kernel | ramdisk | userData |
                      disableApiTermination |
                      instanceInitiatedShutdownBehavior | rootDeviceName

    :type value: string
    :param value: The new value for the attribute

    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    # Convenience: allow a Python bool for disableApiTermination and
    # translate it to the 'true'/'false' string the API expects.
    if attribute == 'disableApiTermination' and isinstance(value, bool):
        value = 'true' if value else 'false'
    request = {'InstanceId': instance_id,
               'Attribute': attribute,
               'Value': value}
    return self.get_status('ModifyInstanceAttribute', request, verb='POST')
<SYSTEM_TASK:>
Resets an attribute of an instance to its default value.
<END_TASK>
<USER_TASK:>
Description:
def reset_instance_attribute(self, instance_id, attribute):
    """
    Reset an attribute of an instance to its default value.

    :type instance_id: string
    :param instance_id: ID of the instance

    :type attribute: string
    :param attribute: The attribute to reset.  Valid values are:
                      kernel | ramdisk

    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    return self.get_status('ResetInstanceAttribute',
                           {'InstanceId': instance_id,
                            'Attribute': attribute},
                           verb='POST')
<SYSTEM_TASK:>
Retrieve all the spot instances requests associated with your account.
<END_TASK>
<USER_TASK:>
Description:
def get_all_spot_instance_requests(self, request_ids=None,
                                   filters=None):
    """
    Retrieve all the spot instance requests associated with your account.

    :type request_ids: list
    :param request_ids: A list of strings of spot instance request IDs

    :type filters: dict
    :param filters: Optional filters limiting the results; keys are
                    filter names and values are filter values.  See the
                    EC2 API guide for the allowable names/values.

    :rtype: list
    :return: A list of
             :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
    """
    params = {}
    if request_ids:
        self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
    if filters:
        if 'launch.group-id' in filters:
            # Warn about the legacy group-name style value for this filter.
            lgid = filters['launch.group-id']
            if not lgid.startswith('sg-') or len(lgid) != 11:
                warnings.warn(
                    "The 'launch.group-id' filter now requires a security "
                    "group id (sg-*) and no longer supports filtering by "
                    "group name. Please update your filters accordingly.",
                    UserWarning)
        self.build_filter_params(params, filters)
    return self.get_list('DescribeSpotInstanceRequests', params,
                         [('item', SpotInstanceRequest)], verb='POST')
<SYSTEM_TASK:>
Retrieve the recent history of spot instances pricing.
<END_TASK>
<USER_TASK:>
Description:
def get_spot_price_history(self, start_time=None, end_time=None,
                           instance_type=None, product_description=None,
                           availability_zone=None):
    """
    Retrieve the recent history of spot instance pricing.

    :type start_time: str
    :param start_time: How far back to provide price changes for;
                       an ISO8601 DateTime string.

    :type end_time: str
    :param end_time: How far forward to provide price changes for;
                     an ISO8601 DateTime string.

    :type instance_type: str
    :param instance_type: Filter responses to a particular instance type.

    :type product_description: str
    :param product_description: Filter responses to a particular platform.
                                Valid values are currently: "Linux/UNIX",
                                "SUSE Linux", and "Windows"

    :type availability_zone: str
    :param availability_zone: The availability zone for which prices
                              should be returned

    :rtype: list
    :return: A list of tuples containing price and timestamp.
    """
    params = {}
    # Every argument is an optional scalar request parameter.
    for value, key in ((start_time, 'StartTime'),
                       (end_time, 'EndTime'),
                       (instance_type, 'InstanceType'),
                       (product_description, 'ProductDescription'),
                       (availability_zone, 'AvailabilityZone')):
        if value:
            params[key] = value
    return self.get_list('DescribeSpotPriceHistory', params,
                         [('item', SpotPriceHistory)], verb='POST')
<SYSTEM_TASK:>
Request instances on the spot market at a particular price.
<END_TASK>
<USER_TASK:>
Description:
def request_spot_instances(self, price, image_id, count=1, type='one-time',
                           valid_from=None, valid_until=None,
                           launch_group=None, availability_zone_group=None,
                           key_name=None, security_groups=None,
                           user_data=None, addressing_type=None,
                           instance_type='m1.small', placement=None,
                           kernel_id=None, ramdisk_id=None,
                           monitoring_enabled=False, subnet_id=None,
                           block_device_map=None):
    """
    Request instances on the spot market at a particular price.

    :type price: str
    :param price: The maximum price of your bid

    :type image_id: string
    :param image_id: The ID of the image to run

    :type count: int
    :param count: The number of instances requested

    :type type: str
    :param type: Type of request: 'one-time' or 'persistent'.
                 Default is one-time.

    :type valid_from: str
    :param valid_from: Start date of the request; an ISO8601 time string.

    :type valid_until: str
    :param valid_until: End date of the request; an ISO8601 time string.

    :type launch_group: str
    :param launch_group: If supplied, all requests will be fulfilled
                         as a group.

    :type availability_zone_group: str
    :param availability_zone_group: If supplied, all requests will be
                                    fulfilled within a single
                                    availability zone.

    :type key_name: string
    :param key_name: The name of the key pair with which to launch instances

    :type security_groups: list of strings
    :param security_groups: The names of the security groups with which
                            to associate instances

    :type user_data: string
    :param user_data: The user data passed to the launched instances
                      (base64-encoded before transmission)

    :type instance_type: string
    :param instance_type: The type of instance to run, e.g. m1.small,
                          m1.large, c1.xlarge, m2.4xlarge, cc1.4xlarge,
                          t1.micro, ...

    :type placement: string
    :param placement: The availability zone in which to launch the instances

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the instances

    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch the
                       instances

    :type monitoring_enabled: bool
    :param monitoring_enabled: Enable CloudWatch monitoring on the instance.

    :type subnet_id: string
    :param subnet_id: The subnet ID within which to launch the instances
                      for VPC.

    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure describing
                             the EBS volumes associated with the Image.

    :rtype: Reservation
    :return: The :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
             associated with the request for machines
    """
    params = {'LaunchSpecification.ImageId': image_id,
              'Type': type,
              'SpotPrice': price}
    if count:
        params['InstanceCount'] = count
    # Request-level scalar options.
    for value, key in ((valid_from, 'ValidFrom'),
                       (valid_until, 'ValidUntil'),
                       (launch_group, 'LaunchGroup'),
                       (availability_zone_group, 'AvailabilityZoneGroup')):
        if value:
            params[key] = value
    # Launch-specification scalar options share a common key prefix.
    for value, key in ((key_name, 'KeyName'),
                       (addressing_type, 'AddressingType'),
                       (instance_type, 'InstanceType'),
                       (placement, 'Placement.AvailabilityZone'),
                       (kernel_id, 'KernelId'),
                       (ramdisk_id, 'RamdiskId'),
                       (subnet_id, 'SubnetId')):
        if value:
            params['LaunchSpecification.%s' % key] = value
    if security_groups:
        # Accept either SecurityGroup objects or plain name strings.
        names = [g.name if isinstance(g, SecurityGroup) else g
                 for g in security_groups]
        self.build_list_params(params, names,
                               'LaunchSpecification.SecurityGroup')
    if user_data:
        params['LaunchSpecification.UserData'] = base64.b64encode(user_data)
    if monitoring_enabled:
        params['LaunchSpecification.Monitoring.Enabled'] = 'true'
    if block_device_map:
        block_device_map.build_list_params(params, 'LaunchSpecification.')
    return self.get_list('RequestSpotInstances', params,
                         [('item', SpotInstanceRequest)],
                         verb='POST')
<SYSTEM_TASK:>
Cancel the specified Spot Instance Requests.
<END_TASK>
<USER_TASK:>
Description:
def cancel_spot_instance_requests(self, request_ids):
    """
    Cancel the specified Spot Instance Requests.

    :type request_ids: list
    :param request_ids: A list of strings of the Request IDs to terminate

    :rtype: list
    :return: A list of the instances terminated
    """
    query = {}
    if request_ids:
        self.build_list_params(query, request_ids, 'SpotInstanceRequestId')
    return self.get_list('CancelSpotInstanceRequests', query,
                         [('item', Instance)], verb='POST')
<SYSTEM_TASK:>
Create a spot instance datafeed subscription for this account.
<END_TASK>
<USER_TASK:>
Description:
def create_spot_datafeed_subscription(self, bucket, prefix):
    """
    Create a spot instance datafeed subscription for this account.

    :type bucket: str or unicode
    :param bucket: The name of the bucket where spot instance data will
                   be written.  The account issuing this request must
                   have FULL_CONTROL access to the bucket specified.

    :type prefix: str or unicode
    :param prefix: An optional prefix that will be pre-pended to all
                   data files written to the bucket.

    :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
    :return: The datafeed subscription object or None
    """
    subscription_params = {'Bucket': bucket}
    if prefix:
        subscription_params['Prefix'] = prefix
    return self.get_object('CreateSpotDatafeedSubscription',
                           subscription_params, SpotDatafeedSubscription,
                           verb='POST')
<SYSTEM_TASK:>
Get all Availability Zones associated with the current region.
<END_TASK>
<USER_TASK:>
Description:
def get_all_zones(self, zones=None, filters=None):
    """
    Get all Availability Zones associated with the current region.

    :type zones: list
    :param zones: Optional list of zone names; if present, only the
                  Zones with these names will be returned.

    :type filters: dict
    :param filters: Optional filters limiting the results; keys are
                    filter names and values are filter values.  See the
                    EC2 API guide for the allowable names/values.

    :rtype: list of :class:`boto.ec2.zone.Zone`
    :return: The requested Zone objects
    """
    query = {}
    if zones:
        self.build_list_params(query, zones, 'ZoneName')
    if filters:
        self.build_filter_params(query, filters)
    return self.get_list('DescribeAvailabilityZones', query,
                         [('item', Zone)], verb='POST')
<SYSTEM_TASK:>
Associate an Elastic IP address with a currently running instance.
<END_TASK>
<USER_TASK:>
Description:
def associate_address(self, instance_id, public_ip=None, allocation_id=None):
    """
    Associate an Elastic IP address with a currently running instance.

    One of ``public_ip`` or ``allocation_id`` is required, depending on
    whether you are associating a VPC address or a plain EC2 address.
    If both are given, ``public_ip`` wins.

    :type instance_id: string
    :param instance_id: The ID of the instance

    :type public_ip: string
    :param public_ip: The public IP address for EC2 based allocations.

    :type allocation_id: string
    :param allocation_id: The allocation ID for a VPC-based elastic IP.

    :rtype: bool
    :return: True if successful
    """
    request = {'InstanceId': instance_id}
    if public_ip is not None:
        request['PublicIp'] = public_ip
    elif allocation_id is not None:
        request['AllocationId'] = allocation_id
    return self.get_status('AssociateAddress', request, verb='POST')
<SYSTEM_TASK:>
Disassociate an Elastic IP address from a currently running instance.
<END_TASK>
<USER_TASK:>
Description:
def disassociate_address(self, public_ip=None, association_id=None):
    """
    Disassociate an Elastic IP address from a currently running instance.

    :type public_ip: string
    :param public_ip: The public IP address for EC2 elastic IPs.

    :type association_id: string
    :param association_id: The association ID for a VPC based elastic ip.

    :rtype: bool
    :return: True if successful
    """
    request = {}
    # public_ip takes precedence over association_id when both are given.
    if public_ip is not None:
        request['PublicIp'] = public_ip
    elif association_id is not None:
        request['AssociationId'] = association_id
    return self.get_status('DisassociateAddress', request, verb='POST')
<SYSTEM_TASK:>
Free up an Elastic IP address.
<END_TASK>
<USER_TASK:>
Description:
def release_address(self, public_ip=None, allocation_id=None):
    """
    Free up an Elastic IP address.

    :type public_ip: string
    :param public_ip: The public IP address for EC2 elastic IPs.

    :type allocation_id: string
    :param allocation_id: The ID for VPC elastic IPs.

    :rtype: bool
    :return: True if successful
    """
    request = {}
    # public_ip takes precedence over allocation_id when both are given.
    if public_ip is not None:
        request['PublicIp'] = public_ip
    elif allocation_id is not None:
        request['AllocationId'] = allocation_id
    return self.get_status('ReleaseAddress', request, verb='POST')
<SYSTEM_TASK:>
Get all Volumes associated with the current credentials.
<END_TASK>
<USER_TASK:>
Description:
def get_all_volumes(self, volume_ids=None, filters=None):
    """
    Retrieve the EBS volumes visible to the current credentials.

    :type volume_ids: list
    :param volume_ids: Optional list of volume ids; when given, only
                       the volumes matching those ids are returned.

    :type filters: dict
    :param filters: Optional dict mapping filter names to filter
                    values, used to narrow the result set.  The set
                    of allowable names/values depends on the request
                    being performed; check the EC2 API guide for
                    details.

    :rtype: list of :class:`boto.ec2.volume.Volume`
    :return: The requested Volume objects
    """
    query = {}
    if volume_ids:
        self.build_list_params(query, volume_ids, 'VolumeId')
    if filters:
        self.build_filter_params(query, filters)
    return self.get_list('DescribeVolumes', query,
                         [('item', Volume)], verb='POST')
<SYSTEM_TASK:>
Create a new EBS Volume.
<END_TASK>
<USER_TASK:>
Description:
def create_volume(self, size, zone, snapshot=None):
    """
    Create a new EBS volume, optionally seeded from a snapshot.

    :type size: int
    :param size: The size of the new volume, in GiB.

    :type zone: string or :class:`boto.ec2.zone.Zone`
    :param zone: The availability zone in which the volume will be
                 created.

    :type snapshot: string or :class:`boto.ec2.snapshot.Snapshot`
    :param snapshot: Optional snapshot from which the new volume will
                     be created.

    :rtype: :class:`boto.ec2.volume.Volume`
    :return: The newly created Volume object.
    """
    # Accept either a Zone object or a plain zone name string.
    zone_name = zone.name if isinstance(zone, Zone) else zone
    params = {'AvailabilityZone': zone_name}
    if size:
        params['Size'] = size
    if snapshot:
        # Accept either a Snapshot object or a snapshot id string.
        snapshot_id = snapshot.id if isinstance(snapshot, Snapshot) else snapshot
        params['SnapshotId'] = snapshot_id
    return self.get_object('CreateVolume', params, Volume, verb='POST')
<SYSTEM_TASK:>
Attach an EBS volume to an EC2 instance.
<END_TASK>
<USER_TASK:>
Description:
def attach_volume(self, volume_id, instance_id, device):
    """
    Attach an EBS volume to a running EC2 instance.

    :type volume_id: str
    :param volume_id: The ID of the EBS volume to attach.

    :type instance_id: str
    :param instance_id: The ID of the EC2 instance to attach the
                        volume to.

    :type device: str
    :param device: The device name through which the volume will be
                   exposed on the instance (e.g. /dev/sdh).

    :rtype: bool
    :return: True if successful
    """
    params = dict(InstanceId=instance_id,
                  VolumeId=volume_id,
                  Device=device)
    return self.get_status('AttachVolume', params, verb='POST')
<SYSTEM_TASK:>
Get all EBS Snapshots associated with the current credentials.
<END_TASK>
<USER_TASK:>
Description:
def get_all_snapshots(self, snapshot_ids=None,
                      owner=None, restorable_by=None,
                      filters=None):
    """
    Retrieve the EBS snapshots visible to the current credentials.

    :type snapshot_ids: list
    :param snapshot_ids: Optional list of snapshot ids; when given,
                         only the snapshots matching those ids are
                         returned.

    :type owner: str
    :param owner: When present, restricts results to snapshots owned
                  by the specified user.  Valid values are:

                  * self
                  * amazon
                  * AWS Account ID

    :type restorable_by: str
    :param restorable_by: When present, restricts results to
                          snapshots restorable by the specified
                          account id.

    :type filters: dict
    :param filters: Optional dict mapping filter names to filter
                    values, used to narrow the result set.  The set
                    of allowable names/values depends on the request
                    being performed; check the EC2 API guide for
                    details.

    :rtype: list of :class:`boto.ec2.snapshot.Snapshot`
    :return: The requested Snapshot objects
    """
    params = {}
    if snapshot_ids:
        self.build_list_params(params, snapshot_ids, 'SnapshotId')
    # Scalar request parameters, included only when supplied.
    for name, value in (('Owner', owner),
                        ('RestorableBy', restorable_by)):
        if value:
            params[name] = value
    if filters:
        self.build_filter_params(params, filters)
    return self.get_list('DescribeSnapshots', params,
                         [('item', Snapshot)], verb='POST')
<SYSTEM_TASK:>
Create a snapshot of an existing EBS Volume.
<END_TASK>
<USER_TASK:>
Description:
def create_snapshot(self, volume_id, description=None):
    """
    Create a snapshot of an existing EBS Volume.

    :type volume_id: str
    :param volume_id: The ID of the volume to be snapshot'ed

    :type description: str
    :param description: A description of the snapshot.
                        Limited to 255 characters.

    :rtype: :class:`boto.ec2.snapshot.Snapshot`
    :return: The newly created Snapshot object, with the source
             volume's 'Name' tag copied onto it when one exists.
    """
    params = {'VolumeId': volume_id}
    if description:
        # The API limits the description to 255 characters; truncate
        # rather than fail.
        params['Description'] = description[0:255]
    snapshot = self.get_object('CreateSnapshot', params,
                               Snapshot, verb='POST')
    # Propagate the source volume's 'Name' tag to the new snapshot so
    # it stays identifiable alongside its volume.
    volume = self.get_all_volumes([volume_id])[0]
    volume_name = volume.tags.get('Name')
    if volume_name:
        snapshot.add_tag('Name', volume_name)
    return snapshot
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.