<SYSTEM_TASK:> Returns a Python data structure with information about all <END_TASK> <USER_TASK:> Description: def get_all_hosted_zones(self, start_marker=None, zone_list=None): """ Returns a Python data structure with information about all Hosted Zones defined for the AWS account. :param int start_marker: start marker to pass when fetching additional results after a truncated list :param list zone_list: a HostedZones list to prepend to results """
params = {}
if start_marker:
    params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
                             params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
    raise exception.DNSServerError(response.status,
                                   response.reason,
                                   body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
                              item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
    e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while e['ListHostedZonesResponse'].has_key('NextMarker'):
    next_marker = e['ListHostedZonesResponse']['NextMarker']
    zone_list = e['ListHostedZonesResponse']['HostedZones']
    e = self.get_all_hosted_zones(next_marker, zone_list)
return e
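A minimal usage sketch, assuming boto 2.x with AWS credentials already configured (for example via ~/.boto or environment variables); boto.connect_route53 is the standard connection helper:

import boto

conn = boto.connect_route53()
# get_all_hosted_zones() follows NextMarker pagination internally and
# returns the merged JSON-ish structure shown above.
zones = conn.get_all_hosted_zones()
for zone in zones['ListHostedZonesResponse']['HostedZones']:
    print zone['Name'], zone['Id']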
<SYSTEM_TASK:> Get information about a proposed set of changes, as submitted <END_TASK> <USER_TASK:> Description: def get_change(self, change_id): """ Get information about a proposed set of changes, as submitted by the change_rrsets method. Returns a Python data structure with status information about the changes. :type change_id: str :param change_id: The unique identifier for the set of changes. This ID is returned in the response to the change_rrsets method. """
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
    raise exception.DNSServerError(response.status,
                                   response.reason,
                                   body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
<SYSTEM_TASK:> Given an instruction and meta information this attempts to find <END_TASK> <USER_TASK:> Description: def find_best_instruction(addr, cpu_name, meta=None): """Given an instruction and meta information this attempts to find the best instruction for the frame. In some circumstances we can fix it up a bit to improve the accuracy. For more information see `symbolize_frame`. """
addr = rv = parse_addr(addr)

# In case we're not on the crashing frame we apply a simple heuristic:
# since we're most likely dealing with return addresses we just assume
# that the call is one instruction behind the current one.
if not meta or meta.get('frame_number') != 0:
    rv = get_previous_instruction(addr, cpu_name)

# In case registers are available we can check if the PC register
# does not match the given address we have from the first frame.
# If that is the case and we got one of a few likely signals, it seems
# that going one instruction back is actually the correct thing to do.
else:
    regs = meta.get('registers')
    ip = get_ip_register(regs, cpu_name)
    if ip is not None and ip != addr and \
       meta.get('signal') in (SIGILL, SIGBUS, SIGSEGV):
        rv = get_previous_instruction(addr, cpu_name)

# Don't ask me why we do this, but apparently on arm we get better
# hits if we look at the end of an instruction in the DWARF file than
# the beginning.
return round_to_instruction_end(rv, cpu_name)
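The helpers above (parse_addr, get_previous_instruction, round_to_instruction_end) are defined elsewhere. A self-contained sketch of the core return-address heuristic, where the function table is hypothetical and addr - 1 stands in for "one instruction back":

import bisect

# (start_address, symbol) pairs, sorted by start address; made up values.
FUNCTIONS = [(0x1000, 'main'), (0x1400, 'parse'), (0x1900, 'render')]

def symbolize(addr, is_crashing_frame):
    # For non-crashing frames addr is a return address, so step back one
    # byte so the range lookup lands inside the calling instruction.
    if not is_crashing_frame:
        addr -= 1
    starts = [f[0] for f in FUNCTIONS]
    idx = bisect.bisect_right(starts, addr) - 1
    return FUNCTIONS[idx][1] if idx >= 0 else None

assert symbolize(0x1400, is_crashing_frame=False) == 'main'
assert symbolize(0x1400, is_crashing_frame=True) == 'parse'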
<SYSTEM_TASK:> List key objects within a bucket. This returns an instance of an <END_TASK> <USER_TASK:> Description: def list(self, prefix='', delimiter='', marker='', headers=None): """ List key objects within a bucket. This returns an instance of an BucketListResultSet that automatically handles all of the result paging, etc. from S3. You just need to keep iterating until there are no more results. Called with no arguments, this will return an iterator object across all keys within the bucket. The Key objects returned by the iterator are obtained by parsing the results of a GET on the bucket, also known as the List Objects request. The XML returned by this request contains only a subset of the information about each key. Certain metadata fields such as Content-Type and user metadata are not available in the XML. Therefore, if you want these additional metadata fields you will have to do a HEAD request on the Key in the bucket. :type prefix: string :param prefix: allows you to limit the listing to a particular prefix. For example, if you call the method with prefix='/foo/' then the iterator will only cycle through the keys that begin with the string '/foo/'. :type delimiter: string :param delimiter: can be used in conjunction with the prefix to allow you to organize and browse your keys hierarchically. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ for more details. :type marker: string :param marker: The "marker" of where you are in the result set :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """
return BucketListResultSet(self, prefix, delimiter, marker, headers)
<SYSTEM_TASK:> Create a new key in the bucket by copying another existing key. <END_TASK> <USER_TASK:> Description: def copy_key(self, new_key_name, src_bucket_name, src_key_name, metadata=None, src_version_id=None, storage_class='STANDARD', preserve_acl=False, encrypt_key=False, headers=None, query_args=None): """ Create a new key in the bucket by copying another existing key. :type new_key_name: string :param new_key_name: The name of the new key :type src_bucket_name: string :param src_bucket_name: The name of the source bucket :type src_key_name: string :param src_key_name: The name of the source key :type src_version_id: string :param src_version_id: The version id for the key. This param is optional. If not specified, the newest version of the key will be copied. :type metadata: dict :param metadata: Metadata to be associated with new key. If metadata is supplied, it will replace the metadata of the source key being copied. If no metadata is supplied, the source key's metadata will be copied to the new key. :type storage_class: string :param storage_class: The storage class of the new key. By default, the new key will use the standard storage class. Possible values are: STANDARD | REDUCED_REDUNDANCY :type preserve_acl: bool :param preserve_acl: If True, the ACL from the source key will be copied to the destination key. If False, the destination key will have the default ACL. Note that preserving the ACL in the new key object will require two additional API calls to S3, one to retrieve the current ACL and one to set that ACL on the new object. If you don't care about the ACL, a value of False will be significantly more efficient. :type encrypt_key: bool :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type headers: dict :param headers: A dictionary of header name/value pairs. :type query_args: string :param query_args: A string of additional querystring arguments to append to the request :rtype: :class:`boto.s3.key.Key` or subclass :returns: An instance of the newly created key object """
headers = headers or {}
provider = self.connection.provider
src_key_name = boto.utils.get_utf8_value(src_key_name)
if preserve_acl:
    if self.name == src_bucket_name:
        src_bucket = self
    else:
        src_bucket = self.connection.get_bucket(src_bucket_name)
    acl = src_bucket.get_xml_acl(src_key_name)
if encrypt_key:
    headers[provider.server_side_encryption_header] = 'AES256'
src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
if src_version_id:
    src += '?versionId=%s' % src_version_id
headers[provider.copy_source_header] = str(src)
# make sure storage_class_header key exists before accessing it
if provider.storage_class_header and storage_class:
    headers[provider.storage_class_header] = storage_class
if metadata:
    headers[provider.metadata_directive_header] = 'REPLACE'
    headers = boto.utils.merge_meta(headers, metadata, provider)
elif not query_args:
    # Can't use this header with multi-part copy.
    headers[provider.metadata_directive_header] = 'COPY'
response = self.connection.make_request('PUT', self.name, new_key_name,
                                        headers=headers,
                                        query_args=query_args)
body = response.read()
if response.status == 200:
    key = self.new_key(new_key_name)
    h = handler.XmlHandler(key, self)
    xml.sax.parseString(body, h)
    if hasattr(key, 'Error'):
        raise provider.storage_copy_error(key.Code, key.Message, body)
    key.handle_version_headers(response)
    if preserve_acl:
        self.set_xml_acl(acl, new_key_name)
    return key
else:
    raise provider.storage_response_error(response.status,
                                          response.reason, body)
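A minimal usage sketch, assuming a boto 2.x connection; the bucket and key names are placeholders. The copy happens server-side, so no object data flows through the client:

import boto

conn = boto.connect_s3()
dst_bucket = conn.get_bucket('backup-bucket')
# preserve_acl costs two extra API calls, so leave it False when the
# default ACL on the destination is acceptable.
new_key = dst_bucket.copy_key('logs/2012-01-01.gz',
                              'source-bucket', 'logs/2012-01-01.gz',
                              preserve_acl=False)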
<SYSTEM_TASK:> Set a subresource for a bucket or key. <END_TASK> <USER_TASK:> Description: def set_subresource(self, subresource, value, key_name='', headers=None, version_id=None): """ Set a subresource for a bucket or key. :type subresource: string :param subresource: The subresource to set. :type value: string :param value: The value of the subresource. :type key_name: string :param key_name: The key to operate on, or None to operate on the bucket. :type headers: dict :param headers: Additional HTTP headers to include in the request. :type version_id: string :param version_id: Optional. The version id of the key to operate on. If not specified, operate on the newest version. """
if not subresource:
    raise TypeError('set_subresource called with subresource=None')
query_args = subresource
if version_id:
    query_args += '&versionId=%s' % version_id
response = self.connection.make_request('PUT', self.name, key_name,
                                        data=value.encode('UTF-8'),
                                        query_args=query_args,
                                        headers=headers)
body = response.read()
if response.status != 200:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Get a subresource for a bucket or key. <END_TASK> <USER_TASK:> Description: def get_subresource(self, subresource, key_name='', headers=None, version_id=None): """ Get a subresource for a bucket or key. :type subresource: string :param subresource: The subresource to get. :type key_name: string :param key_name: The key to operate on, or None to operate on the bucket. :type headers: dict :param headers: Additional HTTP headers to include in the request. :type version_id: string :param version_id: Optional. The version id of the key to operate on. If not specified, operate on the newest version. :rtype: string :returns: The value of the subresource. """
if not subresource:
    raise TypeError('get_subresource called with subresource=None')
query_args = subresource
if version_id:
    query_args += '&versionId=%s' % version_id
response = self.connection.make_request('GET', self.name, key_name,
                                        query_args=query_args,
                                        headers=headers)
body = response.read()
if response.status != 200:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
return body
<SYSTEM_TASK:> Returns the LocationConstraint for the bucket. <END_TASK> <USER_TASK:> Description: def get_location(self): """ Returns the LocationConstraint for the bucket. :rtype: str :return: The LocationConstraint for the bucket or the empty string if no constraint was specified when bucket was created. """
response = self.connection.make_request('GET', self.name,
                                        query_args='location')
body = response.read()
if response.status == 200:
    rs = ResultSet(self)
    h = handler.XmlHandler(rs, self)
    xml.sax.parseString(body, h)
    return rs.LocationConstraint
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Returns the current status of versioning on the bucket. <END_TASK> <USER_TASK:> Description: def get_versioning_status(self, headers=None): """ Returns the current status of versioning on the bucket. :rtype: dict :returns: A dictionary containing a key named 'Versioning' that can have a value of either Enabled, Disabled, or Suspended. Also, if MFADelete has ever been enabled on the bucket, the dictionary will contain a key named 'MFADelete' which will have a value of either Enabled or Suspended. """
response = self.connection.make_request('GET', self.name,
                                        query_args='versioning',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    d = {}
    ver = re.search(self.VersionRE, body)
    if ver:
        d['Versioning'] = ver.group(1)
    mfa = re.search(self.MFADeleteRE, body)
    if mfa:
        d['MfaDelete'] = mfa.group(1)
    return d
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Returns the current lifecycle configuration on the bucket. <END_TASK> <USER_TASK:> Description: def get_lifecycle_config(self, headers=None): """ Returns the current lifecycle configuration on the bucket. :rtype: :class:`boto.s3.lifecycle.Lifecycle` :returns: A LifecycleConfig object that describes all current lifecycle rules in effect for the bucket. """
response = self.connection.make_request('GET', self.name,
                                        query_args='lifecycle',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    lifecycle = Lifecycle(self)
    h = handler.XmlHandler(lifecycle, self)
    xml.sax.parseString(body, h)
    return lifecycle
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Configure this bucket to act as a website <END_TASK> <USER_TASK:> Description: def configure_website(self, suffix, error_key='', headers=None): """ Configure this bucket to act as a website :type suffix: str :param suffix: Suffix that is appended to a request that is for a "directory" on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character. :type error_key: str :param error_key: The object key name to use when a 4XX class error occurs. This is optional. """
if error_key:
    error_frag = self.WebsiteErrorFragment % error_key
else:
    error_frag = ''
body = self.WebsiteBody % (suffix, error_frag)
response = self.connection.make_request('PUT', self.name, data=body,
                                        query_args='website',
                                        headers=headers)
body = response.read()
if response.status == 200:
    return True
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Returns the current status of website configuration on the bucket. <END_TASK> <USER_TASK:> Description: def get_website_configuration(self, headers=None): """ Returns the current status of website configuration on the bucket. :rtype: dict :returns: A dictionary containing a Python representation of the XML response from S3. The overall structure is: * WebsiteConfiguration * IndexDocument * Suffix : suffix that is appended to request that is for a "directory" on the website endpoint * ErrorDocument * Key : name of object to serve when an error occurs """
response = self.connection.make_request('GET', self.name,
                                        query_args='website',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    e = boto.jsonresponse.Element()
    h = boto.jsonresponse.XmlHandler(e, None)
    h.parse(body)
    return e
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Removes all website configuration from the bucket. <END_TASK> <USER_TASK:> Description: def delete_website_configuration(self, headers=None): """ Removes all website configuration from the bucket. """
response = self.connection.make_request('DELETE', self.name,
                                        query_args='website',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
    return True
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Returns the fully qualified hostname to use if you want to access this <END_TASK> <USER_TASK:> Description: def get_website_endpoint(self): """ Returns the fully qualified hostname to use if you want to access this bucket as a website. This doesn't validate whether the bucket has been correctly configured as a website or not. """
l = [self.name]
l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
l.append('.'.join(self.connection.host.split('.')[-2:]))
return '.'.join(l)
<SYSTEM_TASK:> Add or replace the JSON policy associated with the bucket. <END_TASK> <USER_TASK:> Description: def set_policy(self, policy, headers=None): """ Add or replace the JSON policy associated with the bucket. :type policy: str :param policy: The JSON policy as a string. """
response = self.connection.make_request('PUT', self.name,
                                        data=policy,
                                        query_args='policy',
                                        headers=headers)
body = response.read()
if response.status >= 200 and response.status <= 204:
    return True
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Start a multipart upload operation. <END_TASK> <USER_TASK:> Description: def initiate_multipart_upload(self, key_name, headers=None, reduced_redundancy=False, metadata=None, encrypt_key=False): """ Start a multipart upload operation. :type key_name: string :param key_name: The name of the key that will ultimately result from this multipart upload operation. This will be exactly as the key appears in the bucket after the upload process has been completed. :type headers: dict :param headers: Additional HTTP headers to send and store with the resulting key in S3. :type reduced_redundancy: boolean :param reduced_redundancy: In multipart uploads, the storage class is specified when initiating the upload, not when uploading individual parts. So if you want the resulting key to use the reduced redundancy storage class set this flag when you initiate the upload. :type metadata: dict :param metadata: Any metadata that you would like to set on the key that results from the multipart upload. :type encrypt_key: bool :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. """
query_args = 'uploads'
provider = self.connection.provider
if headers is None:
    headers = {}
if reduced_redundancy:
    storage_class_header = provider.storage_class_header
    if storage_class_header:
        headers[storage_class_header] = 'REDUCED_REDUNDANCY'
    # TODO: what if the provider doesn't support reduced redundancy?
    # (see boto.s3.key.Key.set_contents_from_file)
if encrypt_key:
    headers[provider.server_side_encryption_header] = 'AES256'
if metadata is None:
    metadata = {}
headers = boto.utils.merge_meta(headers, metadata,
                                self.connection.provider)
response = self.connection.make_request('POST', self.name, key_name,
                                        query_args=query_args,
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    resp = MultiPartUpload(self)
    h = handler.XmlHandler(resp, self)
    xml.sax.parseString(body, h)
    return resp
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
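A minimal sketch of the full multipart flow, assuming boto 2.x; the bucket and file names are placeholders, and upload_part_from_file/complete_upload are the standard boto MultiPartUpload helpers:

from cStringIO import StringIO

import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-bucket')
mp = bucket.initiate_multipart_upload('big-file.bin',
                                      reduced_redundancy=True)
part_size = 5 * 1024 * 1024  # S3 minimum part size (except last part)
with open('big-file.bin', 'rb') as fp:
    part_num = 0
    while True:
        chunk = fp.read(part_size)
        if not chunk:
            break
        part_num += 1
        mp.upload_part_from_file(StringIO(chunk), part_num)
mp.complete_upload()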
<SYSTEM_TASK:> Complete a multipart upload operation. <END_TASK> <USER_TASK:> Description: def complete_multipart_upload(self, key_name, upload_id, xml_body, headers=None): """ Complete a multipart upload operation. """
query_args = 'uploadId=%s' % upload_id
if headers is None:
    headers = {}
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('POST', self.name, key_name,
                                        query_args=query_args,
                                        headers=headers, data=xml_body)
contains_error = False
body = response.read()
# Some errors will be reported in the body of the response
# even though the HTTP response code is 200. This check
# does a quick and dirty peek in the body for an error element.
if body.find('<Error>') > 0:
    contains_error = True
boto.log.debug(body)
if response.status == 200 and not contains_error:
    resp = CompleteMultiPartUpload(self)
    h = handler.XmlHandler(resp, self)
    xml.sax.parseString(body, h)
    return resp
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Gets a connection from the pool for the named host. Returns <END_TASK> <USER_TASK:> Description: def get_http_connection(self, host, is_secure): """ Gets a connection from the pool for the named host. Returns None if there is no connection that can be reused. """
if is_secure:
    return AsyncHTTPSConnection(host, http_client=self._httpclient)
else:
    return AsyncHTTPConnection(host, http_client=self._httpclient)
<SYSTEM_TASK:> Verifies the authenticity of a notification message. <END_TASK> <USER_TASK:> Description: def verify(self, secret_key): """ Verifies the authenticity of a notification message. TODO: This is doing a form of authentication and this functionality should really be merged with the pluggable authentication mechanism at some point. """
verification_input = NotificationMessage.SERVICE_NAME
verification_input += NotificationMessage.OPERATION_NAME
verification_input += self.timestamp
h = hmac.new(key=secret_key, digestmod=sha)
h.update(verification_input)
signature_calc = base64.b64encode(h.digest())
return self.signature == signature_calc
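A self-contained demonstration of the same scheme: HMAC-SHA1 (hashlib.sha1 is the modern equivalent of the old sha module used above) over the concatenated service name, operation name and timestamp, base64-encoded and compared to the transmitted signature. All values here are made up for illustration:

import base64
import hashlib
import hmac

secret_key = 'example-secret'
payload = 'FPSOperation' + 'Pay' + '2012-01-01T00:00:00Z'

h = hmac.new(secret_key, payload, hashlib.sha1)
signature = base64.b64encode(h.digest())
# The receiver recomputes the digest and checks for an exact match.
assert signature == base64.b64encode(
    hmac.new(secret_key, payload, hashlib.sha1).digest())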
<SYSTEM_TASK:> Update the image's state information by making a call to fetch <END_TASK> <USER_TASK:> Description: def update(self, validate=False): """ Update the image's state information by making a call to fetch the current image attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the image the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """
rs = self.connection.get_all_images([self.id])
if len(rs) > 0:
    img = rs[0]
    if img.id == self.id:
        self._update(img)
elif validate:
    raise ValueError('%s is not a valid Image ID' % self.id)
return self.state
<SYSTEM_TASK:> Returns a string containing the XML version of the Lifecycle <END_TASK> <USER_TASK:> Description: def to_xml(self): """ Returns a string containing the XML version of the Lifecycle configuration as defined by S3. """
s = '<LifecycleConfiguration>'
for rule in self:
    s += rule.to_xml()
s += '</LifecycleConfiguration>'
return s
<SYSTEM_TASK:> Commits pending updates to Amazon DynamoDB. <END_TASK> <USER_TASK:> Description: def save(self, expected_value=None, return_values=None): """ Commits pending updates to Amazon DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name/value pairs before they were updated. Possible values are: None, 'ALL_OLD', 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. If 'ALL_NEW' is specified, then all the attributes of the new version of the item are returned. If 'UPDATED_NEW' is specified, the new versions of only the updated attributes are returned. """
return self.table.layer2.update_item(self, expected_value, return_values)
<SYSTEM_TASK:> Delete the item from DynamoDB. <END_TASK> <USER_TASK:> Description: def delete(self, expected_value=None, return_values=None): """ Delete the item from DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before they were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. """
return self.table.layer2.delete_item(self, expected_value, return_values)
<SYSTEM_TASK:> Connect to an EC2 API endpoint. Additional arguments are passed <END_TASK> <USER_TASK:> Description: def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ Connect to an EC2 API endpoint. Additional arguments are passed through to connect_ec2. :type url: string :param url: A url for the ec2 api endpoint to connect to :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to the given EC2 API endpoint """
from boto.ec2.regioninfo import RegionInfo

purl = urlparse.urlparse(url)
kwargs['port'] = purl.port
kwargs['host'] = purl.hostname
kwargs['path'] = purl.path
if 'is_secure' not in kwargs:
    kwargs['is_secure'] = (purl.scheme == "https")
kwargs['region'] = RegionInfo(name=purl.hostname,
                              endpoint=purl.hostname)
kwargs['aws_access_key_id'] = aws_access_key_id
kwargs['aws_secret_access_key'] = aws_secret_access_key
return connect_ec2(**kwargs)
<SYSTEM_TASK:> Connect to a Walrus service. <END_TASK> <USER_TASK:> Description: def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Walrus', is_secure=False, **kwargs): """ Connect to a Walrus service. :type host: string :param host: the host name or ip address of the Walrus server :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to Walrus """
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat

# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
    aws_access_key_id = config.get('Credentials',
                                   'euca_access_key_id', None)
if not aws_secret_access_key:
    aws_secret_access_key = config.get('Credentials',
                                       'euca_secret_access_key', None)
if not host:
    host = config.get('Boto', 'walrus_host', None)

return S3Connection(aws_access_key_id, aws_secret_access_key,
                    host=host, port=port, path=path,
                    calling_format=OrdinaryCallingFormat(),
                    is_secure=is_secure, **kwargs)
<SYSTEM_TASK:> Connect to the Internet Archive via their S3-like API. <END_TASK> <USER_TASK:> Description: def connect_ia(ia_access_key_id=None, ia_secret_access_key=None, is_secure=False, **kwargs): """ Connect to the Internet Archive via their S3-like API. :type ia_access_key_id: string :param ia_access_key_id: Your IA Access Key ID. This will also look in your boto config file for an entry in the Credentials section called "ia_access_key_id" :type ia_secret_access_key: string :param ia_secret_access_key: Your IA Secret Access Key. This will also look in your boto config file for an entry in the Credentials section called "ia_secret_access_key" :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to the Internet Archive """
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat

access_key = config.get('Credentials', 'ia_access_key_id',
                        ia_access_key_id)
secret_key = config.get('Credentials', 'ia_secret_access_key',
                        ia_secret_access_key)
return S3Connection(access_key, secret_key,
                    host='s3.us.archive.org',
                    calling_format=OrdinaryCallingFormat(),
                    is_secure=is_secure, **kwargs)
<SYSTEM_TASK:> Instantiate a StorageUri from a URI string. <END_TASK> <USER_TASK:> Description: def storage_uri(uri_str, default_scheme='file', debug=0, validate=True, bucket_storage_uri_class=BucketStorageUri, suppress_consec_slashes=True): """ Instantiate a StorageUri from a URI string. :type uri_str: string :param uri_str: URI naming bucket + optional object. :type default_scheme: string :param default_scheme: default scheme for scheme-less URIs. :type debug: int :param debug: debug level to pass in to boto connection (range 0..2). :type validate: bool :param validate: whether to check for bucket name validity. :type bucket_storage_uri_class: BucketStorageUri interface. :param bucket_storage_uri_class: Allows mocking for unit tests. :param suppress_consec_slashes: If provided, controls whether consecutive slashes will be suppressed in key paths. We allow validate to be disabled to allow caller to implement bucket-level wildcarding (outside the boto library; see gsutil). :rtype: :class:`boto.StorageUri` subclass :return: StorageUri subclass for given URI. ``uri_str`` must be one of the following formats: * gs://bucket/name * s3://bucket/name * gs://bucket * s3://bucket * filename The last example uses the default scheme ('file', unless overridden) """
# Manually parse URI components instead of using urlparse.urlparse
# because what we're calling URIs don't really fit the standard syntax
# for URIs (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
    # Check for common error: user specifies gs:bucket instead
    # of gs://bucket. Some URI parsers allow this, but it can cause
    # confusion for callers, so we don't.
    if uri_str.find(':') != -1:
        raise InvalidUriError('"%s" contains ":" instead of "://"' %
                              uri_str)
    scheme = default_scheme.lower()
    path = uri_str
else:
    scheme = uri_str[0:end_scheme_idx].lower()
    path = uri_str[end_scheme_idx + 3:]

if scheme not in ['file', 's3', 'gs']:
    raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
    # For file URIs we have no bucket name, and use the complete path
    # (minus 'file://') as the object name.
    is_stream = False
    if path == '-':
        is_stream = True
    return FileStorageUri(path, debug, is_stream)
else:
    path_parts = path.split('/', 1)
    bucket_name = path_parts[0]
    # If enabled, ensure the bucket name is valid, to avoid possibly
    # confusing other parts of the code. (For example if we didn't
    # catch bucket names containing ':', when a user tried to connect
    # to the server with that name they might get a confusing error
    # about non-integer port numbers.)
    if (validate and bucket_name and
        # Disallow buckets violating charset or not [3..255] chars total.
        (not re.match('^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$', bucket_name)
         # Disallow buckets with individual DNS labels longer than 63.
         or re.search('[-_a-z0-9]{64}', bucket_name))):
        raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
    object_name = ''
    if len(path_parts) > 1:
        object_name = path_parts[1]
    return bucket_storage_uri_class(
        scheme, bucket_name, object_name, debug,
        suppress_consec_slashes=suppress_consec_slashes)
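A minimal usage sketch of the factory above, assuming boto exposes storage_uri at the package top level and that the returned objects carry scheme/bucket_name/object_name attributes as in the constructor calls shown:

import boto

uri = boto.storage_uri('s3://my-bucket/path/to/object')
print uri.scheme       # 's3'
print uri.bucket_name  # 'my-bucket'
print uri.object_name  # 'path/to/object'

# Scheme-less strings fall back to default_scheme ('file' by default).
local = boto.storage_uri('/tmp/data.txt')
print local.scheme     # 'file'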
<SYSTEM_TASK:> Returns a StorageUri for the given key. <END_TASK> <USER_TASK:> Description: def storage_uri_for_key(key): """Returns a StorageUri for the given key. :type key: :class:`boto.s3.key.Key` or subclass :param key: URI naming bucket + optional object. """
if not isinstance(key, boto.s3.key.Key):
    raise InvalidUriError('Requested key (%s) is not a subclass of '
                          'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str)
<SYSTEM_TASK:> Get the users that are tagged, based on the id_ <END_TASK> <USER_TASK:> Description: def get_tags_users(self, id_): """ Get the users that are tagged, based on the id_ """
return _get_request(_TAGS_USERS.format(c_api=_C_API_BEGINNING,
                                       api=_API_VERSION,
                                       id_=id_,
                                       at=self.access_token))
<SYSTEM_TASK:> Get startup based on id <END_TASK> <USER_TASK:> Description: def get_startup(self, id_): """ Get startup based on id """
return _get_request(_STARTUP.format(c_api=_C_API_BEGINNING,
                                    api=_API_VERSION,
                                    id_=id_,
                                    at=self.access_token))
<SYSTEM_TASK:> Get startups based on which companies are raising funding <END_TASK> <USER_TASK:> Description: def get_startups_filtered_by(self, filter_='raising'): """ Get startups based on which companies are raising funding """
url = _STARTUP_RAISING.format(c_api=_C_API_BEGINNING,
                              api=_API_VERSION,
                              filter_=filter_,
                              at=self.access_token)
return _get_request(url)
<SYSTEM_TASK:> Search for a particular slug <END_TASK> <USER_TASK:> Description: def get_search_for_slugs(self, slug): """ Search for a particular slug """
return _get_request(_SLUG_SEARCH.format(c_api=_C_API_BEGINNING,
                                        api=_API_VERSION,
                                        slug=_format_query(slug),
                                        at=self.access_token))
<SYSTEM_TASK:> Get reviews for a particular user <END_TASK> <USER_TASK:> Description: def get_reviews(self, user_id): """ Get reviews for a particular user """
url = _REVIEWS_USER.format(c_api=_C_API_BEGINNING,
                           api=_API_VERSION,
                           user_id=user_id,
                           at=self.access_token)
return _get_request(url)
<SYSTEM_TASK:> Get a particular review id, independent of the user_id and <END_TASK> <USER_TASK:> Description: def get_review_id(self, id_): """ Get a particular review id, independent of the user_id and startup_id """
return _get_request(_REVIEW_ID.format(c_api=_C_API_BEGINNING,
                                      api=_API_VERSION,
                                      id_=id_,
                                      at=self.access_token))
<SYSTEM_TASK:> Get all available regions for the SDB service. <END_TASK> <USER_TASK:> Description: def regions(): """ Get all available regions for the SDB service. :rtype: list :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances """
return [SDBRegionInfo(name='us-east-1',
                      endpoint='sdb.amazonaws.com'),
        SDBRegionInfo(name='eu-west-1',
                      endpoint='sdb.eu-west-1.amazonaws.com'),
        SDBRegionInfo(name='us-west-1',
                      endpoint='sdb.us-west-1.amazonaws.com'),
        SDBRegionInfo(name='sa-east-1',
                      endpoint='sdb.sa-east-1.amazonaws.com'),
        SDBRegionInfo(name='us-west-2',
                      endpoint='sdb.us-west-2.amazonaws.com'),
        SDBRegionInfo(name='ap-northeast-1',
                      endpoint='sdb.ap-northeast-1.amazonaws.com'),
        SDBRegionInfo(name='ap-southeast-1',
                      endpoint='sdb.ap-southeast-1.amazonaws.com')]
<SYSTEM_TASK:> Add this file to the init.d directory <END_TASK> <USER_TASK:> Description: def add_init_script(self, file, name): """ Add this file to the init.d directory """
f_path = os.path.join("/etc/init.d", name)
f = open(f_path, "w")
f.write(file)
f.close()
os.chmod(f_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
self.run("/usr/sbin/update-rc.d %s defaults" % name)
<SYSTEM_TASK:> Create a user on the local system <END_TASK> <USER_TASK:> Description: def create_user(self, user): """ Create a user on the local system """
self.run("useradd -m %s" % user) usr = getpwnam(user) return usr
<SYSTEM_TASK:> Compute MD5 hash on passed file and return results in a tuple of values. <END_TASK> <USER_TASK:> Description: def compute_md5(fp, buf_size=8192, size=None): """ Compute MD5 hash on passed file and return results in a tuple of values. :type fp: file :param fp: File pointer to the file to MD5 hash. The file pointer will be reset to its current location before the method returns. :type buf_size: integer :param buf_size: Number of bytes per read request. :type size: int :param size: (optional) The maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where the file is being split in place into different parts. Fewer bytes may be available. :rtype: tuple :return: A tuple containing the hex digest version of the MD5 hash as the first element, the base64 encoded version of the plain digest as the second element and the data size as the third element. """
m = md5()
spos = fp.tell()
if size and size < buf_size:
    s = fp.read(size)
else:
    s = fp.read(buf_size)
while s:
    m.update(s)
    if size:
        size -= len(s)
        if size <= 0:
            break
    if size and size < buf_size:
        s = fp.read(size)
    else:
        s = fp.read(buf_size)
hex_md5 = m.hexdigest()
base64md5 = base64.encodestring(m.digest())
if base64md5[-1] == '\n':
    base64md5 = base64md5[0:-1]
# data_size based on bytes read.
data_size = fp.tell() - spos
fp.seek(spos)
return (hex_md5, base64md5, data_size)
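A usage sketch, assuming boto.utils.compute_md5 as defined above. The hex digest is what S3 returns as an ETag for simple uploads; the base64 digest is what the Content-MD5 header expects:

from StringIO import StringIO

from boto.utils import compute_md5

fp = StringIO('hello world')
hex_md5, b64_md5, size = compute_md5(fp)
assert size == 11
assert fp.tell() == 0  # file position is restored for the caller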
<SYSTEM_TASK:> Get all available regions for the CloudWatch service. <END_TASK> <USER_TASK:> Description: def regions(): """ Get all available regions for the CloudWatch service. :rtype: list :return: A list of :class:`boto.RegionInfo` instances """
regions = []
for region_name in RegionData:
    region = RegionInfo(name=region_name,
                        endpoint=RegionData[region_name],
                        connection_cls=CloudWatchConnection)
    regions.append(region)
return regions
<SYSTEM_TASK:> Get time-series data for one or more statistics of a given metric. <END_TASK> <USER_TASK:> Description: def get_metric_statistics(self, period, start_time, end_time, metric_name, namespace, statistics, dimensions=None, unit=None): """ Get time-series data for one or more statistics of a given metric. :type period: integer :param period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60. The default value is 60. :type start_time: datetime :param start_time: The time stamp to use for determining the first datapoint to return. The value specified is inclusive; results include datapoints with the time stamp specified. :type end_time: datetime :param end_time: The time stamp to use for determining the last datapoint to return. The value specified is exclusive; results will include datapoints up to the time stamp specified. :type metric_name: string :param metric_name: The metric name. :type namespace: string :param namespace: The metric's namespace. :type statistics: list :param statistics: A list of statistic names. Valid values: Average | Sum | SampleCount | Maximum | Minimum :type dimensions: dict :param dimensions: A dictionary of dimension key/values where the key is the dimension name and the value is either a scalar value or an iterator of values to be associated with that dimension. :rtype: list """
params = {'Period': period,
          'MetricName': metric_name,
          'Namespace': namespace,
          'StartTime': start_time.isoformat(),
          'EndTime': end_time.isoformat()}
self.build_list_params(params, statistics, 'Statistics.member.%d')
if dimensions:
    self.build_dimension_param(dimensions, params)
return self.get_list('GetMetricStatistics', params,
                     [('member', Datapoint)])
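A minimal usage sketch, assuming boto 2.x CloudWatch support via boto.connect_cloudwatch; the instance id is a placeholder. Each returned Datapoint behaves like a dict keyed by the requested statistic names:

import datetime

import boto

cw = boto.connect_cloudwatch()
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=1)
datapoints = cw.get_metric_statistics(
    300, start, end, 'CPUUtilization', 'AWS/EC2', ['Average'],
    dimensions={'InstanceId': 'i-12345678'})
for dp in datapoints:
    print dp['Timestamp'], dp['Average']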
<SYSTEM_TASK:> Returns a list of the valid metrics for which there is recorded <END_TASK> <USER_TASK:> Description: def list_metrics(self, next_token=None, dimensions=None, metric_name=None, namespace=None): """ Returns a list of the valid metrics for which there is recorded data available. :type next_token: str :param next_token: A maximum of 500 metrics will be returned at one time. If more results are available, the ResultSet returned will contain a non-Null next_token attribute. Passing that token as a parameter to list_metrics will retrieve the next page of metrics. :type dimensions: dict :param dimensions: A dictionary containing name/value pairs that will be used to filter the results. The key in the dictionary is the name of a Dimension. The value in the dictionary is either a scalar value of that Dimension name that you want to filter on, a list of values to filter on or None if you want all metrics with that Dimension name. :type metric_name: str :param metric_name: The name of the Metric to filter against. If None, all Metric names will be returned. :type namespace: str :param namespace: A Metric namespace to filter against (e.g. AWS/EC2). If None, Metrics from all namespaces will be returned. """
params = {}
if next_token:
    params['NextToken'] = next_token
if dimensions:
    self.build_dimension_param(dimensions, params)
if metric_name:
    params['MetricName'] = metric_name
if namespace:
    params['Namespace'] = namespace
return self.get_list('ListMetrics', params, [('member', Metric)])
<SYSTEM_TASK:> Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch <END_TASK> <USER_TASK:> Description: def put_metric_data(self, namespace, name, value=None, timestamp=None, unit=None, dimensions=None, statistics=None): """ Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch associates the data points with the specified metric. If the specified metric does not exist, Amazon CloudWatch creates the metric. If a list is specified for some, but not all, of the arguments, the remaining arguments are repeated a corresponding number of times. :type namespace: str :param namespace: The namespace of the metric. :type name: str or list :param name: The name of the metric. :type value: float or list :param value: The value for the metric. :type timestamp: datetime or list :param timestamp: The time stamp used for the metric. If not specified, the default value is set to the time the metric data was received. :type unit: string or list :param unit: The unit of the metric. Valid Values: Seconds | Microseconds | Milliseconds | Bytes | Kilobytes | Megabytes | Gigabytes | Terabytes | Bits | Kilobits | Megabits | Gigabits | Terabits | Percent | Count | Bytes/Second | Kilobytes/Second | Megabytes/Second | Gigabytes/Second | Terabytes/Second | Bits/Second | Kilobits/Second | Megabits/Second | Gigabits/Second | Terabits/Second | Count/Second | None :type dimensions: dict :param dimensions: Add extra name value pairs to associate with the metric, i.e.: {'name1': value1, 'name2': (value2, value3)} :type statistics: dict or list :param statistics: Use a statistic set instead of a value, for example:: {'maximum': 30, 'minimum': 1, 'samplecount': 100, 'sum': 10000} """
params = {'Namespace': namespace}
self.build_put_params(params, name, value=value, timestamp=timestamp,
                      unit=unit, dimensions=dimensions,
                      statistics=statistics)
return self.get_status('PutMetricData', params)
<SYSTEM_TASK:> Retrieves alarms with the specified names. If no name is specified, all <END_TASK> <USER_TASK:> Description: def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, alarm_names=None, max_records=None, state_value=None, next_token=None): """ Retrieves alarms with the specified names. If no name is specified, all alarms for the user are returned. Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action. :type action_prefix: string :param action_prefix: The action name prefix. :type alarm_name_prefix: string :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot be specified if this parameter is specified. :type alarm_names: list :param alarm_names: A list of alarm names to retrieve information for. :type max_records: int :param max_records: The maximum number of alarm descriptions to retrieve. :type state_value: string :param state_value: The state value to be used in matching alarms. :type next_token: string :param next_token: The token returned by a previous call to indicate that there is more data. :rtype list """
params = {}
if action_prefix:
    params['ActionPrefix'] = action_prefix
if alarm_name_prefix:
    params['AlarmNamePrefix'] = alarm_name_prefix
elif alarm_names:
    self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
if max_records:
    params['MaxRecords'] = max_records
if next_token:
    params['NextToken'] = next_token
if state_value:
    params['StateValue'] = state_value
return self.get_list('DescribeAlarms', params,
                     [('MetricAlarms', MetricAlarms)])[0]
<SYSTEM_TASK:> Retrieves history for the specified alarm. Filter alarms by date range <END_TASK> <USER_TASK:> Description: def describe_alarm_history(self, alarm_name=None, start_date=None, end_date=None, max_records=None, history_item_type=None, next_token=None): """ Retrieves history for the specified alarm. Filter alarms by date range or item type. If an alarm name is not specified, Amazon CloudWatch returns histories for all of the owner's alarms. Amazon CloudWatch retains the history of deleted alarms for a period of six weeks. If an alarm has been deleted, its history can still be queried. :type alarm_name: string :param alarm_name: The name of the alarm. :type start_date: datetime :param start_date: The starting date to retrieve alarm history. :type end_date: datetime :param end_date: The ending date to retrieve alarm history. :type history_item_type: string :param history_item_type: The type of alarm histories to retrieve (ConfigurationUpdate | StateUpdate | Action) :type max_records: int :param max_records: The maximum number of alarm descriptions to retrieve. :type next_token: string :param next_token: The token returned by a previous call to indicate that there is more data. :rtype list """
params = {}
if alarm_name:
    params['AlarmName'] = alarm_name
if start_date:
    params['StartDate'] = start_date.isoformat()
if end_date:
    params['EndDate'] = end_date.isoformat()
if history_item_type:
    params['HistoryItemType'] = history_item_type
if max_records:
    params['MaxRecords'] = max_records
if next_token:
    params['NextToken'] = next_token
return self.get_list('DescribeAlarmHistory', params,
                     [('member', AlarmHistoryItem)])
<SYSTEM_TASK:> Retrieves all alarms for a single metric. Specify a statistic, period, <END_TASK> <USER_TASK:> Description: def describe_alarms_for_metric(self, metric_name, namespace, period=None, statistic=None, dimensions=None, unit=None): """ Retrieves all alarms for a single metric. Specify a statistic, period, or unit to filter the set of alarms further. :type metric_name: string :param metric_name: The name of the metric :type namespace: string :param namespace: The namespace of the metric. :type period: int :param period: The period in seconds over which the statistic is applied. :type statistic: string :param statistic: The statistic for the metric. :type dimensions: dict :param dimensions: A dictionary containing name/value pairs that will be used to filter the results. The key in the dictionary is the name of a Dimension. The value in the dictionary is either a scalar value of that Dimension name that you want to filter on, a list of values to filter on or None if you want all metrics with that Dimension name. :type unit: string :rtype list """
params = {'MetricName': metric_name,
          'Namespace': namespace}
if period:
    params['Period'] = period
if statistic:
    params['Statistic'] = statistic
if dimensions:
    self.build_dimension_param(dimensions, params)
if unit:
    params['Unit'] = unit
return self.get_list('DescribeAlarmsForMetric', params,
                     [('member', MetricAlarm)])
<SYSTEM_TASK:> Creates or updates an alarm and associates it with the specified Amazon <END_TASK> <USER_TASK:> Description: def put_metric_alarm(self, alarm): """ Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric. Optionally, this operation can associate one or more Amazon Simple Notification Service resources with the alarm. When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. Any actions associated with the StateValue is then executed. When updating an existing alarm, its StateValue is left unchanged. :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm :param alarm: MetricAlarm object. """
params = {'AlarmName': alarm.name,
          'MetricName': alarm.metric,
          'Namespace': alarm.namespace,
          'Statistic': alarm.statistic,
          'ComparisonOperator': alarm.comparison,
          'Threshold': alarm.threshold,
          'EvaluationPeriods': alarm.evaluation_periods,
          'Period': alarm.period}
if alarm.actions_enabled is not None:
    params['ActionsEnabled'] = alarm.actions_enabled
if alarm.alarm_actions:
    self.build_list_params(params, alarm.alarm_actions,
                           'AlarmActions.member.%s')
if alarm.description:
    params['AlarmDescription'] = alarm.description
if alarm.dimensions:
    self.build_dimension_param(alarm.dimensions, params)
if alarm.insufficient_data_actions:
    self.build_list_params(params, alarm.insufficient_data_actions,
                           'InsufficientDataActions.member.%s')
if alarm.ok_actions:
    self.build_list_params(params, alarm.ok_actions,
                           'OKActions.member.%s')
if alarm.unit:
    params['Unit'] = alarm.unit
alarm.connection = self
return self.get_status('PutMetricAlarm', params)
<SYSTEM_TASK:> Deletes all specified alarms. In the event of an error, no <END_TASK> <USER_TASK:> Description: def delete_alarms(self, alarms): """ Deletes all specified alarms. In the event of an error, no alarms are deleted. :type alarms: list :param alarms: List of alarm names. """
params = {}
self.build_list_params(params, alarms, 'AlarmNames.member.%s')
return self.get_status('DeleteAlarms', params)
<SYSTEM_TASK:> Enables actions for the specified alarms. <END_TASK> <USER_TASK:> Description: def enable_alarm_actions(self, alarm_names): """ Enables actions for the specified alarms. :type alarm_names: list :param alarm_names: List of alarm names. """
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('EnableAlarmActions', params)
<SYSTEM_TASK:> Disables actions for the specified alarms. <END_TASK> <USER_TASK:> Description: def disable_alarm_actions(self, alarm_names): """ Disables actions for the specified alarms. :type alarm_names: list :param alarm_names: List of alarm names. """
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('DisableAlarmActions', params)
<SYSTEM_TASK:> Determine how long until the next scheduled time for a Task. <END_TASK> <USER_TASK:> Description: def check(self): """ Determine how long until the next scheduled time for a Task. Returns the number of seconds until the next scheduled time or zero if the task needs to be run immediately. If it's an hourly task and it's never been run, run it now. If it's a daily task and it's never been run and the hour is right, run it now. """
boto.log.info('checking Task[%s]-now=%s, last=%s' %
              (self.name, self.now, self.last_executed))

if self.hourly and not self.last_executed:
    return 0

if self.daily and not self.last_executed:
    if int(self.hour) == self.now.hour:
        return 0
    else:
        return max((int(self.hour) - self.now.hour),
                   (self.now.hour - int(self.hour))) * 60 * 60

delta = self.now - self.last_executed
if self.hourly:
    if delta.seconds >= 60 * 60:
        return 0
    else:
        return 60 * 60 - delta.seconds
else:
    if int(self.hour) == self.now.hour:
        if delta.days >= 1:
            return 0
        else:
            return 82800  # 23 hours, just to be safe
    else:
        return max((int(self.hour) - self.now.hour),
                   (self.now.hour - int(self.hour))) * 60 * 60
<SYSTEM_TASK:> Increment a single value <END_TASK> <USER_TASK:> Description: def _inc(self, val): """Increment a single value"""
assert(len(val) == self.sequence_length)
return self.sequence_string[(self.sequence_string.index(val) + 1) %
                            len(self.sequence_string)]
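A standalone illustration of the wrap-around increment performed by _inc, using a hypothetical ten-character sequence string:

sequence_string = '0123456789'

def inc(val):
    # Advance one position, wrapping from the last character to the first.
    return sequence_string[(sequence_string.index(val) + 1) %
                           len(sequence_string)]

assert inc('3') == '4'
assert inc('9') == '0'  # wraps around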
<SYSTEM_TASK:> Connect to our domain <END_TASK> <USER_TASK:> Description: def _connect(self): """Connect to our domain"""
if not self._db:
    import boto
    sdb = boto.connect_sdb()
    if not self.domain_name:
        self.domain_name = boto.config.get(
            "DB", "sequence_db",
            boto.config.get("DB", "db_name", "default"))
    try:
        self._db = sdb.get_domain(self.domain_name)
    except SDBResponseError, e:
        if e.status == 400:
            self._db = sdb.create_domain(self.domain_name)
        else:
            raise
return self._db
<SYSTEM_TASK:> Load a credential file as is setup like the Java utilities <END_TASK> <USER_TASK:> Description: def load_credential_file(self, path): """Load a credential file as is setup like the Java utilities"""
c_data = StringIO.StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
    c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id")
                     .replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
self.readfp(c_data)
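A self-contained demonstration of the rewrite performed above: a Java-tools style credential file becomes ConfigParser syntax under a [Credentials] section. The key values are placeholders:

java_style = ("AWSAccessKeyId=AKIDEXAMPLE\n"
              "AWSSecretKey=SECRETEXAMPLE\n")
converted = "[Credentials]\n" + java_style \
    .replace("AWSAccessKeyId", "aws_access_key_id") \
    .replace("AWSSecretKey", "aws_secret_access_key")
print converted
# [Credentials]
# aws_access_key_id=AKIDEXAMPLE
# aws_secret_access_key=SECRETEXAMPLE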
<SYSTEM_TASK:> Write the specified Section.Option to the config file specified by path. <END_TASK> <USER_TASK:> Description: def save_option(self, path, section, option, value): """ Write the specified Section.Option to the config file specified by path. Replace any previous value. If the path doesn't exist, create it. Also add the option to the in-memory config. """
config = ConfigParser.SafeConfigParser()
config.read(path)
if not config.has_section(section):
    config.add_section(section)
config.set(section, option, value)
fp = open(path, 'w')
config.write(fp)
fp.close()
if not self.has_section(section):
    self.add_section(section)
self.set(section, option, value)
<SYSTEM_TASK:> Instantiate a BucketStorageUri from the current BucketStorageUri, <END_TASK> <USER_TASK:> Description: def clone_replace_name(self, new_name): """Instantiate a BucketStorageUri from the current BucketStorageUri, but replacing the object_name. @type new_name: string @param new_name: new object name """
if not self.bucket_name:
    raise InvalidUriError('clone_replace_name() on bucket-less URI %s' %
                          self.uri)
return BucketStorageUri(
    self.scheme, self.bucket_name, new_name, self.debug,
    suppress_consec_slashes=self.suppress_consec_slashes)
<SYSTEM_TASK:> sets or updates a bucket's acl <END_TASK> <USER_TASK:> Description: def set_acl(self, acl_or_str, key_name='', validate=True, headers=None, version_id=None): """sets or updates a bucket's acl"""
if not self.bucket_name:
    raise InvalidUriError('set_acl on bucket-less URI (%s)' % self.uri)
self.get_bucket(validate, headers).set_acl(acl_or_str, key_name,
                                           headers, version_id)
<SYSTEM_TASK:> Update the data associated with this volume by querying EC2. <END_TASK> <USER_TASK:> Description: def update(self, validate=False): """ Update the data associated with this volume by querying EC2. :type validate: bool :param validate: By default, if EC2 returns no data about the volume the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """
# Check the resultset since Eucalyptus ignores the volumeId param
unfiltered_rs = self.connection.get_all_volumes([self.id])
rs = [x for x in unfiltered_rs if x.id == self.id]
if len(rs) > 0:
    self._update(rs[0])
elif validate:
    raise ValueError('%s is not a valid Volume ID' % self.id)
return self.status
<SYSTEM_TASK:> Attach this EBS volume to an EC2 instance. <END_TASK> <USER_TASK:> Description: def attach(self, instance_id, device): """ Attach this EBS volume to an EC2 instance. :type instance_id: str :param instance_id: The ID of the EC2 instance to which it will be attached. :type device: str :param device: The device on the instance through which the volume will be exposed (e.g. /dev/sdh) :rtype: bool :return: True if successful """
return self.connection.attach_volume(self.id, instance_id, device)
<SYSTEM_TASK:> Detach this EBS volume from an EC2 instance. <END_TASK> <USER_TASK:> Description: def detach(self, force=False): """ Detach this EBS volume from an EC2 instance. :type force: bool :param force: Forces detachment if the previous detachment attempt did not occur cleanly. This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance will not have an opportunity to flush file system caches nor file system meta data. If you use this option, you must perform file system check and repair procedures. :rtype: bool :return: True if successful """
instance_id = None
if self.attach_data:
    instance_id = self.attach_data.instance_id
device = None
if self.attach_data:
    device = self.attach_data.device
return self.connection.detach_volume(self.id, instance_id,
                                     device, force)
<SYSTEM_TASK:> Get the attachment state. <END_TASK> <USER_TASK:> Description: def attachment_state(self): """ Get the attachment state. """
state = None
if self.attach_data:
    state = self.attach_data.status
return state
<SYSTEM_TASK:> Get all snapshots related to this volume. Note that this requires <END_TASK> <USER_TASK:> Description: def snapshots(self, owner=None, restorable_by=None): """ Get all snapshots related to this volume. Note that this requires that all available snapshots for the account be retrieved from EC2 first and then the list is filtered client-side to contain only those for this volume. :type owner: str :param owner: If present, only the snapshots owned by the specified user will be returned. Valid values are: self | amazon | AWS Account ID :type restorable_by: str :param restorable_by: If present, only the snapshots that are restorable by the specified account id will be returned. :rtype: list of L{boto.ec2.snapshot.Snapshot} :return: The requested Snapshot objects """
rs = self.connection.get_all_snapshots(owner=owner,
                                       restorable_by=restorable_by)
mine = []
for snap in rs:
    if snap.volume_id == self.id:
        mine.append(snap)
return mine
<SYSTEM_TASK:> A custom object hook for use when decoding JSON item bodies. <END_TASK> <USER_TASK:> Description: def item_object_hook(dct): """ A custom object hook for use when decoding JSON item bodies. This hook will transform Amazon DynamoDB JSON responses to something that maps directly to native Python types. """
if len(dct.keys()) > 1:
    return dct
if 'S' in dct:
    return dct['S']
if 'N' in dct:
    return convert_num(dct['N'])
if 'SS' in dct:
    return set(dct['SS'])
if 'NS' in dct:
    return set(map(convert_num, dct['NS']))
return dct
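A usage sketch: object_hook is the standard json.loads extension point, so a raw DynamoDB response body decodes straight to native types. This assumes item_object_hook and its convert_num helper are importable from the module above:

import json

body = '{"name": {"S": "foo"}, "views": {"N": "42"}, "tags": {"SS": ["a", "b"]}}'
item = json.loads(body, object_hook=item_object_hook)
# item == {'name': 'foo', 'views': 42, 'tags': set(['a', 'b'])}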
<SYSTEM_TASK:> Convert a set of pending item updates into the structure <END_TASK> <USER_TASK:> Description: def dynamize_attribute_updates(self, pending_updates): """ Convert a set of pending item updates into the structure required by Layer1. """
d = {}
for attr_name in pending_updates:
    action, value = pending_updates[attr_name]
    if value is None:
        # DELETE without an attribute value
        d[attr_name] = {"Action": action}
    else:
        d[attr_name] = {"Action": action,
                        "Value": self.dynamize_value(value)}
return d
<SYSTEM_TASK:> Convert a layer2 range_key_condition parameter into the <END_TASK> <USER_TASK:> Description: def dynamize_range_key_condition(self, range_key_condition): """ Convert a layer2 range_key_condition parameter into the structure required by Layer1. """
d = None
if range_key_condition:
    d = {}
    for range_value in range_key_condition:
        range_condition = range_key_condition[range_value]
        if range_condition == 'BETWEEN':
            if isinstance(range_value, tuple):
                avl = [self.dynamize_value(v) for v in range_value]
            else:
                msg = 'BETWEEN condition requires a tuple value'
                raise TypeError(msg)
        elif isinstance(range_value, tuple):
            msg = 'Tuple can only be supplied with BETWEEN condition'
            raise TypeError(msg)
        else:
            avl = [self.dynamize_value(range_value)]
    d = {'AttributeValueList': avl,
         'ComparisonOperator': range_condition}
return d
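An illustration of the layer2 convention of mapping the comparison value to its operator (a sketch, assuming layer2 is a Layer2 instance; the numeric formatting comes from dynamize_value):

d = layer2.dynamize_range_key_condition({(10, 20): 'BETWEEN'})
# d == {'AttributeValueList': [{'N': '10'}, {'N': '20'}],
#       'ComparisonOperator': 'BETWEEN'}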
<SYSTEM_TASK:> Convert a layer2 scan_filter parameter into the <END_TASK> <USER_TASK:> Description: def dynamize_scan_filter(self, scan_filter): """ Convert a layer2 scan_filter parameter into the structure required by Layer1. """
d = None
if scan_filter:
    d = {}
    for attr_name, op, value in scan_filter:
        if op == 'BETWEEN':
            if isinstance(value, tuple):
                avl = [self.dynamize_value(v) for v in value]
            else:
                msg = 'BETWEEN condition requires a tuple value'
                raise TypeError(msg)
        elif op == 'NULL' or op == 'NOT_NULL':
            avl = None
        elif isinstance(value, tuple):
            msg = 'Tuple can only be supplied with BETWEEN condition'
            raise TypeError(msg)
        else:
            avl = [self.dynamize_value(value)]
        dd = {'ComparisonOperator': op}
        if avl:
            dd['AttributeValueList'] = avl
        d[attr_name] = dd
return d
<SYSTEM_TASK:> Convert an expected_value parameter into the data structure <END_TASK> <USER_TASK:> Description: def dynamize_expected_value(self, expected_value): """ Convert an expected_value parameter into the data structure required for Layer1. """
d = None
if expected_value:
    d = {}
    for attr_name in expected_value:
        attr_value = expected_value[attr_name]
        if attr_value is True:
            attr_value = {'Exists': True}
        elif attr_value is False:
            attr_value = {'Exists': False}
        else:
            val = self.dynamize_value(expected_value[attr_name])
            attr_value = {'Value': val}
        d[attr_name] = attr_value
return d
<SYSTEM_TASK:> Convert a last_evaluated_key parameter into the data structure <END_TASK> <USER_TASK:> Description: def dynamize_last_evaluated_key(self, last_evaluated_key): """ Convert a last_evaluated_key parameter into the data structure required for Layer1. """
d = None
if last_evaluated_key:
    hash_key = last_evaluated_key['HashKeyElement']
    d = {'HashKeyElement': self.dynamize_value(hash_key)}
    if 'RangeKeyElement' in last_evaluated_key:
        range_key = last_evaluated_key['RangeKeyElement']
        d['RangeKeyElement'] = self.dynamize_value(range_key)
return d
<SYSTEM_TASK:> Convert a request_items parameter into the data structure <END_TASK> <USER_TASK:> Description: def dynamize_request_items(self, batch_list): """ Convert a request_items parameter into the data structure required for Layer1. """
d = None
if batch_list:
    d = {}
    for batch in batch_list:
        batch_dict = {}
        key_list = []
        for key in batch.keys:
            if isinstance(key, tuple):
                hash_key, range_key = key
            else:
                hash_key = key
                range_key = None
            k = self.build_key_from_values(batch.table.schema,
                                           hash_key, range_key)
            key_list.append(k)
        batch_dict['Keys'] = key_list
        if batch.attributes_to_get:
            batch_dict['AttributesToGet'] = batch.attributes_to_get
        d[batch.table.name] = batch_dict
return d
<SYSTEM_TASK:> Take a scalar Python value and return a string representing <END_TASK> <USER_TASK:> Description: def get_dynamodb_type(self, val): """ Take a scalar Python value and return a string representing the corresponding Amazon DynamoDB type. If the value passed in is not a supported type, raise a TypeError. """
dynamodb_type = None
if is_num(val):
    dynamodb_type = 'N'
elif is_str(val):
    dynamodb_type = 'S'
elif isinstance(val, (set, frozenset)):
    if False not in map(is_num, val):
        dynamodb_type = 'NS'
    elif False not in map(is_str, val):
        dynamodb_type = 'SS'
if dynamodb_type is None:
    raise TypeError('Unsupported type "%s" for value "%s"' % (type(val), val))
return dynamodb_type
<SYSTEM_TASK:> Take a scalar Python value and return a dict consisting <END_TASK> <USER_TASK:> Description: def dynamize_value(self, val): """ Take a scalar Python value and return a dict consisting of the Amazon DynamoDB type specification and the value that needs to be sent to Amazon DynamoDB. If the type of the value is not supported, raise a TypeError """
def _str(val):
    """
    DynamoDB stores booleans as numbers. True is 1, False is 0.
    This function converts Python booleans into DynamoDB friendly
    representation.
    """
    if isinstance(val, bool):
        return str(int(val))
    return str(val)

dynamodb_type = self.get_dynamodb_type(val)
if dynamodb_type == 'N':
    val = {dynamodb_type: _str(val)}
elif dynamodb_type == 'S':
    val = {dynamodb_type: val}
elif dynamodb_type == 'NS':
    val = {dynamodb_type: [str(n) for n in val]}
elif dynamodb_type == 'SS':
    val = {dynamodb_type: [n for n in val]}
return val
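For reference, a few expected mappings (a sketch, assuming layer2 is a Layer2 instance):

layer2.dynamize_value('hello')        # {'S': 'hello'}
layer2.dynamize_value(42)             # {'N': '42'}
layer2.dynamize_value(True)           # {'N': '1'} -- booleans become numbers
layer2.dynamize_value(set([1, 2]))    # {'NS': ['1', '2']} (set order not guaranteed)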
<SYSTEM_TASK:> Build a Key structure to be used for accessing items <END_TASK> <USER_TASK:> Description: def build_key_from_values(self, schema, hash_key, range_key=None): """ Build a Key structure to be used for accessing items in Amazon DynamoDB. This method takes the supplied hash_key and optional range_key and validates them against the schema. If there is a mismatch, a TypeError is raised. Otherwise, a Python dict version of an Amazon DynamoDB Key data structure is returned. :type hash_key: int, float, str, or unicode :param hash_key: The hash key of the item you are looking for. The type of the hash key should match the type defined in the schema. :type range_key: int, float, str or unicode :param range_key: The range key of the item you are looking for. This should be supplied only if the schema requires a range key. The type of the range key should match the type defined in the schema. """
dynamodb_key = {}
dynamodb_value = self.dynamize_value(hash_key)
if dynamodb_value.keys()[0] != schema.hash_key_type:
    msg = 'Hashkey must be of type: %s' % schema.hash_key_type
    raise TypeError(msg)
dynamodb_key['HashKeyElement'] = dynamodb_value
if range_key is not None:
    dynamodb_value = self.dynamize_value(range_key)
    if dynamodb_value.keys()[0] != schema.range_key_type:
        msg = 'RangeKey must be of type: %s' % schema.range_key_type
        raise TypeError(msg)
    dynamodb_key['RangeKeyElement'] = dynamodb_value
return dynamodb_key
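For example, with a schema whose hash and range keys are both strings (a sketch; layer2 and table are assumed to exist):

key = layer2.build_key_from_values(table.schema,
                                   'Amazon DynamoDB', 'Tagging tools')
# key == {'HashKeyElement': {'S': 'Amazon DynamoDB'},
#         'RangeKeyElement': {'S': 'Tagging tools'}}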
<SYSTEM_TASK:> Retrieve the Table object for an existing table. <END_TASK> <USER_TASK:> Description: def get_table(self, name): """ Retrieve the Table object for an existing table. :type name: str :param name: The name of the desired table. :rtype: :class:`boto.dynamodb.table.Table` :return: A Table object representing the table. """
response = self.layer1.describe_table(name)
return Table(self, response)
<SYSTEM_TASK:> Delete this table and all items in it. After calling this <END_TASK> <USER_TASK:> Description: def delete_table(self, table): """ Delete this table and all items in it. After calling this the Table objects status attribute will be set to 'DELETING'. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object that is being deleted. """
response = self.layer1.delete_table(table.name)
table.update_from_response(response)
<SYSTEM_TASK:> Create a Schema object used when creating a Table. <END_TASK> <USER_TASK:> Description: def create_schema(self, hash_key_name, hash_key_proto_value, range_key_name=None, range_key_proto_value=None): """ Create a Schema object used when creating a Table. :type hash_key_name: str :param hash_key_name: The name of the HashKey for the schema. :type hash_key_proto_value: int|long|float|str|unicode :param hash_key_proto_value: A sample or prototype of the type of value you want to use for the HashKey. :type range_key_name: str :param range_key_name: The name of the RangeKey for the schema. This parameter is optional. :type range_key_proto_value: int|long|float|str|unicode :param range_key_proto_value: A sample or prototype of the type of value you want to use for the RangeKey. This parameter is optional. """
schema = {}
hash_key = {}
hash_key['AttributeName'] = hash_key_name
hash_key_type = self.get_dynamodb_type(hash_key_proto_value)
hash_key['AttributeType'] = hash_key_type
schema['HashKeyElement'] = hash_key
if range_key_name and range_key_proto_value is not None:
    range_key = {}
    range_key['AttributeName'] = range_key_name
    range_key_type = self.get_dynamodb_type(range_key_proto_value)
    range_key['AttributeType'] = range_key_type
    schema['RangeKeyElement'] = range_key
return Schema(schema)
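A typical call (a sketch; boto.connect_dynamodb is assumed to pick up credentials from the environment or the boto config):

import boto

conn = boto.connect_dynamodb()
# Prototype values carry only type information: '' means string, 1 means number.
schema = conn.create_schema(hash_key_name='forum_name',
                            hash_key_proto_value='',
                            range_key_name='subject',
                            range_key_proto_value='')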
<SYSTEM_TASK:> Commit pending item updates to Amazon DynamoDB. <END_TASK> <USER_TASK:> Description: def update_item(self, item, expected_value=None, return_values=None): """ Commit pending item updates to Amazon DynamoDB. :type item: :class:`boto.dynamodb.item.Item` :param item: The Item to update in Amazon DynamoDB. It is expected that you would have called the add_attribute, put_attribute and/or delete_attribute methods on this Item prior to calling this method. Those queued changes are what will be updated. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name/value pairs before they were updated. Possible values are: None, 'ALL_OLD', 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. If 'ALL_NEW' is specified, then all the attributes of the new version of the item are returned. If 'UPDATED_NEW' is specified, the new versions of only the updated attributes are returned. """
expected_value = self.dynamize_expected_value(expected_value)
key = self.build_key_from_values(item.table.schema,
                                 item.hash_key,
                                 item.range_key)
attr_updates = self.dynamize_attribute_updates(item._updates)
response = self.layer1.update_item(item.table.name, key,
                                   attr_updates,
                                   expected_value, return_values,
                                   object_hook=item_object_hook)
item._updates.clear()
if 'ConsumedCapacityUnits' in response:
    item.consumed_units = response['ConsumedCapacityUnits']
return response
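A usage sketch of the queue-then-commit flow, assuming conn is a Layer2 instance and table an existing Table object whose items expose the mutation methods named above:

item = table.get_item(hash_key='Amazon DynamoDB', range_key='Tagging tools')
item.put_attribute('status', 'reviewed')   # replace (or set) an attribute
item.add_attribute('replies', 1)           # atomic numeric increment
item.delete_attribute('draft')             # remove an attribute entirely
conn.update_item(item, return_values='UPDATED_NEW')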
<SYSTEM_TASK:> Delete the item from Amazon DynamoDB. <END_TASK> <USER_TASK:> Description: def delete_item(self, item, expected_value=None, return_values=None): """ Delete the item from Amazon DynamoDB. :type item: :class:`boto.dynamodb.item.Item` :param item: The Item to delete from Amazon DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before they were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is deleted, the content of the old item is returned. """
expected_value = self.dynamize_expected_value(expected_value) key = self.build_key_from_values(item.table.schema, item.hash_key, item.range_key) return self.layer1.delete_item(item.table.name, key, expected=expected_value, return_values=return_values, object_hook=item_object_hook)
<SYSTEM_TASK:> Perform a scan of DynamoDB. <END_TASK> <USER_TASK:> Description: def scan(self, table, scan_filter=None, attributes_to_get=None, request_limit=None, max_results=None, count=False, exclusive_start_key=None, item_class=Item): """ Perform a scan of DynamoDB. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object that is being scanned. :type scan_filter: A list of tuples :param scan_filter: A list of tuples where each tuple consists of an attribute name, a comparison operator, and either a scalar or tuple consisting of the values to compare the attribute to. Valid comparison operators are shown below along with the expected number of values that should be supplied. * EQ - equal (1) * NE - not equal (1) * LE - less than or equal (1) * LT - less than (1) * GE - greater than or equal (1) * GT - greater than (1) * NOT_NULL - attribute exists (0, use None) * NULL - attribute does not exist (0, use None) * CONTAINS - substring or value in list (1) * NOT_CONTAINS - absence of substring or value in list (1) * BEGINS_WITH - substring prefix (1) * IN - exact match in list (N) * BETWEEN - >= first value, <= second value (2) :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the scan, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the scan method will only yield 100 results max. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier scan. This would be provided as the LastEvaluatedKey in that scan. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` :rtype: generator """
sf = self.dynamize_scan_filter(scan_filter)
response = True
n = 0
while response:
    if response is True:
        pass
    elif response.has_key("LastEvaluatedKey"):
        exclusive_start_key = response['LastEvaluatedKey']
    else:
        break
    response = self.layer1.scan(table.name, sf, attributes_to_get,
                                request_limit, count, exclusive_start_key,
                                object_hook=item_object_hook)
    if response:
        for item in response['Items']:
            if max_results and n == max_results:
                # Stop the generator entirely; a bare break would only
                # end this page and trigger another scan request.
                return
            yield item_class(table, attrs=item)
            n += 1
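Because scan returns a generator that transparently follows LastEvaluatedKey, callers can stream filtered results without handling paging themselves. A usage sketch, assuming conn is a Layer2 instance and table an existing Table:

results = conn.scan(table,
                    scan_filter=[('replies', 'GT', 0),
                                 ('tags', 'CONTAINS', 'dynamodb')],
                    max_results=50)
for item in results:
    process(item)  # process() is a hypothetical callback; each item is a boto Item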
<SYSTEM_TASK:> Returns a list of Photo objects. <END_TASK> <USER_TASK:> Description: def photos_search(user_id='', auth=False, tags='', tag_mode='', text='',\ min_upload_date='', max_upload_date='',\ min_taken_date='', max_taken_date='', \ license='', per_page='', page='', sort=''): """Returns a list of Photo objects. If auth=True the user will be authenticated, so private photos visible to that user can be returned. """
method = 'flickr.photos.search'
data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,
              min_upload_date=min_upload_date,
              max_upload_date=max_upload_date,
              min_taken_date=min_taken_date,
              max_taken_date=max_taken_date,
              license=license, per_page=per_page,
              page=page, sort=sort)
photos = []
if isinstance(data.rsp.photos.photo, list):
    for photo in data.rsp.photos.photo:
        photos.append(_parse_photo(photo))
else:
    photos = [_parse_photo(data.rsp.photos.photo)]
return photos
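A usage sketch (parameter values are illustrative; numeric arguments are passed as strings, matching the signature defaults):

photos = photos_search(tags='sunset,beach', per_page='10')
for photo in photos:
    location = photo.getLocation()  # None if the photo is not geotagged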
<SYSTEM_TASK:> Add a photo to the user's favorites. <END_TASK> <USER_TASK:> Description: def favorites_add(photo_id): """Add a photo to the user's favorites."""
method = 'flickr.favorites.add'
_dopost(method, auth=True, photo_id=photo_id)
return True
<SYSTEM_TASK:> Remove a photo from the user's favorites. <END_TASK> <USER_TASK:> Description: def favorites_remove(photo_id): """Remove a photo from the user's favorites."""
method = 'flickr.favorites.remove'
_dopost(method, auth=True, photo_id=photo_id)
return True
<SYSTEM_TASK:> Get a list of groups the auth'd user can post photos to. <END_TASK> <USER_TASK:> Description: def groups_pools_getGroups(): """Get a list of groups the auth'd user can post photos to."""
method = 'flickr.groups.pools.getGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
    for group in data.rsp.groups.group:
        groups.append(Group(group.id, name=group.name,
                            privacy=group.privacy))
else:
    group = data.rsp.groups.group
    groups = [Group(group.id, name=group.name, privacy=group.privacy)]
return groups
<SYSTEM_TASK:> Gets the popular tags for a user in dictionary form tag=>count <END_TASK> <USER_TASK:> Description: def tags_getListUserPopular(user_id='', count=''): """Gets the popular tags for a user in dictionary form tag=>count"""
method = 'flickr.tags.getListUserPopular'
auth = user_id == ''
# count is forwarded like the other optional string parameters
data = _doget(method, auth=auth, user_id=user_id, count=count)
result = {}
if isinstance(data.rsp.tags.tag, list):
    for tag in data.rsp.tags.tag:
        result[tag.text] = tag.count
else:
    result[data.rsp.tags.tag.text] = data.rsp.tags.tag.count
return result
<SYSTEM_TASK:> Gets the related tags for given tag. <END_TASK> <USER_TASK:> Description: def tags_getrelated(tag): """Gets the related tags for given tag."""
method = 'flickr.tags.getRelated'
data = _doget(method, auth=False, tag=tag)
if isinstance(data.rsp.tags.tag, list):
    return [tag.text for tag in data.rsp.tags.tag]
else:
    return [data.rsp.tags.tag.text]
<SYSTEM_TASK:> Return the latitude and longitude of the picture. <END_TASK> <USER_TASK:> Description: def getLocation(self): """ Return the latitude and longitude of the picture. Returns None if no location is given for this picture. """
method = 'flickr.photos.geo.getLocation'
try:
    data = _doget(method, photo_id=self.id)
except FlickrError:
    # No location for this photo (though some other error may have occurred)
    return None
loc = data.rsp.photo.location
return [loc.latitude, loc.longitude]
<SYSTEM_TASK:> Edit the photos in this set. <END_TASK> <USER_TASK:> Description: def editPhotos(self, photos, primary=None): """Edit the photos in this set. photos - photos for set primary - primary photo (if None, the current primary is used) """
method = 'flickr.photosets.editPhotos'
if primary is None:
    primary = self.primary
ids = [photo.id for photo in photos]
if primary.id not in ids:
    ids.append(primary.id)
_dopost(method, auth=True, photoset_id=self.id,
        primary_photo_id=primary.id, photo_ids=ids)
self.__count = len(ids)
return True
<SYSTEM_TASK:> Add a photo to this set. <END_TASK> <USER_TASK:> Description: def addPhoto(self, photo): """Add a photo to this set. photo - the photo """
method = 'flickr.photosets.addPhoto'
_dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
self.__count += 1
return True
<SYSTEM_TASK:> Deletes the photoset. <END_TASK> <USER_TASK:> Description: def delete(self): """Deletes the photoset. """
method = 'flickr.photosets.delete'
_dopost(method, auth=True, photoset_id=self.id)
return True
<SYSTEM_TASK:> Create a new photoset. <END_TASK> <USER_TASK:> Description: def create(cls, photo, title, description=''): """Create a new photoset. photo - primary photo """
if not isinstance(photo, Photo):
    raise TypeError, "Photo expected"
method = 'flickr.photosets.create'
data = _dopost(method, auth=True, title=title,
               description=description,
               primary_photo_id=photo.id)
# pset rather than set, to avoid shadowing the builtin
pset = Photoset(data.rsp.photoset.id, title, Photo(photo.id),
                photos=1, description=description)
return pset
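A sketch of creating a set and growing it, assuming primary and extra are existing Photo objects:

holiday = Photoset.create(primary, 'Holiday 2011', description='Beach trip')
holiday.addPhoto(extra)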
<SYSTEM_TASK:> Get a list of photo objects for this group <END_TASK> <USER_TASK:> Description: def getPhotos(self, tags='', per_page='', page=''): """Get a list of photo objects for this group"""
method = 'flickr.groups.pools.getPhotos'
data = _doget(method, group_id=self.id, tags=tags,
              per_page=per_page, page=page)
photos = []
for photo in data.rsp.photos.photo:
    photos.append(_parse_photo(photo))
return photos
<SYSTEM_TASK:> This method returns the single key around which this anonymous Bucket <END_TASK> <USER_TASK:> Description: def get_all_keys(self, headers=None, **params): """ This method returns the single key around which this anonymous Bucket was instantiated. :rtype: SimpleResultSet :return: A result set containing the single key this Bucket wraps """
key = Key(self.name, self.contained_key)
return SimpleResultSet([key])
<SYSTEM_TASK:> Creates a new key <END_TASK> <USER_TASK:> Description: def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE): """ Creates a new key :type key_name: string :param key_name: The name of the key to create :rtype: :class:`boto.file.key.Key` :returns: An instance of the newly created key object """
if key_name == '-':
    return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)
else:
    dir_name = os.path.dirname(key_name)
    if dir_name and not os.path.exists(dir_name):
        os.makedirs(dir_name)
    fp = open(key_name, 'wb')
    return Key(self.name, key_name, fp)
<SYSTEM_TASK:> Delete an SQS Queue. <END_TASK> <USER_TASK:> Description: def delete_queue(self, queue, force_deletion=False, callback=None): """ Delete an SQS Queue. :type queue: A Queue object :param queue: The SQS queue to be deleted :type force_deletion: Boolean :param force_deletion: Normally, SQS will not delete a queue that contains messages. However, if the force_deletion argument is True, the queue will be deleted regardless of whether there are messages in the queue or not. USE WITH CAUTION. This will delete all messages in the queue as well. :rtype: bool :return: True if the command succeeded, False otherwise """
return self.get_status('DeleteQueue', None, queue.id, callback=callback)
<SYSTEM_TASK:> Read messages from an SQS Queue. <END_TASK> <USER_TASK:> Description: def receive_message(self, queue, number_messages=1, visibility_timeout=None, attributes=None, callback=None): """ Read messages from an SQS Queue. :type queue: A Queue object :param queue: The Queue from which messages are read. :type number_messages: int :param number_messages: The maximum number of messages to read (default=1) :type visibility_timeout: int :param visibility_timeout: The number of seconds the message should remain invisible to other queue readers (default=None, which uses the Queue's default) :type attributes: str :param attributes: The name of an additional attribute to return with the response, or All if you want all attributes. The default is to return no additional attributes. Valid values: All|SenderId|SentTimestamp|ApproximateReceiveCount|ApproximateFirstReceiveTimestamp :rtype: list :return: A list of :class:`boto.sqs.message.Message` objects. """
params = {'MaxNumberOfMessages': number_messages}
if visibility_timeout:
    params['VisibilityTimeout'] = visibility_timeout
if attributes:
    self.build_list_params(params, attributes, 'AttributeName')
return self.get_list('ReceiveMessage', params,
                     [('Message', queue.message_class)],
                     queue.id, queue, callback=callback)
<SYSTEM_TASK:> Extends the read lock timeout for the specified message from <END_TASK> <USER_TASK:> Description: def change_message_visibility(self, queue, receipt_handle, visibility_timeout, callback=None): """ Extends the read lock timeout for the specified message from the specified queue to the specified value. :type queue: A :class:`boto.sqs.queue.Queue` object :param queue: The Queue from which messages are read. :type receipt_handle: str :param receipt_handle: The receipt handle associated with the message whose visibility timeout will be changed. :type visibility_timeout: int :param visibility_timeout: The new value of the message's visibility timeout in seconds. """
params = {'ReceiptHandle': receipt_handle,
          'VisibilityTimeout': visibility_timeout}
return self.get_status('ChangeMessageVisibility', params,
                       queue.id, callback=callback)
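A sketch of extending the lock on messages that need more processing time, ignoring the optional callback plumbing; queue is assumed to be an existing Queue object, and boto Message objects are assumed to expose receipt_handle:

messages = conn.receive_message(queue, number_messages=5, visibility_timeout=30)
for message in messages:
    # Keep each message hidden for two more minutes while we work on it
    conn.change_message_visibility(queue, message.receipt_handle, 120)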
<SYSTEM_TASK:> Get all available regions for the RDS service. <END_TASK> <USER_TASK:> Description: def regions(): """ Get all available regions for the RDS service. :rtype: list :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo` """
return [RDSRegionInfo(name='us-east-1',
                      endpoint='rds.us-east-1.amazonaws.com'),
        RDSRegionInfo(name='eu-west-1',
                      endpoint='rds.eu-west-1.amazonaws.com'),
        RDSRegionInfo(name='us-west-1',
                      endpoint='rds.us-west-1.amazonaws.com'),
        RDSRegionInfo(name='us-west-2',
                      endpoint='rds.us-west-2.amazonaws.com'),
        RDSRegionInfo(name='sa-east-1',
                      endpoint='rds.sa-east-1.amazonaws.com'),
        RDSRegionInfo(name='ap-northeast-1',
                      endpoint='rds.ap-northeast-1.amazonaws.com'),
        RDSRegionInfo(name='ap-southeast-1',
                      endpoint='rds.ap-southeast-1.amazonaws.com')]
<SYSTEM_TASK:> Retrieve all the DBInstances in your account. <END_TASK> <USER_TASK:> Description: def get_all_dbinstances(self, instance_id=None, max_records=None, marker=None): """ Retrieve all the DBInstances in your account. :type instance_id: str :param instance_id: DB Instance identifier. If supplied, only information about this instance will be returned. Otherwise, info about all DB Instances will be returned. :type max_records: int :param max_records: The maximum number of records to be returned. If more results are available, a MoreToken will be returned in the response that can be used to retrieve additional records. Default is 100. :type marker: str :param marker: The marker provided by a previous request. :rtype: list :return: A list of :class:`boto.rds.dbinstance.DBInstance` """
params = {}
if instance_id:
    params['DBInstanceIdentifier'] = instance_id
if max_records:
    params['MaxRecords'] = max_records
if marker:
    params['Marker'] = marker
return self.get_list('DescribeDBInstances', params,
                     [('DBInstance', DBInstance)])
<SYSTEM_TASK:> Create a new DBInstance. <END_TASK> <USER_TASK:> Description: def create_dbinstance(self, id, allocated_storage, instance_class, master_username, master_password, port=3306, engine='MySQL5.1', db_name=None, param_group=None, security_groups=None, availability_zone=None, preferred_maintenance_window=None, backup_retention_period=None, preferred_backup_window=None, multi_az=False, engine_version=None, auto_minor_version_upgrade=True): """ Create a new DBInstance. :type id: str :param id: Unique identifier for the new instance. Must contain 1-63 alphanumeric characters. First character must be a letter. May not end with a hyphen or contain two consecutive hyphens :type allocated_storage: int :param allocated_storage: Initially allocated storage size, in GBs. Valid values are [5-1024] :type instance_class: str :param instance_class: The compute and memory capacity of the DBInstance. Valid values are: * db.m1.small * db.m1.large * db.m1.xlarge * db.m2.xlarge * db.m2.2xlarge * db.m2.4xlarge :type engine: str :param engine: Name of database engine. Must be MySQL5.1 for now. :type master_username: str :param master_username: Name of master user for the DBInstance. Must be 1-15 alphanumeric characters, first must be a letter. :type master_password: str :param master_password: Password of master user for the DBInstance. Must be 4-16 alphanumeric characters. :type port: int :param port: Port number on which database accepts connections. Valid values [1115-65535]. Defaults to 3306. :type db_name: str :param db_name: Name of a database to create when the DBInstance is created. Default is to create no databases. :type param_group: str :param param_group: Name of DBParameterGroup to associate with this DBInstance. If no groups are specified no parameter groups will be used. :type security_groups: list of str or list of DBSecurityGroup objects :param security_groups: List of names of DBSecurityGroup to authorize on this DBInstance. :type availability_zone: str :param availability_zone: Name of the availability zone to place DBInstance into. :type preferred_maintenance_window: str :param preferred_maintenance_window: The weekly time range (in UTC) during which maintenance can occur. Default is Sun:05:00-Sun:09:00 :type backup_retention_period: int :param backup_retention_period: The number of days for which automated backups are retained. Setting this to zero disables automated backups. :type preferred_backup_window: str :param preferred_backup_window: The daily time range during which automated backups are created (if enabled). Must be in hh24:mi-hh24:mi format (UTC). :type multi_az: bool :param multi_az: If True, specifies the DB Instance will be deployed in multiple availability zones. :type engine_version: str :param engine_version: Version number of the database engine to use. :type auto_minor_version_upgrade: bool :param auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the DB Instance during the maintenance window. Default is True. :rtype: :class:`boto.rds.dbinstance.DBInstance` :return: The new db instance. """
params = {'DBInstanceIdentifier': id,
          'AllocatedStorage': allocated_storage,
          'DBInstanceClass': instance_class,
          'Engine': engine,
          'MasterUsername': master_username,
          'MasterUserPassword': master_password,
          'Port': port,
          'MultiAZ': str(multi_az).lower(),
          'AutoMinorVersionUpgrade': str(auto_minor_version_upgrade).lower()}
if db_name:
    params['DBName'] = db_name
if param_group:
    params['DBParameterGroupName'] = param_group
if security_groups:
    l = []
    for group in security_groups:
        if isinstance(group, DBSecurityGroup):
            l.append(group.name)
        else:
            l.append(group)
    self.build_list_params(params, l, 'DBSecurityGroups.member')
if availability_zone:
    params['AvailabilityZone'] = availability_zone
if preferred_maintenance_window:
    params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if backup_retention_period is not None:
    params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
    params['PreferredBackupWindow'] = preferred_backup_window
if engine_version:
    params['EngineVersion'] = engine_version
return self.get_object('CreateDBInstance', params, DBInstance)
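A minimal provisioning sketch (identifier and password are placeholders; conn is assumed to be an RDS connection):

db = conn.create_dbinstance(id='mydb',
                            allocated_storage=10,
                            instance_class='db.m1.small',
                            master_username='dbadmin',
                            master_password='notasecret',
                            multi_az=True)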
<SYSTEM_TASK:> Create a new DBInstance Read Replica. <END_TASK> <USER_TASK:> Description: def create_dbinstance_read_replica(self, id, source_id, instance_class=None, port=3306, availability_zone=None, auto_minor_version_upgrade=None): """ Create a new DBInstance Read Replica. :type id: str :param id: Unique identifier for the new instance. Must contain 1-63 alphanumeric characters. First character must be a letter. May not end with a hyphen or contain two consecutive hyphens :type source_id: str :param source_id: Unique identifier for the DB Instance for which this DB Instance will act as a Read Replica. :type instance_class: str :param instance_class: The compute and memory capacity of the DBInstance. Default is to inherit from the source DB Instance. Valid values are: * db.m1.small * db.m1.large * db.m1.xlarge * db.m2.xlarge * db.m2.2xlarge * db.m2.4xlarge :type port: int :param port: Port number on which database accepts connections. Default is to inherit from source DB Instance. Valid values [1115-65535]. Defaults to 3306. :type availability_zone: str :param availability_zone: Name of the availability zone to place DBInstance into. :type auto_minor_version_upgrade: bool :param auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window. Default is to inherit this value from the source DB Instance. :rtype: :class:`boto.rds.dbinstance.DBInstance` :return: The new db instance. """
params = {'DBInstanceIdentifier': id,
          'SourceDBInstanceIdentifier': source_id}
if instance_class:
    params['DBInstanceClass'] = instance_class
if port:
    params['Port'] = port
if availability_zone:
    params['AvailabilityZone'] = availability_zone
if auto_minor_version_upgrade is not None:
    if auto_minor_version_upgrade is True:
        params['AutoMinorVersionUpgrade'] = 'true'
    else:
        params['AutoMinorVersionUpgrade'] = 'false'
return self.get_object('CreateDBInstanceReadReplica',
                       params, DBInstance)
<SYSTEM_TASK:> Modify an existing DBInstance. <END_TASK> <USER_TASK:> Description: def modify_dbinstance(self, id, param_group=None, security_groups=None, preferred_maintenance_window=None, master_password=None, allocated_storage=None, instance_class=None, backup_retention_period=None, preferred_backup_window=None, multi_az=False, apply_immediately=False): """ Modify an existing DBInstance. :type id: str :param id: Unique identifier for the instance to be modified. :type param_group: str :param param_group: Name of DBParameterGroup to associate with this DBInstance. :type security_groups: list of str or list of DBSecurityGroup objects :param security_groups: List of names of DBSecurityGroup to authorize on this DBInstance. :type preferred_maintenance_window: str :param preferred_maintenance_window: The weekly time range (in UTC) during which maintenance can occur. Default is Sun:05:00-Sun:09:00 :type master_password: str :param master_password: Password of master user for the DBInstance. Must be 4-15 alphanumeric characters. :type allocated_storage: int :param allocated_storage: The new allocated storage size, in GBs. Valid values are [5-1024] :type instance_class: str :param instance_class: The compute and memory capacity of the DBInstance. Changes will be applied at next maintenance window unless apply_immediately is True. Valid values are: * db.m1.small * db.m1.large * db.m1.xlarge * db.m2.xlarge * db.m2.2xlarge * db.m2.4xlarge :type apply_immediately: bool :param apply_immediately: If true, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. :type backup_retention_period: int :param backup_retention_period: The number of days for which automated backups are retained. Setting this to zero disables automated backups. :type preferred_backup_window: str :param preferred_backup_window: The daily time range during which automated backups are created (if enabled). Must be in hh24:mi-hh24:mi format (UTC). :type multi_az: bool :param multi_az: If True, specifies the DB Instance will be deployed in multiple availability zones. :rtype: :class:`boto.rds.dbinstance.DBInstance` :return: The modified db instance. """
params = {'DBInstanceIdentifier': id}
if param_group:
    params['DBParameterGroupName'] = param_group
if security_groups:
    l = []
    for group in security_groups:
        if isinstance(group, DBSecurityGroup):
            l.append(group.name)
        else:
            l.append(group)
    self.build_list_params(params, l, 'DBSecurityGroups.member')
if preferred_maintenance_window:
    params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if master_password:
    params['MasterUserPassword'] = master_password
if allocated_storage:
    params['AllocatedStorage'] = allocated_storage
if instance_class:
    params['DBInstanceClass'] = instance_class
if backup_retention_period is not None:
    params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
    params['PreferredBackupWindow'] = preferred_backup_window
if multi_az:
    params['MultiAZ'] = 'true'
if apply_immediately:
    params['ApplyImmediately'] = 'true'
return self.get_object('ModifyDBInstance', params, DBInstance)
<SYSTEM_TASK:> Delete an existing DBInstance. <END_TASK> <USER_TASK:> Description: def delete_dbinstance(self, id, skip_final_snapshot=False, final_snapshot_id=''): """ Delete an existing DBInstance. :type id: str :param id: Unique identifier for the instance to be deleted. :type skip_final_snapshot: bool :param skip_final_snapshot: This parameter determines whether a final db snapshot is created before the instance is deleted. If True, no snapshot is created. If False, a snapshot is created before deleting the instance. :type final_snapshot_id: str :param final_snapshot_id: If a final snapshot is requested, this is the identifier used for that snapshot. :rtype: :class:`boto.rds.dbinstance.DBInstance` :return: The deleted db instance. """
params = {'DBInstanceIdentifier': id}
if skip_final_snapshot:
    params['SkipFinalSnapshot'] = 'true'
else:
    params['SkipFinalSnapshot'] = 'false'
    params['FinalDBSnapshotIdentifier'] = final_snapshot_id
return self.get_object('DeleteDBInstance', params, DBInstance)
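Deletion either takes a final snapshot or explicitly skips it (a sketch; conn and the identifiers are placeholders):

# Keep a last snapshot before the instance goes away...
conn.delete_dbinstance('mydb', final_snapshot_id='mydb-final')
# ...or discard everything immediately.
conn.delete_dbinstance('mydb', skip_final_snapshot=True)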