Dataset columns: text_prompt (string, 157 to 13.1k characters), code_prompt (string, 7 to 19.8k characters).
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def setDatastreamState(self, pid, dsID, dsState):
    '''Update datastream state.

    :param pid: object pid
    :param dsID: datastream id
    :param dsState: datastream state
    :returns: boolean success
    '''
    # /objects/{pid}/datastreams/{dsID} ? [dsState]
    http_args = {'dsState': dsState}
    url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
    response = self.put(url, params=http_args)
    # returns response code 200 on success
    return response.status_code == requests.codes.ok
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def setDatastreamVersionable(self, pid, dsID, versionable):
    '''Update datastream versionable setting.

    :param pid: object pid
    :param dsID: datastream id
    :param versionable: boolean
    :returns: boolean success
    '''
    # /objects/{pid}/datastreams/{dsID} ? [versionable]
    http_args = {'versionable': versionable}
    url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
    response = self.put(url, params=http_args)
    # returns response code 200 on success
    return response.status_code == requests.codes.ok
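A minimal usage sketch for the two datastream setters above, assuming repo_api is an already-configured instance of the REST API client that defines them (connection setup is not shown in these rows):

# Hypothetical usage; 'I' marks the datastream inactive in Fedora's A/I/D state codes.
pid = 'demo:123'
if repo_api.setDatastreamState(pid, 'TEXT', 'I'):
    print('datastream state updated')
if repo_api.setDatastreamVersionable(pid, 'TEXT', False):
    print('datastream versioning disabled')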
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def upload(self, data, callback=None, content_type=None, size=None):
    '''
    Upload a multi-part file for content to ingest. Returns a temporary
    upload id that can be used as a datastream location.

    :param data: content string, file-like object, or iterable with
        content to be uploaded
    :param callback: optional callback method to monitor the upload;
        see :mod:`requests-toolbelt` documentation for more details:
        https://toolbelt.readthedocs.org/en/latest/user.html#uploading-data
    :param content_type: optional content type of the data
    :param size: optional size of the data; required when using an
        iterable for the data
    :returns: upload id on success
    '''
    url = 'upload'
    # fedora only expects content uploaded as multipart file;
    # make string content into a file-like object so requests.post
    # sends it the way Fedora expects.
    # NOTE: checking for both python 2.x next method and
    # python 3.x __next__ to test if data is iterable
    if not hasattr(data, 'read') and \
       not (hasattr(data, '__next__') or hasattr(data, 'next')):
        data = six.BytesIO(force_bytes(data))

    # if data is an iterable, wrap in a readable iterator that
    # requests-toolbelt can read data from
    elif not hasattr(data, 'read') and \
         (hasattr(data, '__next__') or hasattr(data, 'next')):
        if size is None:
            raise Exception('Cannot upload iterable with unknown size')
        data = ReadableIterator(data, size)

    # use requests-toolbelt multipart encoder to avoid reading
    # the full content of large files into memory
    menc = MultipartEncoder(fields={'file': ('file', data, content_type)})

    if callback is not None:
        menc = MultipartEncoderMonitor(menc, callback)

    headers = {'Content-Type': menc.content_type}

    if size:
        # latest version of requests requires str or bytes, not int
        if not isinstance(size, six.string_types):
            size = str(size)
        headers['Content-Length'] = size

    try:
        response = self.post(url, data=menc, headers=headers)
    except OverflowError:
        # Python __len__ uses integer so it is limited to system maxint,
        # and requests and requests-toolbelt use len() throughout.
        # This results in an overflow error when trying to upload a file
        # larger than system maxint (2GB on 32-bit OSes).
        # See http://bugs.python.org/issue12159
        msg = 'upload content larger than system maxint (32-bit OS limitation)'
        logger.error('OverflowError: %s', msg)
        raise OverflowError(msg)

    if response.status_code == requests.codes.accepted:
        return response.text.strip()
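A short sketch of how upload() might be called, assuming api is an instance of the class above with its connection already configured:

import io

# Hypothetical call; the returned id (e.g. 'uploaded://26') can then be
# used as a datastream location when ingesting an object.
upload_id = api.upload(io.BytesIO(b'hello world'), content_type='text/plain')
print(upload_id)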
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_subjects(self, predicate, object): """ Search for all subjects related to the specified predicate and object. :param predicate: :param object: :rtype: generator of RDF statements """
for statement in self.spo_search(predicate=predicate, object=object): yield str(statement[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_predicates(self, subject, object): """ Search for all predicates related to the specified subject and object. :param subject: :param object: :rtype: generator of RDF statements """
for statement in self.spo_search(subject=subject, object=object): yield str(statement[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_objects(self, subject, predicate): """ Search for all objects related to the specified subject and predicate. :param subject: :param predicate: :rtype: generator of RDF statements """
for statement in self.spo_search(subject=subject, predicate=predicate): yield str(statement[2])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sparql_query(self, query, flush=None, limit=None): """ Run a Sparql query. :param query: sparql query string :rtype: list of dictionary """
return self.find_statements(query, language='sparql', type='tuples', flush=flush, limit=limit)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sparql_count(self, query, flush=None): """ Count results for a Sparql query. :param query: sparql query string :rtype: int """
return self.count_statements(query, language='sparql', type='tuples', flush=flush)
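A hedged example of the two SPARQL helpers above, assuming risearch is an instance of the resource-index client that defines them; the collection URI is a placeholder:

query = '''
    SELECT ?member
    WHERE { ?member <fedora-rels-ext:isMemberOfCollection> <info:fedora/demo:collection> }
'''
print(risearch.sparql_count(query))          # total number of result bindings
for row in risearch.sparql_query(query):
    print(row['member'])                     # each result is a dict keyed by variable name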
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_stack(self, stack_name, template_body=None, template_url=None, parameters=[], notification_arns=[], disable_rollback=False, timeout_in_minutes=None, capabilities=None): """ Creates a CloudFormation Stack as specified by the template. :type stack_name: string :param stack_name: The name of the Stack, must be unique among running Stacks :type template_body: string :param template_body: The template body (JSON string) :type template_url: string :param template_url: An S3 URL of a stored template JSON document. If both the template_body and template_url are specified, the template_body takes precedence :type parameters: list of tuples :param parameters: A list of (key, value) pairs for template input parameters. :type notification_arns: list of strings :param notification_arns: A list of SNS topics to send Stack event notifications to. :type disable_rollback: bool :param disable_rollback: Indicates whether or not to rollback on failure. :type timeout_in_minutes: int :param timeout_in_minutes: Maximum amount of time to let the Stack spend creating itself. If this timeout is exceeded, the Stack will enter the CREATE_FAILED state. :type capabilities: list :param capabilities: The list of capabilities you want to allow in the stack. Currently, the only valid capability is 'CAPABILITY_IAM'. :rtype: string :return: The unique Stack ID. """
params = {'ContentType': "JSON", 'StackName': stack_name,
          'DisableRollback': self.encode_bool(disable_rollback)}
if template_body:
    params['TemplateBody'] = template_body
if template_url:
    params['TemplateURL'] = template_url
if template_body and template_url:
    boto.log.warning("If both TemplateBody and TemplateURL are"
                     " specified, only TemplateBody will be honored by the API")
if len(parameters) > 0:
    for i, (key, value) in enumerate(parameters):
        params['Parameters.member.%d.ParameterKey' % (i+1)] = key
        params['Parameters.member.%d.ParameterValue' % (i+1)] = value
if capabilities:
    for i, value in enumerate(capabilities):
        params['Capabilities.member.%d' % (i+1)] = value
if len(notification_arns) > 0:
    self.build_list_params(params, notification_arns, "NotificationARNs.member")
if timeout_in_minutes:
    params['TimeoutInMinutes'] = int(timeout_in_minutes)

response = self.make_request('CreateStack', params, '/', 'POST')
body = response.read()
if response.status == 200:
    body = json.loads(body)
    return body['CreateStackResponse']['CreateStackResult']['StackId']
else:
    boto.log.error('%s %s' % (response.status, response.reason))
    boto.log.error('%s' % body)
    raise self.ResponseError(response.status, response.reason, body)
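A usage sketch for create_stack(), assuming boto is installed and AWS credentials are configured; the bucket, template and key names are placeholders:

import boto

cfn = boto.connect_cloudformation()
stack_id = cfn.create_stack(
    'my-stack',
    template_url='https://s3.amazonaws.com/my-bucket/template.json',
    parameters=[('KeyName', 'my-keypair')],
    timeout_in_minutes=30,
    capabilities=['CAPABILITY_IAM'])
print(stack_id)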
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_change(self, action, name, type, ttl=600, alias_hosted_zone_id=None, alias_dns_name=None, identifier=None, weight=None): """Add a change request"""
change = Record(name, type, ttl,
                alias_hosted_zone_id=alias_hosted_zone_id,
                alias_dns_name=alias_dns_name,
                identifier=identifier,
                weight=weight)
self.changes.append([action, change])
return change
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_xml(self): """Convert this ResourceRecordSet into XML to be saved via the ChangeResourceRecordSetsRequest"""
changesXML = "" for change in self.changes: changeParams = {"action": change[0], "record": change[1].to_xml()} changesXML += self.ChangeXML % changeParams params = {"comment": self.comment, "changes": changesXML} return self.ChangeResourceRecordSetsBody % params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def commit(self): """Commit this change"""
if not self.connection:
    import boto
    self.connection = boto.connect_route53()
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
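How these Route 53 change-set methods are typically chained (a sketch; the hosted zone id, record name and address are placeholders):

import boto
from boto.route53.record import ResourceRecordSets

conn = boto.connect_route53()
changes = ResourceRecordSets(conn, 'Z123EXAMPLE', comment='add www record')
change = changes.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
change.add_value('192.0.2.10')        # Record.add_value appends a resource record
result = changes.commit()             # builds the XML via to_xml() and submits it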
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def endElement(self, name, value, connection): """Overwritten to also add the NextRecordName and NextRecordType to the base object"""
if name == 'NextRecordName':
    self.next_record_name = value
elif name == 'NextRecordType':
    self.next_record_type = value
else:
    return ResultSet.endElement(self, name, value, connection)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_alias(self, alias_hosted_zone_id, alias_dns_name): """Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_xml(self): """Spit this resource record set out as XML"""
if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
    # Use alias
    body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
else:
    # Use resource record(s)
    records = ""
    for r in self.resource_records:
        records += self.ResourceRecordBody % r
    body = self.ResourceRecordsBody % {
        "ttl": self.ttl,
        "records": records,
    }
weight = ""
if self.identifier != None and self.weight != None:
    weight = self.WRRBody % {"identifier": self.identifier,
                             "weight": self.weight}
params = {
    "name": self.name,
    "type": self.type,
    "weight": weight,
    "body": body,
}
return self.XMLBody % params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_from_response(self, response): """ Update the state of the Table object based on the response data received from Amazon DynamoDB. """
if 'Table' in response:
    self._dict.update(response['Table'])
elif 'TableDescription' in response:
    self._dict.update(response['TableDescription'])
if 'KeySchema' in self._dict:
    self._schema = Schema(self._dict['KeySchema'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh(self, wait_for_active=False, retry_seconds=5): """ Refresh all of the fields of the Table object by calling the underlying DescribeTable request. :type wait_for_active: bool :param wait_for_active: If True, this command will not return until the table status, as returned from Amazon DynamoDB, is 'ACTIVE'. :type retry_seconds: int :param retry_seconds: If wait_for_active is True, this parameter controls the number of seconds of delay between calls to update_table in Amazon DynamoDB. Default is 5 seconds. """
done = False
while not done:
    response = self.layer2.describe_table(self.name)
    self.update_from_response(response)
    if wait_for_active:
        if self.status == 'ACTIVE':
            done = True
        else:
            time.sleep(retry_seconds)
    else:
        done = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new_item(self, hash_key, range_key=None, attrs=None): """ Return an new, unsaved Item which can later be PUT to Amazon DynamoDB. """
return Item(self, hash_key, range_key, attrs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scan(self, scan_filter=None, attributes_to_get=None, request_limit=None, max_results=None, count=False, exclusive_start_key=None, item_class=Item): """ Scan through this table, this is a very long and expensive operation, and should be avoided if at all possible. :type scan_filter: A list of tuples :param scan_filter: A list of tuples where each tuple consists of an attribute name, a comparison operator, and either a scalar or tuple consisting of the values to compare the attribute to. Valid comparison operators are shown below along with the expected number of values that should be supplied. * EQ - equal (1) * NE - not equal (1) * LE - less than or equal (1) * LT - less than (1) * GE - greater than or equal (1) * GT - greater than (1) * NOT_NULL - attribute exists (0, use None) * NULL - attribute does not exist (0, use None) * CONTAINS - substring or value in list (1) * NOT_CONTAINS - absence of substring or value in list (1) * BEGINS_WITH - substring prefix (1) * IN - exact match in list (N) * BETWEEN - >= first value, <= second value (2) :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` :rtype: generator """
return self.layer2.scan(self, scan_filter, attributes_to_get, request_limit, max_results, exclusive_start_key, item_class=item_class)
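A sketch tying refresh() and scan() together, assuming a boto layer2 DynamoDB connection and an existing table; the filter follows the list-of-tuples format described in the docstring above:

import boto

dynamo = boto.connect_dynamodb()
table = dynamo.get_table('messages')
table.refresh(wait_for_active=True)
for item in table.scan(scan_filter=[('views', 'GT', 100)],
                       attributes_to_get=['id', 'views']):
    print(item)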
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_by_code(self, code): """ Retrieve a language by a code. :param code: iso code (any of the three) or its culture code :return: a Language object """
if any(x in code for x in ('_', '-')):
    cc = CultureCode.objects.get(code=code.replace('_', '-'))
    return cc.language
elif len(code) == 2:
    return self.get(iso_639_1=code)
elif len(code) == 3:
    return self.get(Q(iso_639_2T=code) | Q(iso_639_2B=code) | Q(iso_639_3=code))
raise ValueError(
    'Code must be either 2, or 3 characters: "%s" is %s' % (code, len(code)))
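A hypothetical lookup using the manager above; the import path is assumed (a languages_plus-style Django app) and the language fixtures are assumed to be loaded:

from languages_plus.models import Language   # assumed module path

Language.objects.get_by_code('pt')      # two-letter ISO 639-1 code
Language.objects.get_by_code('deu')     # three-letter ISO 639-2/3 code
Language.objects.get_by_code('pt-BR')   # culture code, resolved via CultureCode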
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def state(self, state=vanilla.message.NoState): """ Returns a `State`_ `Pair`_. *state* if supplied sets the initial state. """
return vanilla.message.State(self, state=state)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Inventory(cls): """ Returns a list of Server instances, one for each Server object persisted in the db """
l = ServerSet()
rs = cls.find()
for server in rs:
    l.append(server)
return l
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_config(self, config): """ Set SDB based config """
self._config = config
self._config.dump_to_sdb("botoConfigs", self.id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attach_volume(self, volume, device="/dev/sdp"): """ Attach an EBS volume to this server :param volume: EBS Volume to attach :type volume: boto.ec2.volume.Volume :param device: Device to attach to (default to /dev/sdp) :type device: string """
if hasattr(volume, "id"):
    volume_id = volume.id
else:
    volume_id = volume
return self.ec2.attach_volume(volume_id=volume_id,
                              instance_id=self.instance_id,
                              device=device)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def detach_volume(self, volume): """ Detach an EBS volume from this server :param volume: EBS Volume to detach :type volume: boto.ec2.volume.Volume """
if hasattr(volume, "id"):
    volume_id = volume.id
else:
    volume_id = volume
return self.ec2.detach_volume(volume_id=volume_id,
                              instance_id=self.instance_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _save_tracker_uri_to_file(self): """ Saves URI to tracker file if one was passed to constructor. """
if not self.tracker_file_name:
    return
f = None
try:
    f = open(self.tracker_file_name, 'w')
    f.write(self.tracker_uri)
except IOError, e:
    raise ResumableUploadException(
        'Couldn\'t write URI tracker file (%s): %s.\nThis can happen '
        'if you\'re using an incorrectly configured upload tool\n'
        '(e.g., gsutil configured to save tracker files to an '
        'unwritable directory)' % (self.tracker_file_name, e.strerror),
        ResumableTransferDisposition.ABORT)
finally:
    if f:
        f.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_tracker_uri(self, uri): """ Called when we start a new resumable upload or get a new tracker URI for the upload. Saves URI and resets upload state. Raises InvalidUriError if URI is syntactically invalid. """
parse_result = urlparse.urlparse(uri)
if (parse_result.scheme.lower() not in ['http', 'https'] or
        not parse_result.netloc or not parse_result.query):
    raise InvalidUriError('Invalid tracker URI (%s)' % uri)
qdict = cgi.parse_qs(parse_result.query)
if not qdict or not 'upload_id' in qdict:
    raise InvalidUriError('Invalid tracker URI (%s)' % uri)
self.tracker_uri = uri
self.tracker_uri_host = parse_result.netloc
self.tracker_uri_path = '%s/?%s' % (parse_result.netloc, parse_result.query)
self.server_has_bytes = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _query_server_state(self, conn, file_length): """ Queries server to find out state of given upload. Note that this method really just makes special case use of the fact that the upload server always returns the current start/end state whenever a PUT doesn't complete. Returns HTTP response from sending request. Raises ResumableUploadException if problem querying server. """
# Send an empty PUT so that server replies with this resumable
# transfer's state.
put_headers = {}
put_headers['Content-Range'] = (
    self._build_content_range_header('*', file_length))
put_headers['Content-Length'] = '0'
return AWSAuthConnection.make_request(conn, 'PUT',
                                      path=self.tracker_uri_path,
                                      auth_path=self.tracker_uri_path,
                                      headers=put_headers,
                                      host=self.tracker_uri_host)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _query_server_pos(self, conn, file_length): """ Queries server to find out what bytes it currently has. Returns (server_start, server_end), where the values are inclusive. For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2. Raises ResumableUploadException if problem querying server. """
resp = self._query_server_state(conn, file_length)
if resp.status == 200:
    return (0, file_length)  # Completed upload.
if resp.status != 308:
    # This means the server didn't have any state for the given
    # upload ID, which can happen (for example) if the caller saved
    # the tracker URI to a file and then tried to restart the transfer
    # after that upload ID has gone stale. In that case we need to
    # start a new transfer (and the caller will then save the new
    # tracker URI to the tracker file).
    raise ResumableUploadException(
        'Got non-308 response (%s) from server state query' % resp.status,
        ResumableTransferDisposition.START_OVER)
got_valid_response = False
range_spec = resp.getheader('range')
if range_spec:
    # Parse 'bytes=<from>-<to>' range_spec.
    m = re.search('bytes=(\d+)-(\d+)', range_spec)
    if m:
        server_start = long(m.group(1))
        server_end = long(m.group(2))
        got_valid_response = True
else:
    # No Range header, which means the server does not yet have
    # any bytes. Note that the Range header uses inclusive 'from'
    # and 'to' values. Since Range 0-0 would mean that the server
    # has byte 0, omitting the Range header is used to indicate that
    # the server doesn't have any bytes.
    return self.SERVER_HAS_NOTHING
if not got_valid_response:
    raise ResumableUploadException(
        'Couldn\'t parse upload server state query response (%s)' %
        str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
if conn.debug >= 1:
    print 'Server has: Range: %d - %d.' % (server_start, server_end)
return (server_start, server_end)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _start_new_resumable_upload(self, key, headers=None): """ Starts a new resumable upload. Raises ResumableUploadException if any errors occur. """
conn = key.bucket.connection
if conn.debug >= 1:
    print 'Starting new resumable upload.'
self.server_has_bytes = 0

# Start a new resumable upload by sending a POST request with an
# empty body and the "X-Goog-Resumable: start" header. Include any
# caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
# (and raise an exception if they tried to pass one, since it's
# a semantic error to specify it at this point, and if we were to
# include one now it would cause the server to expect that many
# bytes; the POST doesn't include the actual file bytes. We set
# the Content-Length in the subsequent PUT, based on the uploaded
# file size.
post_headers = {}
for k in headers:
    if k.lower() == 'content-length':
        raise ResumableUploadException(
            'Attempt to specify Content-Length header (disallowed)',
            ResumableTransferDisposition.ABORT)
    post_headers[k] = headers[k]
post_headers[conn.provider.resumable_upload_header] = 'start'

resp = conn.make_request(
    'POST', key.bucket.name, key.name, post_headers)
# Get tracker URI from response 'Location' header.
body = resp.read()
# Check for various status conditions.
if resp.status in [500, 503]:
    # Retry status 500 and 503 errors after a delay.
    raise ResumableUploadException(
        'Got status %d from attempt to start resumable upload. '
        'Will wait/retry' % resp.status,
        ResumableTransferDisposition.WAIT_BEFORE_RETRY)
elif resp.status != 200 and resp.status != 201:
    raise ResumableUploadException(
        'Got status %d from attempt to start resumable upload. '
        'Aborting' % resp.status,
        ResumableTransferDisposition.ABORT)
# Else we got 200 or 201 response code, indicating the resumable
# upload was created.
tracker_uri = resp.getheader('Location')
if not tracker_uri:
    raise ResumableUploadException(
        'No resumable tracker URI found in resumable initiation '
        'POST response (%s)' % body,
        ResumableTransferDisposition.WAIT_BEFORE_RETRY)
self._set_tracker_uri(tracker_uri)
self._save_tracker_uri_to_file()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _upload_file_bytes(self, conn, http_conn, fp, file_length, total_bytes_uploaded, cb, num_cb): """ Makes one attempt to upload file bytes, using an existing resumable upload connection. Returns etag from server upon success. Raises ResumableUploadException if any problems occur. """
buf = fp.read(self.BUFFER_SIZE)
if cb:
    if num_cb > 2:
        cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
    elif num_cb < 0:
        cb_count = -1
    else:
        cb_count = 0
    i = 0
    cb(total_bytes_uploaded, file_length)

# Build resumable upload headers for the transfer. Don't send a
# Content-Range header if the file is 0 bytes long, because the
# resumable upload protocol uses an *inclusive* end-range (so, sending
# 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
put_headers = {}
if file_length:
    range_header = self._build_content_range_header(
        '%d-%d' % (total_bytes_uploaded, file_length - 1), file_length)
    put_headers['Content-Range'] = range_header
# Set Content-Length to the total bytes we'll send with this PUT.
put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
http_request = AWSAuthConnection.build_base_http_request(
    conn, 'PUT', path=self.tracker_uri_path, auth_path=None,
    headers=put_headers, host=self.tracker_uri_host)
http_conn.putrequest('PUT', http_request.path)
for k in put_headers:
    http_conn.putheader(k, put_headers[k])
http_conn.endheaders()

# Turn off debug on http connection so upload content isn't included
# in debug stream.
http_conn.set_debuglevel(0)
while buf:
    http_conn.send(buf)
    total_bytes_uploaded += len(buf)
    if cb:
        i += 1
        if i == cb_count or cb_count == -1:
            cb(total_bytes_uploaded, file_length)
            i = 0
    buf = fp.read(self.BUFFER_SIZE)
if cb:
    cb(total_bytes_uploaded, file_length)
if total_bytes_uploaded != file_length:
    # Abort (and delete the tracker file) so if the user retries
    # they'll start a new resumable upload rather than potentially
    # attempting to pick back up later where we left off.
    raise ResumableUploadException(
        'File changed during upload: EOF at %d bytes of %d byte file.' %
        (total_bytes_uploaded, file_length),
        ResumableTransferDisposition.ABORT)
resp = http_conn.getresponse()
body = resp.read()
# Restore http connection debug level.
http_conn.set_debuglevel(conn.debug)
if resp.status == 200:
    return resp.getheader('etag')  # Success
# Retry timeout (408) and status 500 and 503 errors after a delay.
elif resp.status in [408, 500, 503]:
    disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
else:
    # Catch all for any other error codes.
    disposition = ResumableTransferDisposition.ABORT
raise ResumableUploadException('Got response code %d while attempting '
                               'upload (%s)' %
                               (resp.status, resp.reason), disposition)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _attempt_resumable_upload(self, key, fp, file_length, headers, cb, num_cb): """ Attempts a resumable upload. Returns etag from server upon success. Raises ResumableUploadException if any problems occur. """
(server_start, server_end) = self.SERVER_HAS_NOTHING
conn = key.bucket.connection
if self.tracker_uri:
    # Try to resume existing resumable upload.
    try:
        (server_start, server_end) = (
            self._query_server_pos(conn, file_length))
        self.server_has_bytes = server_start
        key = key  # no-op, retained from the original source
        if conn.debug >= 1:
            print 'Resuming transfer.'
    except ResumableUploadException, e:
        if conn.debug >= 1:
            print 'Unable to resume transfer (%s).' % e.message
        self._start_new_resumable_upload(key, headers)
else:
    self._start_new_resumable_upload(key, headers)

# upload_start_point allows the code that instantiated the
# ResumableUploadHandler to find out the point from which it started
# uploading (e.g., so it can correctly compute throughput).
if self.upload_start_point is None:
    self.upload_start_point = server_end

if server_end == file_length:
    # Boundary condition: complete file was already uploaded (e.g.,
    # user interrupted a previous upload attempt after the upload
    # completed but before the gsutil tracker file was deleted). Set
    # total_bytes_uploaded to server_end so we'll attempt to upload
    # no more bytes but will still make final HTTP request and get
    # back the response (which contains the etag we need to compare
    # at the end).
    total_bytes_uploaded = server_end
else:
    total_bytes_uploaded = server_end + 1
fp.seek(total_bytes_uploaded)
conn = key.bucket.connection

# Get a new HTTP connection (vs conn.get_http_connection(), which reuses
# pool connections) because httplib requires a new HTTP connection per
# transaction. (Without this, calling http_conn.getresponse() would get
# "ResponseNotReady".)
http_conn = conn.new_http_connection(self.tracker_uri_host, conn.is_secure)
http_conn.set_debuglevel(conn.debug)

# Make sure to close http_conn at end so if a local file read
# failure occurs partway through server will terminate current upload
# and can report that progress on next attempt.
try:
    return self._upload_file_bytes(conn, http_conn, fp, file_length,
                                   total_bytes_uploaded, cb, num_cb)
except (ResumableUploadException, socket.error):
    resp = self._query_server_state(conn, file_length)
    if resp.status == 400:
        raise ResumableUploadException('Got 400 response from server '
            'state query after failed resumable upload attempt. This '
            'can happen for various reasons, including specifying an '
            'invalid request (e.g., an invalid canned ACL) or if the '
            'file size changed between upload attempts',
            ResumableTransferDisposition.ABORT)
    else:
        raise
finally:
    http_conn.close()
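The handler above is normally driven from the key API rather than called directly; a sketch of the usual pattern, assuming a Google Cloud Storage connection via boto and a writable tracker file path:

import boto
from boto.gs.resumable_upload_handler import ResumableUploadHandler

conn = boto.connect_gs()
key = conn.get_bucket('my-bucket').new_key('big-file.bin')
handler = ResumableUploadHandler(tracker_file_name='/tmp/big-file.tracker',
                                 num_retries=6)
key.set_contents_from_filename('big-file.bin', res_upload_handler=handler)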
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_permissions(self, object, replace=False): """ Sets the S3 ACL grants for the given object to the appropriate value based on the type of Distribution. If the Distribution is serving private content the ACL will be set to include the Origin Access Identity associated with the Distribution. If the Distribution is serving public content the content will be set up with "public-read". :type object: :class:`boto.cloudfront.object.Object` :param object: The Object whose ACL is being set :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity. """
if isinstance(self.config.origin, S3Origin):
    if self.config.origin.origin_access_identity:
        id = self.config.origin.origin_access_identity.split('/')[-1]
        oai = self.connection.get_origin_access_identity_info(id)
        policy = object.get_acl()
        if replace:
            policy.acl = ACL()
        policy.acl.add_user_grant('READ', oai.s3_user_id)
        object.set_acl(policy)
    else:
        object.set_canned_acl('public-read')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_permissions_all(self, replace=False): """ Sets the S3 ACL grants for all objects in the Distribution to the appropriate value based on the type of Distribution. :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity. """
bucket = self._get_bucket()
for key in bucket:
    self.set_permissions(key, replace)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_object(self, name, content, headers=None, replace=True): """ Adds a new content object to the Distribution. The content for the object will be copied to a new Key in the S3 Bucket and the permissions will be set appropriately for the type of Distribution. :type name: str or unicode :param name: The name or key of the new object. :type content: file-like object :param content: A file-like object that contains the content for the new object. :type headers: dict :param headers: A dictionary containing additional headers you would like associated with the new object in S3. :rtype: :class:`boto.cloudfront.object.Object` :return: The newly created object. """
if self.config.origin.origin_access_identity:
    policy = 'private'
else:
    policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
    self.set_permissions(object, replace)
return object
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_signed_url(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None): """ Creates a signed CloudFront URL that is only valid within the specified parameters. :type url: str :param url: The URL of the protected object. :type keypair_id: str :param keypair_id: The keypair ID of the Amazon KeyPair used to sign theURL. This ID MUST correspond to the private key specified with private_key_file or private_key_string. :type expire_time: int :param expire_time: The expiry time of the URL. If provided, the URL will expire after the time has passed. If not provided the URL will never expire. Format is a unix epoch. Use time.time() + duration_in_sec. :type valid_after_time: int :param valid_after_time: If provided, the URL will not be valid until after valid_after_time. Format is a unix epoch. Use time.time() + secs_until_valid. :type ip_address: str :param ip_address: If provided, only allows access from the specified IP address. Use '192.168.0.10' for a single IP or use '192.168.0.0/24' CIDR notation for a subnet. :type policy_url: str :param policy_url: If provided, allows the signature to contain wildcard globs in the URL. For example, you could provide: 'http://example.com/media/\*' and the policy and signature would allow access to all contents of the media subdirectory. If not specified, only allow access to the exact url provided in 'url'. :type private_key_file: str or file object. :param private_key_file: If provided, contains the filename of the private key file used for signing or an open file object containing the private key contents. Only one of private_key_file or private_key_string can be provided. :type private_key_string: str :param private_key_string: If provided, contains the private key string used for signing. Only one of private_key_file or private_key_string can be provided. :rtype: str :return: The signed URL. """
# Get the required parameters
params = self._create_signing_params(
    url=url, keypair_id=keypair_id, expire_time=expire_time,
    valid_after_time=valid_after_time, ip_address=ip_address,
    policy_url=policy_url, private_key_file=private_key_file,
    private_key_string=private_key_string)

# combine these into a full url
if "?" in url:
    sep = "&"
else:
    sep = "?"
signed_url_params = []
for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
    if key in params:
        param = "%s=%s" % (key, params[key])
        signed_url_params.append(param)
signed_url = url + sep + "&".join(signed_url_params)
return signed_url
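A hedged example of generating a signed URL, assuming an existing private-content distribution and a local CloudFront private key; the key pair id, URL and path are placeholders:

import time
import boto

cf = boto.connect_cloudfront()
dist = cf.get_all_distributions()[0].get_distribution()
url = dist.create_signed_url(
    'http://d1234example.cloudfront.net/private/report.pdf',
    keypair_id='APKAEXAMPLE',
    expire_time=int(time.time()) + 3600,
    private_key_file='/path/to/pk-APKAEXAMPLE.pem')
print(url)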
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_signing_params(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None): """ Creates the required URL parameters for a signed URL. """
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
    # we manually construct this policy string to ensure formatting
    # matches signature
    policy = self._canned_policy(url, expire_time)
    params["Expires"] = str(expire_time)
else:
    # If no policy_url is specified, default to the full url.
    if policy_url is None:
        policy_url = url
    # Can't use canned policy
    policy = self._custom_policy(policy_url, expires=expire_time,
                                 valid_after=valid_after_time,
                                 ip_address=ip_address)
    encoded_policy = self._url_base64_encode(policy)
    params["Policy"] = encoded_policy
# sign the policy
signature = self._sign_string(policy, private_key_file, private_key_string)
# now base64 encode the signature (URL safe as well)
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _custom_policy(resource, expires=None, valid_after=None, ip_address=None): """ Creates a custom policy string based on the supplied parameters. """
condition = {}
if expires:
    condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
    condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
    if '/' not in ip_address:
        ip_address += "/32"
    condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
    "Resource": resource,
    "Condition": condition}]}
return json.dumps(policy, separators=(",", ":"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _sign_string(message, private_key_file=None, private_key_string=None): """ Signs a string for use with Amazon CloudFront. Requires the M2Crypto library be installed. """
try:
    from M2Crypto import EVP
except ImportError:
    raise NotImplementedError("Boto depends on the python M2Crypto "
                              "library to generate signed URLs for "
                              "CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
    raise ValueError("Only specify the private_key_file or the "
                     "private_key_string not both")
if not private_key_file and not private_key_string:
    raise ValueError("You must specify one of private_key_file or "
                     "private_key_string")
# if private_key_file is a file object read the key string from there
if isinstance(private_key_file, file):
    private_key_string = private_key_file.read()
# Now load key and calculate signature
if private_key_string:
    key = EVP.load_key_string(private_key_string)
else:
    key = EVP.load_key(private_key_file)
key.reset_context(md='sha1')
key.sign_init()
key.sign_update(str(message))
signature = key.sign_final()
return signature
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _url_base64_encode(msg): """ Base64 encodes a string using the URL-safe characters specified by Amazon. """
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_content(sender, instance, created=None, **kwargs): """ Re-save any content models referencing the just-modified ``FileUpload``. We don't do anything special to the content model, we just re-save it. If signals are in use, we assume that the content model has incorporated ``render_uploads`` into some kind of rendering that happens automatically at save-time. """
if created:
    # a brand new FileUpload won't be referenced
    return
for ref in FileUploadReference.objects.filter(upload=instance):
    try:
        obj = ref.content_object
        if obj:
            obj.save()
    except AttributeError:
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_params(self): """ Returns a dictionary containing the value of all of the keyword arguments passed when constructing this connection. """
param_names = ['aws_access_key_id', 'aws_secret_access_key',
               'is_secure', 'port', 'proxy', 'proxy_port',
               'proxy_user', 'proxy_pass',
               'debug', 'https_connection_factory']
params = {}
for name in param_names:
    params[name] = getattr(self, name)
return params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_images(self, image_ids=None, owners=None, executable_by=None, filters=None): """ Retrieve all the EC2 images available on your account. :type image_ids: list :param image_ids: A list of strings with the image IDs wanted :type owners: list :param owners: A list of owner IDs :type executable_by: list :param executable_by: Returns AMIs for which the specified user ID has explicit launch permissions :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.image.Image` """
params = {}
if image_ids:
    self.build_list_params(params, image_ids, 'ImageId')
if owners:
    self.build_list_params(params, owners, 'Owner')
if executable_by:
    self.build_list_params(params, executable_by, 'ExecutableBy')
if filters:
    self.build_filter_params(params, filters)
return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_kernels(self, kernel_ids=None, owners=None): """ Retrieve all the EC2 kernels available on your account. Constructs a filter to allow the processing to happen server side. :type kernel_ids: list :param kernel_ids: A list of strings with the image IDs wanted :type owners: list :param owners: A list of owner IDs :rtype: list :return: A list of :class:`boto.ec2.image.Image` """
params = {}
if kernel_ids:
    self.build_list_params(params, kernel_ids, 'ImageId')
if owners:
    self.build_list_params(params, owners, 'Owner')
filter = {'image-type': 'kernel'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_ramdisks(self, ramdisk_ids=None, owners=None): """ Retrieve all the EC2 ramdisks available on your account. Constructs a filter to allow the processing to happen server side. :type ramdisk_ids: list :param ramdisk_ids: A list of strings with the image IDs wanted :type owners: list :param owners: A list of owner IDs :rtype: list :return: A list of :class:`boto.ec2.image.Image` """
params = {}
if ramdisk_ids:
    self.build_list_params(params, ramdisk_ids, 'ImageId')
if owners:
    self.build_list_params(params, owners, 'Owner')
filter = {'image-type': 'ramdisk'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_image(self, name=None, description=None, image_location=None, architecture=None, kernel_id=None, ramdisk_id=None, root_device_name=None, block_device_map=None): """ Register an image. :type name: string :param name: The name of the AMI. Valid only for EBS-based images. :type description: string :param description: The description of the AMI. :type image_location: string :param image_location: Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMI's. :type architecture: string :param architecture: The architecture of the AMI. Valid choices are: i386 | x86_64 :type kernel_id: string :param kernel_id: The ID of the kernel with which to launch the instances :type root_device_name: string :param root_device_name: The root device name (e.g. /dev/sdh) :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` :param block_device_map: A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. :rtype: string :return: The new image id """
params = {}
if name:
    params['Name'] = name
if description:
    params['Description'] = description
if architecture:
    params['Architecture'] = architecture
if kernel_id:
    params['KernelId'] = kernel_id
if ramdisk_id:
    params['RamdiskId'] = ramdisk_id
if image_location:
    params['ImageLocation'] = image_location
if root_device_name:
    params['RootDeviceName'] = root_device_name
if block_device_map:
    block_device_map.build_list_params(params)
rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
image_id = getattr(rs, 'imageId', None)
return image_id
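A sketch of registering an EBS-backed AMI from an existing snapshot; the snapshot id and names are placeholders:

import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType

ec2 = boto.connect_ec2()
bdm = BlockDeviceMapping()
bdm['/dev/sda1'] = EBSBlockDeviceType(snapshot_id='snap-12345678')
ami_id = ec2.register_image(name='my-image', description='example image',
                            architecture='x86_64',
                            root_device_name='/dev/sda1',
                            block_device_map=bdm)
print(ami_id)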
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deregister_image(self, image_id, delete_snapshot=False): """ Unregister an AMI. :type image_id: string :param image_id: the ID of the Image to unregister :type delete_snapshot: bool :param delete_snapshot: Set to True if we should delete the snapshot associated with an EBS volume mounted at /dev/sda1 :rtype: bool :return: True if successful """
snapshot_id = None
if delete_snapshot:
    image = self.get_image(image_id)
    for key in image.block_device_mapping:
        if key == "/dev/sda1":
            snapshot_id = image.block_device_mapping[key].snapshot_id
            break

result = self.get_status('DeregisterImage',
                         {'ImageId': image_id}, verb='POST')
if result and snapshot_id:
    return result and self.delete_snapshot(snapshot_id)
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_image(self, instance_id, name, description=None, no_reboot=False): """ Will create an AMI from the instance in the running or stopped state. :type instance_id: string :param instance_id: the ID of the instance to image. :type name: string :param name: The name of the new image :type description: string :param description: An optional human-readable string describing the contents and purpose of the AMI. :type no_reboot: bool :param no_reboot: An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. :rtype: string :return: The new image id """
params = {'InstanceId': instance_id, 'Name': name}
if description:
    params['Description'] = description
if no_reboot:
    params['NoReboot'] = 'true'
img = self.get_object('CreateImage', params, Image, verb='POST')
return img.id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_image_attribute(self, image_id, attribute='launchPermission'): """ Gets an attribute from an image. :type image_id: string :param image_id: The Amazon image id for which you want info about :type attribute: string :param attribute: The attribute you need information about. Valid choices are: * launchPermission * productCodes * blockDeviceMapping :rtype: :class:`boto.ec2.image.ImageAttribute` :return: An ImageAttribute object representing the value of the attribute requested """
params = {'ImageId': image_id, 'Attribute': attribute}
return self.get_object('DescribeImageAttribute', params,
                       ImageAttribute, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_image_attribute(self, image_id, attribute='launchPermission'): """ Resets an attribute of an AMI to its default value. :type image_id: string :param image_id: ID of the AMI for which an attribute will be reset :type attribute: string :param attribute: The attribute to reset :rtype: bool :return: Whether the operation succeeded or not """
params = {'ImageId': image_id, 'Attribute': attribute}
return self.get_status('ResetImageAttribute', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_instances(self, instance_ids=None, filters=None): """ Retrieve all the instances associated with your account. :type instance_ids: list :param instance_ids: A list of strings of instance IDs :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.instance.Reservation` """
params = {}
if instance_ids:
    self.build_list_params(params, instance_ids, 'InstanceId')
if filters:
    if 'group-id' in filters:
        gid = filters.get('group-id')
        if not gid.startswith('sg-') or len(gid) != 11:
            warnings.warn(
                "The group-id filter now requires a security group "
                "identifier (sg-*) instead of a group name. To filter "
                "by group name use the 'group-name' filter instead.",
                UserWarning)
    self.build_filter_params(params, filters)
return self.get_list('DescribeInstances', params,
                     [('item', Reservation)], verb='POST')
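Note that the call returns Reservation objects, not instances; a short sketch of the usual iteration pattern (the filter name is one of the documented EC2 filters):

import boto

ec2 = boto.connect_ec2()
for reservation in ec2.get_all_instances(filters={'instance-state-name': 'running'}):
    for instance in reservation.instances:
        print('%s %s' % (instance.id, instance.state))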
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_instance_status(self, instance_ids=None, max_results=None, next_token=None, filters=None): """ Retrieve all the instances in your account scheduled for maintenance. :type instance_ids: list :param instance_ids: A list of strings of instance IDs :type max_results: int :param max_results: The maximum number of paginated instance items per response. :type next_token: str :param next_token: A string specifying the next paginated set of results to return. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of instances that have maintenance scheduled. """
params = {}
if instance_ids:
    self.build_list_params(params, instance_ids, 'InstanceId')
if max_results:
    params['MaxResults'] = max_results
if next_token:
    params['NextToken'] = next_token
if filters:
    self.build_filter_params(params, filters)
return self.get_object('DescribeInstanceStatus', params,
                       InstanceStatusSet, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_instances(self, image_id, min_count=1, max_count=1, key_name=None, security_groups=None, user_data=None, addressing_type=None, instance_type='m1.small', placement=None, kernel_id=None, ramdisk_id=None, monitoring_enabled=False, subnet_id=None, block_device_map=None, disable_api_termination=False, instance_initiated_shutdown_behavior=None, private_ip_address=None, placement_group=None, client_token=None, security_group_ids=None): """ Runs an image on EC2. :type image_id: string :param image_id: The ID of the image to run :type min_count: int :param min_count: The minimum number of instances to launch :type max_count: int :param max_count: The maximum number of instances to launch :type key_name: string :param key_name: The name of the key pair with which to launch instances :type security_groups: list of strings :param security_groups: The names of the security groups with which to associate instances :type user_data: string :param user_data: The user data passed to the launched instances :type instance_type: string :param instance_type: The type of instance to run: * m1.small * m1.large * m1.xlarge * c1.medium * c1.xlarge * m2.xlarge * m2.2xlarge * m2.4xlarge * cc1.4xlarge * t1.micro :type placement: string :param placement: The availability zone in which to launch the instances :type kernel_id: string :param kernel_id: The ID of the kernel with which to launch the instances :type ramdisk_id: string :param ramdisk_id: The ID of the RAM disk with which to launch the instances :type monitoring_enabled: bool :param monitoring_enabled: Enable CloudWatch monitoring on the instance. :type subnet_id: string :param subnet_id: The subnet ID within which to launch the instances for VPC. :type private_ip_address: string :param private_ip_address: If you're using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` :param block_device_map: A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. :type disable_api_termination: bool :param disable_api_termination: If True, the instances will be locked and will not be able to be terminated via the API. :type instance_initiated_shutdown_behavior: string :param instance_initiated_shutdown_behavior: Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: * stop * terminate :type placement_group: string :param placement_group: If specified, this is the name of the placement group in which the instance(s) will be launched. :type client_token: string :param client_token: Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters :rtype: Reservation :return: The :class:`boto.ec2.instance.Reservation` associated with the request for machines :type security_group_ids: list of strings :param security_group_ids: The ID of the VPC security groups with which to associate instances """
params = {'ImageId': image_id,
          'MinCount': min_count,
          'MaxCount': max_count}
if key_name:
    params['KeyName'] = key_name
if security_group_ids:
    l = []
    for group in security_group_ids:
        if isinstance(group, SecurityGroup):
            l.append(group.id)
        else:
            l.append(group)
    self.build_list_params(params, l, 'SecurityGroupId')
if security_groups:
    l = []
    for group in security_groups:
        if isinstance(group, SecurityGroup):
            l.append(group.name)
        else:
            l.append(group)
    self.build_list_params(params, l, 'SecurityGroup')
if user_data:
    params['UserData'] = base64.b64encode(user_data)
if addressing_type:
    params['AddressingType'] = addressing_type
if instance_type:
    params['InstanceType'] = instance_type
if placement:
    params['Placement.AvailabilityZone'] = placement
if placement_group:
    params['Placement.GroupName'] = placement_group
if kernel_id:
    params['KernelId'] = kernel_id
if ramdisk_id:
    params['RamdiskId'] = ramdisk_id
if monitoring_enabled:
    params['Monitoring.Enabled'] = 'true'
if subnet_id:
    params['SubnetId'] = subnet_id
if private_ip_address:
    params['PrivateIpAddress'] = private_ip_address
if block_device_map:
    block_device_map.build_list_params(params)
if disable_api_termination:
    params['DisableApiTermination'] = 'true'
if instance_initiated_shutdown_behavior:
    val = instance_initiated_shutdown_behavior
    params['InstanceInitiatedShutdownBehavior'] = val
if client_token:
    params['ClientToken'] = client_token
return self.get_object('RunInstances', params, Reservation, verb='POST')
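A sketch of launching a single instance and polling until it leaves the pending state; the AMI id, key pair and security group are placeholders:

import time
import boto

ec2 = boto.connect_ec2()
reservation = ec2.run_instances('ami-12345678', key_name='my-key',
                                instance_type='m1.small',
                                security_groups=['default'])
instance = reservation.instances[0]
while instance.state == 'pending':
    time.sleep(5)
    instance.update()
print(instance.public_dns_name)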
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def terminate_instances(self, instance_ids=None): """ Terminate the instances specified :type instance_ids: list :param instance_ids: A list of strings of the Instance IDs to terminate :rtype: list :return: A list of the instances terminated """
params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('TerminateInstances', params, [('item', Instance)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_instances(self, instance_ids=None, force=False): """ Stop the instances specified :type instance_ids: list :param instance_ids: A list of strings of the Instance IDs to stop :type force: bool :param force: Forces the instance to stop :rtype: list :return: A list of the instances stopped """
params = {} if force: params['Force'] = 'true' if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('StopInstances', params, [('item', Instance)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_instances(self, instance_ids=None): """ Start the instances specified :type instance_ids: list :param instance_ids: A list of strings of the Instance IDs to start :rtype: list :return: A list of the instances started """
params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('StartInstances', params, [('item', Instance)], verb='POST')
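A short lifecycle sketch tying stop_instances, start_instances, and terminate_instances together; the instance ID is illustrative only.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# Each call returns the list of Instance objects whose state changed.
stopped = conn.stop_instances(instance_ids=['i-12345678'])
started = conn.start_instances(instance_ids=['i-12345678'])
terminated = conn.terminate_instances(instance_ids=['i-12345678'])
print([i.id for i in terminated])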
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_console_output(self, instance_id): """ Retrieves the console output for the specified instance. :type instance_id: string :param instance_id: The instance ID of a running instance on the cloud. :rtype: :class:`boto.ec2.instance.ConsoleOutput` :return: The console output as a ConsoleOutput object """
params = {} self.build_list_params(params, [instance_id], 'InstanceId') return self.get_object('GetConsoleOutput', params, ConsoleOutput, verb='POST')

<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reboot_instances(self, instance_ids=None):
    """
    Reboot the specified instances.

    :type instance_ids: list
    :param instance_ids: The instances to reboot
    """
params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceId') return self.get_status('RebootInstances', params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_instance_attribute(self, instance_id, attribute): """ Gets an attribute from an instance. :type instance_id: string :param instance_id: The Amazon id of the instance :type attribute: string :param attribute: The attribute you need information about Valid choices are: * instanceType|kernel|ramdisk|userData| * disableApiTermination| * instanceInitiatedShutdownBehavior| * rootDeviceName|blockDeviceMapping :rtype: :class:`boto.ec2.image.InstanceAttribute` :return: An InstanceAttribute object representing the value of the attribute requested """
params = {'InstanceId' : instance_id} if attribute: params['Attribute'] = attribute return self.get_object('DescribeInstanceAttribute', params, InstanceAttribute, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def modify_instance_attribute(self, instance_id, attribute, value): """ Changes an attribute of an instance :type instance_id: string :param instance_id: The instance id you wish to change :type attribute: string :param attribute: The attribute you wish to change. * AttributeName - Expected value (default) * instanceType - A valid instance type (m1.small) * kernel - Kernel ID (None) * ramdisk - Ramdisk ID (None) * userData - Base64 encoded String (None) * disableApiTermination - Boolean (true) * instanceInitiatedShutdownBehavior - stop|terminate * rootDeviceName - device name (None) :type value: string :param value: The new value for the attribute :rtype: bool :return: Whether the operation succeeded or not """
# Allow a bool to be passed in for value of disableApiTermination if attribute == 'disableApiTermination': if isinstance(value, bool): if value: value = 'true' else: value = 'false' params = {'InstanceId' : instance_id, 'Attribute' : attribute, 'Value' : value} return self.get_status('ModifyInstanceAttribute', params, verb='POST')
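A sketch of reading and changing an instance attribute, assuming the returned InstanceAttribute object exposes its value dict-style (as it does in boto 2.x); the instance ID and types are placeholders.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# DescribeInstanceAttribute result, keyed by the attribute name.
attr = conn.get_instance_attribute('i-12345678', 'instanceType')
print(attr.get('instanceType'))

# The instance must be stopped before instanceType can be modified.
conn.stop_instances(instance_ids=['i-12345678'])
conn.modify_instance_attribute('i-12345678', 'instanceType', 'm1.large')
conn.start_instances(instance_ids=['i-12345678'])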
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_instance_attribute(self, instance_id, attribute): """ Resets an attribute of an instance to its default value. :type instance_id: string :param instance_id: ID of the instance :type attribute: string :param attribute: The attribute to reset. Valid values are: kernel|ramdisk :rtype: bool :return: Whether the operation succeeded or not """
params = {'InstanceId' : instance_id, 'Attribute' : attribute} return self.get_status('ResetInstanceAttribute', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_spot_instance_requests(self, request_ids=None, filters=None): """ Retrieve all the spot instances requests associated with your account. :type request_ids: list :param request_ids: A list of strings of spot instance request IDs :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest` """
params = {} if request_ids: self.build_list_params(params, request_ids, 'SpotInstanceRequestId') if filters: if 'launch.group-id' in filters: lgid = filters.get('launch.group-id') if not lgid.startswith('sg-') or len(lgid) != 11: warnings.warn( "The 'launch.group-id' filter now requires a security " "group id (sg-*) and no longer supports filtering by " "group name. Please update your filters accordingly.", UserWarning) self.build_filter_params(params, filters) return self.get_list('DescribeSpotInstanceRequests', params, [('item', SpotInstanceRequest)], verb='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_spot_price_history(self, start_time=None, end_time=None,
                           instance_type=None, product_description=None,
                           availability_zone=None):
    """
    Retrieve the recent history of spot instances pricing.

    :type start_time: str
    :param start_time: An indication of how far back to provide price
                       changes for. An ISO8601 DateTime string.

    :type end_time: str
    :param end_time: An indication of how far forward to provide price
                     changes for. An ISO8601 DateTime string.

    :type instance_type: str
    :param instance_type: Filter responses to a particular instance type.

    :type product_description: str
    :param product_description: Filter responses to a particular platform.
                                Valid values are currently: "Linux/UNIX",
                                "SUSE Linux", and "Windows"

    :type availability_zone: str
    :param availability_zone: The availability zone for which prices
                              should be returned

    :rtype: list
    :return: A list of SpotPriceHistory objects, each containing a price
             and timestamp.
    """
params = {} if start_time: params['StartTime'] = start_time if end_time: params['EndTime'] = end_time if instance_type: params['InstanceType'] = instance_type if product_description: params['ProductDescription'] = product_description if availability_zone: params['AvailabilityZone'] = availability_zone return self.get_list('DescribeSpotPriceHistory', params, [('item', SpotPriceHistory)], verb='POST')
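A usage sketch for the spot price history call, assuming SpotPriceHistory entries expose price and timestamp attributes as in boto 2.x; the region is a placeholder.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
history = conn.get_spot_price_history(instance_type='m1.small',
                                      product_description='Linux/UNIX')
for entry in history:
    # Each entry carries one observed price and the time it took effect.
    print(entry.timestamp, entry.price)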
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request_spot_instances(self, price, image_id, count=1, type='one-time',
                           valid_from=None, valid_until=None,
                           launch_group=None, availability_zone_group=None,
                           key_name=None, security_groups=None,
                           user_data=None, addressing_type=None,
                           instance_type='m1.small', placement=None,
                           kernel_id=None, ramdisk_id=None,
                           monitoring_enabled=False, subnet_id=None,
                           block_device_map=None):
    """
    Request instances on the spot market at a particular price.

    :type price: str
    :param price: The maximum price of your bid

    :type image_id: string
    :param image_id: The ID of the image to run

    :type count: int
    :param count: The number of instances requested

    :type type: str
    :param type: Type of request. Can be 'one-time' or 'persistent'.
                 Default is one-time.

    :type valid_from: str
    :param valid_from: Start date of the request. An ISO8601 time string.

    :type valid_until: str
    :param valid_until: End date of the request. An ISO8601 time string.

    :type launch_group: str
    :param launch_group: If supplied, all requests will be fulfilled
                         as a group.

    :type availability_zone_group: str
    :param availability_zone_group: If supplied, all requests will be
                                    fulfilled within a single
                                    availability zone.

    :type key_name: string
    :param key_name: The name of the key pair with which to launch instances

    :type security_groups: list of strings
    :param security_groups: The names of the security groups with which to
                            associate instances

    :type user_data: string
    :param user_data: The user data passed to the launched instances

    :type instance_type: string
    :param instance_type: The type of instance to run:

                          * m1.small
                          * m1.large
                          * m1.xlarge
                          * c1.medium
                          * c1.xlarge
                          * m2.xlarge
                          * m2.2xlarge
                          * m2.4xlarge
                          * cc1.4xlarge
                          * t1.micro

    :type placement: string
    :param placement: The availability zone in which to launch the instances

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the instances

    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch
                       the instances

    :type monitoring_enabled: bool
    :param monitoring_enabled: Enable CloudWatch monitoring on the instance.

    :type subnet_id: string
    :param subnet_id: The subnet ID within which to launch the instances
                      for VPC.

    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure
                             describing the EBS volumes associated
                             with the Image.

    :rtype: list
    :return: A list of
             :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
             objects associated with the request for machines
    """
params = {'LaunchSpecification.ImageId':image_id, 'Type' : type, 'SpotPrice' : price} if count: params['InstanceCount'] = count if valid_from: params['ValidFrom'] = valid_from if valid_until: params['ValidUntil'] = valid_until if launch_group: params['LaunchGroup'] = launch_group if availability_zone_group: params['AvailabilityZoneGroup'] = availability_zone_group if key_name: params['LaunchSpecification.KeyName'] = key_name if security_groups: l = [] for group in security_groups: if isinstance(group, SecurityGroup): l.append(group.name) else: l.append(group) self.build_list_params(params, l, 'LaunchSpecification.SecurityGroup') if user_data: params['LaunchSpecification.UserData'] = base64.b64encode(user_data) if addressing_type: params['LaunchSpecification.AddressingType'] = addressing_type if instance_type: params['LaunchSpecification.InstanceType'] = instance_type if placement: params['LaunchSpecification.Placement.AvailabilityZone'] = placement if kernel_id: params['LaunchSpecification.KernelId'] = kernel_id if ramdisk_id: params['LaunchSpecification.RamdiskId'] = ramdisk_id if monitoring_enabled: params['LaunchSpecification.Monitoring.Enabled'] = 'true' if subnet_id: params['LaunchSpecification.SubnetId'] = subnet_id if block_device_map: block_device_map.build_list_params(params, 'LaunchSpecification.') return self.get_list('RequestSpotInstances', params, [('item', SpotInstanceRequest)], verb='POST')
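A sketch of placing a one-time spot bid with the method above; the price, AMI ID, and key pair name are placeholder values.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
requests = conn.request_spot_instances(price='0.05',
                                       image_id='ami-12345678',
                                       count=1,
                                       key_name='my-keypair',
                                       instance_type='m1.small')
for req in requests:
    # Each SpotInstanceRequest can be polled later until it is fulfilled.
    print(req.id, req.state)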
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cancel_spot_instance_requests(self, request_ids):
    """
    Cancel the specified Spot Instance Requests.

    :type request_ids: list
    :param request_ids: A list of strings of the Request IDs to cancel

    :rtype: list
    :return: A list of the Spot Instance Requests that were cancelled
    """
params = {} if request_ids: self.build_list_params(params, request_ids, 'SpotInstanceRequestId') return self.get_list('CancelSpotInstanceRequests', params, [('item', Instance)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_spot_datafeed_subscription(self, bucket, prefix): """ Create a spot instance datafeed subscription for this account. :type bucket: str or unicode :param bucket: The name of the bucket where spot instance data will be written. The account issuing this request must have FULL_CONTROL access to the bucket specified in the request. :type prefix: str or unicode :param prefix: An optional prefix that will be pre-pended to all data files written to the bucket. :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription` :return: The datafeed subscription object or None """
params = {'Bucket' : bucket} if prefix: params['Prefix'] = prefix return self.get_object('CreateSpotDatafeedSubscription', params, SpotDatafeedSubscription, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_zones(self, zones=None, filters=None): """ Get all Availability Zones associated with the current region. :type zones: list :param zones: Optional list of zones. If this list is present, only the Zones associated with these zone names will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list of :class:`boto.ec2.zone.Zone` :return: The requested Zone objects """
params = {} if zones: self.build_list_params(params, zones, 'ZoneName') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeAvailabilityZones', params, [('item', Zone)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def associate_address(self, instance_id, public_ip=None, allocation_id=None): """ Associate an Elastic IP address with a currently running instance. This requires one of ``public_ip`` or ``allocation_id`` depending on if you're associating a VPC address or a plain EC2 address. :type instance_id: string :param instance_id: The ID of the instance :type public_ip: string :param public_ip: The public IP address for EC2 based allocations. :type allocation_id: string :param allocation_id: The allocation ID for a VPC-based elastic IP. :rtype: bool :return: True if successful """
params = { 'InstanceId' : instance_id } if public_ip is not None: params['PublicIp'] = public_ip elif allocation_id is not None: params['AllocationId'] = allocation_id return self.get_status('AssociateAddress', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disassociate_address(self, public_ip=None, association_id=None): """ Disassociate an Elastic IP address from a currently running instance. :type public_ip: string :param public_ip: The public IP address for EC2 elastic IPs. :type association_id: string :param association_id: The association ID for a VPC based elastic ip. :rtype: bool :return: True if successful """
params = {} if public_ip is not None: params['PublicIp'] = public_ip elif association_id is not None: params['AssociationId'] = association_id return self.get_status('DisassociateAddress', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def release_address(self, public_ip=None, allocation_id=None): """ Free up an Elastic IP address. :type public_ip: string :param public_ip: The public IP address for EC2 elastic IPs. :type allocation_id: string :param allocation_id: The ID for VPC elastic IPs. :rtype: bool :return: True if successful """
params = {} if public_ip is not None: params['PublicIp'] = public_ip elif allocation_id is not None: params['AllocationId'] = allocation_id return self.get_status('ReleaseAddress', params, verb='POST')
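A sketch of the Elastic IP calls above for EC2-classic addresses; the IP address and instance ID are illustrative only.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# Point an existing Elastic IP at a running instance ...
conn.associate_address('i-12345678', public_ip='203.0.113.10')
# ... later, detach it and return it to the account's address pool.
conn.disassociate_address(public_ip='203.0.113.10')
conn.release_address(public_ip='203.0.113.10')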
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_volumes(self, volume_ids=None, filters=None): """ Get all Volumes associated with the current credentials. :type volume_ids: list :param volume_ids: Optional list of volume ids. If this list is present, only the volumes associated with these volume ids will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list of :class:`boto.ec2.volume.Volume` :return: The requested Volume objects """
params = {} if volume_ids: self.build_list_params(params, volume_ids, 'VolumeId') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeVolumes', params, [('item', Volume)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_volume(self, size, zone, snapshot=None): """ Create a new EBS Volume. :type size: int :param size: The size of the new volume, in GiB :type zone: string or :class:`boto.ec2.zone.Zone` :param zone: The availability zone in which the Volume will be created. :type snapshot: string or :class:`boto.ec2.snapshot.Snapshot` :param snapshot: The snapshot from which the new Volume will be created. """
if isinstance(zone, Zone): zone = zone.name params = {'AvailabilityZone' : zone} if size: params['Size'] = size if snapshot: if isinstance(snapshot, Snapshot): snapshot = snapshot.id params['SnapshotId'] = snapshot return self.get_object('CreateVolume', params, Volume, verb='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def attach_volume(self, volume_id, instance_id, device):
    """
    Attach an EBS volume to an EC2 instance.

    :type volume_id: str
    :param volume_id: The ID of the EBS volume to be attached.

    :type instance_id: str
    :param instance_id: The ID of the EC2 instance to which it will
                        be attached.

    :type device: str
    :param device: The device on the instance through which the volume
                   will be exposed (e.g. /dev/sdh)

    :rtype: bool
    :return: True if successful
    """
params = {'InstanceId' : instance_id, 'VolumeId' : volume_id, 'Device' : device} return self.get_status('AttachVolume', params, verb='POST')
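A sketch combining create_volume and attach_volume, assuming the Volume object exposes status and update() as in boto 2.x; the zone and instance ID are placeholders.

import time
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# The volume must reach the 'available' state before it can be attached.
volume = conn.create_volume(10, 'us-east-1a')
while volume.status != 'available':
    time.sleep(5)
    volume.update()
conn.attach_volume(volume.id, 'i-12345678', '/dev/sdh')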
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_snapshots(self, snapshot_ids=None, owner=None, restorable_by=None, filters=None): """ Get all EBS Snapshots associated with the current credentials. :type snapshot_ids: list :param snapshot_ids: Optional list of snapshot ids. If this list is present, only the Snapshots associated with these snapshot ids will be returned. :type owner: str :param owner: If present, only the snapshots owned by the specified user will be returned. Valid values are: * self * amazon * AWS Account ID :type restorable_by: str :param restorable_by: If present, only the snapshots that are restorable by the specified account id will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list of :class:`boto.ec2.snapshot.Snapshot` :return: The requested Snapshot objects """
params = {} if snapshot_ids: self.build_list_params(params, snapshot_ids, 'SnapshotId') if owner: params['Owner'] = owner if restorable_by: params['RestorableBy'] = restorable_by if filters: self.build_filter_params(params, filters) return self.get_list('DescribeSnapshots', params, [('item', Snapshot)], verb='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_snapshot(self, volume_id, description=None):
    """
    Create a snapshot of an existing EBS Volume.

    :type volume_id: str
    :param volume_id: The ID of the volume to be snapshotted

    :type description: str
    :param description: A description of the snapshot.
                        Limited to 255 characters.

    :rtype: :class:`boto.ec2.snapshot.Snapshot`
    :return: The newly created Snapshot object
    """
params = {'VolumeId' : volume_id} if description: params['Description'] = description[0:255] snapshot = self.get_object('CreateSnapshot', params, Snapshot, verb='POST') volume = self.get_all_volumes([volume_id])[0] volume_name = volume.tags.get('Name') if volume_name: snapshot.add_tag('Name', volume_name) return snapshot
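A sketch of snapshotting a volume and waiting for completion, assuming the Snapshot object exposes status and update() as in boto 2.x; the volume ID is a placeholder.

import time
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
snapshot = conn.create_snapshot('vol-12345678', description='nightly backup')

# Poll until the snapshot finishes; update() refreshes its status field.
while snapshot.status != 'completed':
    time.sleep(10)
    snapshot.update()
print(snapshot.id, snapshot.status)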
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'): """ Get information about an attribute of a snapshot. Only one attribute can be specified per call. :type snapshot_id: str :param snapshot_id: The ID of the snapshot. :type attribute: str :param attribute: The requested attribute. Valid values are: * createVolumePermission :rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute` :return: The requested Snapshot attribute """
params = {'Attribute' : attribute} if snapshot_id: params['SnapshotId'] = snapshot_id return self.get_object('DescribeSnapshotAttribute', params, SnapshotAttribute, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'): """ Resets an attribute of a snapshot to its default value. :type snapshot_id: string :param snapshot_id: ID of the snapshot :type attribute: string :param attribute: The attribute to reset :rtype: bool :return: Whether the operation succeeded or not """
params = {'SnapshotId' : snapshot_id, 'Attribute' : attribute} return self.get_status('ResetSnapshotAttribute', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_key_pairs(self, keynames=None, filters=None): """ Get all key pairs associated with your account. :type keynames: list :param keynames: A list of the names of keypairs to retrieve. If not provided, all key pairs will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.keypair.KeyPair` """
params = {} if keynames: self.build_list_params(params, keynames, 'KeyName') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeKeyPairs', params, [('item', KeyPair)], verb='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_key_pair(self, key_name):
    """
    Create a new key pair for your account.
    This will create the key pair within the region you
    are currently connected to.

    :type key_name: string
    :param key_name: The name of the new keypair

    :rtype: :class:`boto.ec2.keypair.KeyPair`
    :return: The newly created :class:`boto.ec2.keypair.KeyPair`.
             The material attribute of the new KeyPair object
             will contain the unencrypted PEM encoded RSA private key.
    """
params = {'KeyName':key_name} return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def import_key_pair(self, key_name, public_key_material):
    """
    Imports the public key from an RSA key pair that you created
    with a third-party tool.

    Supported formats:

    * OpenSSH public key format (e.g., the format
      in ~/.ssh/authorized_keys)
    * Base64 encoded DER format
    * SSH public key file format as specified in RFC4716

    DSA keys are not supported. Make sure your key generator is
    set up to create RSA keys.
    Supported lengths: 1024, 2048, and 4096.

    :type key_name: string
    :param key_name: The name of the new keypair

    :type public_key_material: string
    :param public_key_material: The public key to import. This method
                                base64 encodes the material before
                                sending it to AWS, so the raw key text
                                can be passed in directly.

    :rtype: :class:`boto.ec2.keypair.KeyPair`
    :return: A :class:`boto.ec2.keypair.KeyPair` representing the
             imported key. Unlike create_key_pair, no private key
             material is returned for an imported key.
    """
public_key_material = base64.b64encode(public_key_material) params = {'KeyName' : key_name, 'PublicKeyMaterial' : public_key_material} return self.get_object('ImportKeyPair', params, KeyPair, verb='POST')
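A sketch showing both key pair calls together, assuming KeyPair.save() is available (it writes the private key to a .pem file in boto 2.x); the key names and file paths are placeholders.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# create_key_pair returns the private key in the .material attribute;
# save() writes it out as a .pem file in the given directory.
kp = conn.create_key_pair('generated-key')
kp.save('/tmp')

# import_key_pair base64 encodes the material itself (see the code above),
# so the raw OpenSSH public key text can be passed directly.
with open('/home/user/.ssh/id_rsa.pub') as fh:
    conn.import_key_pair('imported-key', fh.read())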
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_security_group(self, name=None, group_id=None): """ Delete a security group from your account. :type name: string :param name: The name of the security group to delete. :type group_id: string :param group_id: The ID of the security group to delete within a VPC. :rtype: bool :return: True if successful. """
params = {} if name is not None: params['GroupName'] = name elif group_id is not None: params['GroupId'] = group_id return self.get_status('DeleteSecurityGroup', params, verb='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authorize_security_group(self, group_name=None,
                             src_security_group_name=None,
                             src_security_group_owner_id=None,
                             ip_protocol=None, from_port=None, to_port=None,
                             cidr_ip=None, group_id=None,
                             src_security_group_group_id=None):
    """
    Add a new rule to an existing security group.
    You need to pass in either src_security_group_name and
    src_security_group_owner_id OR ip_protocol, from_port, to_port,
    and cidr_ip. In other words, either you are authorizing another
    group or you are authorizing some ip-based rule.

    :type group_name: string
    :param group_name: The name of the security group you are adding
                       the rule to.

    :type src_security_group_name: string
    :param src_security_group_name: The name of the security group you are
                                    granting access to.

    :type src_security_group_owner_id: string
    :param src_security_group_owner_id: The ID of the owner of the security
                                        group you are granting access to.

    :type ip_protocol: string
    :param ip_protocol: Either tcp | udp | icmp

    :type from_port: int
    :param from_port: The beginning port number you are enabling

    :type to_port: int
    :param to_port: The ending port number you are enabling

    :type cidr_ip: string
    :param cidr_ip: The CIDR block you are providing access to.
                    See http://goo.gl/Yj5QC

    :type group_id: string
    :param group_id: ID of the EC2 or VPC security group to modify.
                     This is required for VPC security groups and
                     can be used instead of group_name for EC2
                     security groups.

    :type src_security_group_group_id: string
    :param src_security_group_group_id: The ID of the EC2 or VPC source
                                        security group. This is required
                                        for VPC security groups and can
                                        be used instead of
                                        src_security_group_name for EC2
                                        security groups.

    :rtype: bool
    :return: True if successful.
    """
if src_security_group_name: if from_port is None and to_port is None and ip_protocol is None: return self.authorize_security_group_deprecated( group_name, src_security_group_name, src_security_group_owner_id) params = {} if group_name: params['GroupName'] = group_name if group_id: params['GroupId'] = group_id if src_security_group_name: param_name = 'IpPermissions.1.Groups.1.GroupName' params[param_name] = src_security_group_name if src_security_group_owner_id: param_name = 'IpPermissions.1.Groups.1.UserId' params[param_name] = src_security_group_owner_id if src_security_group_group_id: param_name = 'IpPermissions.1.Groups.1.GroupId' params[param_name] = src_security_group_group_id if ip_protocol: params['IpPermissions.1.IpProtocol'] = ip_protocol if from_port is not None: params['IpPermissions.1.FromPort'] = from_port if to_port is not None: params['IpPermissions.1.ToPort'] = to_port if cidr_ip: params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip return self.get_status('AuthorizeSecurityGroupIngress', params, verb='POST')
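A sketch of adding ip-based rules with the method above; the group name and CIDR blocks are illustrative only.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# Open HTTP to the world and SSH to a single CIDR block on the
# EC2-classic security group named 'web'.
conn.authorize_security_group(group_name='web', ip_protocol='tcp',
                              from_port=80, to_port=80,
                              cidr_ip='0.0.0.0/0')
conn.authorize_security_group(group_name='web', ip_protocol='tcp',
                              from_port=22, to_port=22,
                              cidr_ip='203.0.113.0/24')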
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def authorize_security_group_egress(self, group_id, ip_protocol, from_port=None, to_port=None, src_group_id=None, cidr_ip=None): """ The action adds one or more egress rules to a VPC security group. Specifically, this action permits instances in a security group to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups in the same VPC. """
params = { 'GroupId': group_id, 'IpPermissions.1.IpProtocol': ip_protocol } if from_port is not None: params['IpPermissions.1.FromPort'] = from_port if to_port is not None: params['IpPermissions.1.ToPort'] = to_port if src_group_id is not None: params['IpPermissions.1.Groups.1.GroupId'] = src_group_id if cidr_ip is not None: params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip return self.get_status('AuthorizeSecurityGroupEgress', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def revoke_security_group(self, group_name=None, src_security_group_name=None, src_security_group_owner_id=None, ip_protocol=None, from_port=None, to_port=None, cidr_ip=None, group_id=None, src_security_group_group_id=None): """ Remove an existing rule from an existing security group. You need to pass in either src_security_group_name and src_security_group_owner_id OR ip_protocol, from_port, to_port, and cidr_ip. In other words, either you are revoking another group or you are revoking some ip-based rule. :type group_name: string :param group_name: The name of the security group you are removing the rule from. :type src_security_group_name: string :param src_security_group_name: The name of the security group you are revoking access to. :type src_security_group_owner_id: string :param src_security_group_owner_id: The ID of the owner of the security group you are revoking access to. :type ip_protocol: string :param ip_protocol: Either tcp | udp | icmp :type from_port: int :param from_port: The beginning port number you are disabling :type to_port: int :param to_port: The ending port number you are disabling :type cidr_ip: string :param cidr_ip: The CIDR block you are revoking access to. See http://goo.gl/Yj5QC :rtype: bool :return: True if successful. """
if src_security_group_name: if from_port is None and to_port is None and ip_protocol is None: return self.revoke_security_group_deprecated( group_name, src_security_group_name, src_security_group_owner_id) params = {} if group_name is not None: params['GroupName'] = group_name if group_id is not None: params['GroupId'] = group_id if src_security_group_name: param_name = 'IpPermissions.1.Groups.1.GroupName' params[param_name] = src_security_group_name if src_security_group_group_id: param_name = 'IpPermissions.1.Groups.1.GroupId' params[param_name] = src_security_group_group_id if src_security_group_owner_id: param_name = 'IpPermissions.1.Groups.1.UserId' params[param_name] = src_security_group_owner_id if ip_protocol: params['IpPermissions.1.IpProtocol'] = ip_protocol if from_port is not None: params['IpPermissions.1.FromPort'] = from_port if to_port is not None: params['IpPermissions.1.ToPort'] = to_port if cidr_ip: params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip return self.get_status('RevokeSecurityGroupIngress', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_regions(self, region_names=None, filters=None): """ Get all available regions for the EC2 service. :type region_names: list of str :param region_names: Names of regions to limit output :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` """
params = {} if region_names: self.build_list_params(params, region_names, 'RegionName') if filters: self.build_filter_params(params, filters) regions = self.get_list('DescribeRegions', params, [('item', RegionInfo)], verb='POST') for region in regions: region.connection_cls = EC2Connection return regions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def monitor_instances(self, instance_ids):
    """
    Enable CloudWatch monitoring for the supplied instances.

    :type instance_ids: list of strings
    :param instance_ids: The instance ids

    :rtype: list
    :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
    """
params = {} self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('MonitorInstances', params, [('item', InstanceInfo)], verb='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unmonitor_instances(self, instance_ids):
    """
    Disable CloudWatch monitoring for the supplied instances.

    :type instance_ids: list of strings
    :param instance_ids: The instance ids

    :rtype: list
    :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
    """
params = {} self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('UnmonitorInstances', params, [('item', InstanceInfo)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bundle_instance(self, instance_id, s3_bucket, s3_prefix, s3_upload_policy): """ Bundle Windows instance. :type instance_id: string :param instance_id: The instance id :type s3_bucket: string :param s3_bucket: The bucket in which the AMI should be stored. :type s3_prefix: string :param s3_prefix: The beginning of the file name for the AMI. :type s3_upload_policy: string :param s3_upload_policy: Base64 encoded policy that specifies condition and permissions for Amazon EC2 to upload the user's image into Amazon S3. """
params = {'InstanceId' : instance_id, 'Storage.S3.Bucket' : s3_bucket, 'Storage.S3.Prefix' : s3_prefix, 'Storage.S3.UploadPolicy' : s3_upload_policy} s3auth = boto.auth.get_auth_handler(None, boto.config, self.provider, ['s3']) params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id signature = s3auth.sign_string(s3_upload_policy) params['Storage.S3.UploadPolicySignature'] = signature return self.get_object('BundleInstance', params, BundleInstanceTask, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_bundle_tasks(self, bundle_ids=None, filters=None): """ Retrieve current bundling tasks. If no bundle id is specified, all tasks are retrieved. :type bundle_ids: list :param bundle_ids: A list of strings containing identifiers for previously created bundling tasks. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. """
params = {} if bundle_ids: self.build_list_params(params, bundle_ids, 'BundleId') if filters: self.build_filter_params(params, filters) return self.get_list('DescribeBundleTasks', params, [('item', BundleInstanceTask)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cancel_bundle_task(self, bundle_id): """ Cancel a previously submitted bundle task :type bundle_id: string :param bundle_id: The identifier of the bundle task to cancel. """
params = {'BundleId' : bundle_id} return self.get_object('CancelBundleTask', params, BundleInstanceTask, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_password_data(self, instance_id): """ Get encrypted administrator password for a Windows instance. :type instance_id: string :param instance_id: The identifier of the instance to retrieve the password for. """
params = {'InstanceId' : instance_id} rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST') return rs.passwordData
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_placement_groups(self, groupnames=None, filters=None): """ Get all placement groups associated with your account in a region. :type groupnames: list :param groupnames: A list of the names of placement groups to retrieve. If not provided, all placement groups will be returned. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.placementgroup.PlacementGroup` """
params = {} if groupnames: self.build_list_params(params, groupnames, 'GroupName') if filters: self.build_filter_params(params, filters) return self.get_list('DescribePlacementGroups', params, [('item', PlacementGroup)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_placement_group(self, name, strategy='cluster'): """ Create a new placement group for your account. This will create the placement group within the region you are currently connected to. :type name: string :param name: The name of the new placement group :type strategy: string :param strategy: The placement strategy of the new placement group. Currently, the only acceptable value is "cluster". :rtype: bool :return: True if successful """
params = {'GroupName':name, 'Strategy':strategy} group = self.get_status('CreatePlacementGroup', params, verb='POST') return group
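A sketch of creating a cluster placement group and launching into it via the placement_group parameter of run_instances; the group name and AMI ID are placeholders.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# Create the group, then launch into it; cluster placement only works
# with supported instance types such as cc1.4xlarge.
conn.create_placement_group('my-cluster', strategy='cluster')
conn.run_instances('ami-12345678',
                   min_count=2, max_count=2,
                   instance_type='cc1.4xlarge',
                   placement_group='my-cluster')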
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_tags(self, filters=None): """ Retrieve all the metadata tags associated with your account. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: dict :return: A dictionary containing metadata tags """
params = {} if filters: self.build_filter_params(params, filters) return self.get_list('DescribeTags', params, [('item', Tag)], verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_network_interface(self, subnet_id, private_ip_address=None, description=None, groups=None): """ Creates a network interface in the specified subnet. :type subnet_id: str :param subnet_id: The ID of the subnet to associate with the network interface. :type private_ip_address: str :param private_ip_address: The private IP address of the network interface. If not supplied, one will be chosen for you. :type description: str :param description: The description of the network interface. :type groups: list :param groups: Lists the groups for use by the network interface. This can be either a list of group ID's or a list of :class:`boto.ec2.securitygroup.SecurityGroup` objects. :rtype: :class:`boto.ec2.networkinterface.NetworkInterface` :return: The newly created network interface. """
params = {'SubnetId' : subnet_id} if private_ip_address: params['PrivateIpAddress'] = private_ip_address if description: params['Description'] = description if groups: ids = [] for group in groups: if isinstance(group, SecurityGroup): ids.append(group.id) else: ids.append(group) self.build_list_params(params, ids, 'SecurityGroupId') return self.get_object('CreateNetworkInterface', params, NetworkInterface, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attach_network_interface(self, network_interface_id, instance_id, device_index): """ Attaches a network interface to an instance. :type network_interface_id: str :param network_interface_id: The ID of the network interface to attach. :type instance_id: str :param instance_id: The ID of the instance that will be attached to the network interface. :type device_index: int :param device_index: The index of the device for the network interface attachment on the instance. """
params = {'NetworkInterfaceId' : network_interface_id,
          'InstanceId' : instance_id,
          'DeviceIndex' : device_index}
return self.get_status('AttachNetworkInterface', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def detach_network_interface(self, network_interface_id, force=False): """ Detaches a network interface from an instance. :type network_interface_id: str :param network_interface_id: The ID of the network interface to detach. :type force: bool :param force: Set to true to force a detachment. """
params = {'NetworkInterfaceId' : network_interface_id} if force: params['Force'] = 'true' return self.get_status('DetachNetworkInterface', params, verb='POST')
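A sketch tying the three network interface calls together; the subnet and instance IDs are illustrative only.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')

# Create a secondary interface in a subnet and attach it as device 1.
eni = conn.create_network_interface('subnet-12345678',
                                    description='secondary interface')
conn.attach_network_interface(eni.id, 'i-12345678', 1)
# ... later, force-detach it from the instance.
conn.detach_network_interface(eni.id, force=True)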
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_capacity(self, capacity): """ Set the desired capacity for the group. """
params = {'AutoScalingGroupName' : self.name, 'DesiredCapacity' : capacity} req = self.connection.get_object('SetDesiredCapacity', params, Request) self.connection.last_request = req return req
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shutdown_instances(self): """ Convenience method which shuts down all instances associated with this group. """
self.min_size = 0 self.max_size = 0 self.desired_capacity = 0 self.update()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, force_delete=False): """ Delete this auto-scaling group if no instances attached or no scaling activities in progress. """
return self.connection.delete_auto_scaling_group(self.name, force_delete)
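A sketch showing how the three group methods above might be used, assuming boto.ec2.autoscale provides connect_to_region and get_all_groups as in boto 2.x; the region and group name are placeholders.

import boto.ec2.autoscale

asconn = boto.ec2.autoscale.connect_to_region('us-east-1')
group = asconn.get_all_groups(names=['my-group'])[0]

group.set_capacity(4)        # ask for four running instances
group.shutdown_instances()   # later: drop min/max/desired capacity to zero
group.delete()               # finally remove the now-empty group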