<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search(self, term): """ Searches the PyPi repository for the given `term` and returns a dictionary of results. New in 2.1.5: returns a dictionary instead of list of tuples """
packages = {}
results = self._execute_pip(['search', term], log=False)  # Don't want to log searches
for result in results.split(linesep):
    try:
        name, description = result.split(six.u(' - '), 1)
    except ValueError:
        # '-' not in result so unable to split into tuple;
        # this could be from a multi-line description
        continue
    else:
        name = name.strip()
        if len(name) == 0:
            continue
        packages[name] = description.split(six.u('<br'), 1)[0].strip()
return packages
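A minimal usage sketch for the search helper above; the host class name and its constructor argument are assumptions for illustration only:

# Hypothetical usage -- `VirtualEnvironment` and its constructor are assumed names.
env = VirtualEnvironment('/tmp/venv')
for name, description in env.search('requests').items():
    print('%s: %s' % (name, description))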
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_zones(self, zones): """ Enable availability zones to this Access Point. All zones must be in the same region as the Access Point. :type zones: string or List of strings :param zones: The name of the zone(s) to add. """
if isinstance(zones, str) or isinstance(zones, unicode):
    zones = [zones]
new_zones = self.connection.enable_availability_zones(self.name, zones)
self.availability_zones = new_zones
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disable_zones(self, zones): """ Disable availability zones from this Access Point. All zones must be in the same region as the Access Point. :type zones: string or List of strings :param zones: The name of the zone(s) to remove. """
if isinstance(zones, str) or isinstance(zones, unicode):
    zones = [zones]
new_zones = self.connection.disable_availability_zones(self.name, zones)
self.availability_zones = new_zones
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_instances(self, instances): """ Adds instances to this load balancer. All instances must be in the same region as the load balancer. Adding endpoints that are already registered with the load balancer has no effect. :param list instances: List of instance IDs (strings) that you'd like to add to this load balancer. """
if isinstance(instances, str) or isinstance(instances, unicode):
    instances = [instances]
new_instances = self.connection.register_instances(self.name, instances)
self.instances = new_instances
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frameclass(cls): """Register cls as a class representing an ID3 frame. Sets cls.frameid and cls._version if not present, and registers the new frame in Tag's known_frames dictionary. To be used as a decorator on the class definition: @frameclass class UFID(Frame): _framespec = (NullTerminatedStringSpec("owner"), BinaryDataSpec("data")) """
assert issubclass(cls, Frames.Frame)
# Register v2.2 versions of v2.3/v2.4 frames if encoded by inheritance.
if len(cls.__name__) == 3:
    base = cls.__bases__[0]
    if issubclass(base, Frames.Frame) and base._in_version(3, 4):
        assert not hasattr(base, "_v2_frame")
        base._v2_frame = cls
        # Override frameid from base with v2.2 name
        if base.frameid == cls.frameid:
            cls.frameid = cls.__name__
# Add frameid.
if not hasattr(cls, "frameid"):
    cls.frameid = cls.__name__
assert Tag._is_frame_id(cls.frameid)
# Supply _version attribute if missing.
if len(cls.frameid) == 3:
    cls._version = 2
if len(cls.frameid) == 4 and not cls._version:
    cls._version = (3, 4)
# Register cls as a known frame.
assert cls.frameid not in Tag.known_frames
Tag.known_frames[cls.frameid] = cls
return cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def key(self, frame):
    "Return the sort key for the given frame."
    def keytuple(primary):
        if frame.frameno is None:
            return (primary, 1)
        return (primary, 0, frame.frameno)
    # Look up frame by exact match
    if type(frame) in self.frame_keys:
        return keytuple(self.frame_keys[type(frame)])
    # Look up parent frame for v2.2 frames
    if frame._in_version(2) and type(frame).__bases__[0] in self.frame_keys:
        return keytuple(self.frame_keys[type(frame).__bases__[0]])
    # Try each pattern
    for (pattern, key) in self.re_keys:
        if re.match(pattern, frame.frameid):
            return keytuple(key)
    return keytuple(self.unknown_key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frames(self, key=None, orig_order=False): """Returns a list of frames in this tag. If KEY is None, returns all frames in the tag; otherwise returns all frames whose frameid matches KEY. If ORIG_ORDER is True, then the frames are returned in their original order. Otherwise the frames are sorted in canonical order according to the frame_order field of this tag. """
if key is not None:
    # If there are multiple frames, then they are already in original order.
    key = self._normalize_key(key)
    if len(self._frames[key]) == 0:
        raise KeyError("Key not found: " + repr(key))
    return self._frames[key]
frames = []
for frameid in self._frames.keys():
    for frame in self._frames[frameid]:
        frames.append(frame)
if orig_order:
    key = (lambda frame: (0, frame.frameno)
           if frame.frameno is not None else (1,))
else:
    key = self.frame_order.key
frames.sort(key=key)
return frames
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(cls, filename, offset=0): """Read an ID3v2 tag from a file."""
i = 0
with fileutil.opened(filename, "rb") as file:
    file.seek(offset)
    tag = cls()
    tag._read_header(file)
    for (frameid, bflags, data) in tag._read_frames(file):
        if len(data) == 0:
            warn("{0}: Ignoring empty frame".format(frameid),
                 EmptyFrameWarning)
        else:
            frame = tag._decode_frame(frameid, bflags, data, i)
            if frame is not None:
                l = tag._frames.setdefault(frame.frameid, [])
                l.append(frame)
        if file.tell() > tag.offset + tag.size:
            break
        i += 1
    try:
        tag._filename = file.name
    except AttributeError:
        pass
    return tag
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_email_grant(self, permission, email_address): """ Convenience method that provides a quick way to add an email grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google account to which you are granting the permission. """
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_user_grant(self, permission, user_id): """ Convenience method that provides a quick way to add a canonical user grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type user_id: string :param user_id: The canonical user id associated with the GS account to which you are granting the permission. """
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_group_email_grant(self, permission, email_address, headers=None): """ Convenience method that provides a quick way to add an email group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission. """
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_group_grant(self, permission, group_id): """ Convenience method that provides a quick way to add a canonical group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type group_id: string :param group_id: The canonical group id associated with the Google Groups account you are granting the permission to. """
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, res_upload_handler=None, size=None): """ Store an object in GS using the name of the Key object as the key in GS and the contents of the file pointed to by 'fp' as the contents. :type fp: file :param fp: the file whose contents are to be uploaded :type headers: dict :param headers: additional HTTP headers to be sent with the PUT request. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the total number of bytes that need to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type res_upload_handler: ResumableUploadHandler :param res_upload_handler: If provided, this handler will perform the upload. :type size: int :param size: (optional) The maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Fewer bytes may be available. Notes: 1. The "size" parameter currently cannot be used when a resumable upload handler is given but is still useful for uploading part of a file as implemented by the parent class. 2. At present Google Cloud Storage does not support multipart uploads. TODO: At some point we should refactor the Bucket and Key classes, to move functionality common to all providers into a parent class, and provider-specific functionality into subclasses (rather than just overriding/sharing code the way it currently works). """
provider = self.bucket.connection.provider
if res_upload_handler and size:
    # could use size instead of file_length if provided but...
    raise BotoClientError('"size" param not supported for '
                          'resumable uploads.')
headers = headers or {}
if policy:
    headers[provider.acl_header] = policy
if hasattr(fp, 'name'):
    self.path = fp.name
if self.bucket != None:
    if not md5:
        # compute_md5() and also set self.size to actual
        # size of the bytes read computing the md5.
        md5 = self.compute_md5(fp, size)
        # adjust size if required
        size = self.size
    elif size:
        self.size = size
    else:
        # If md5 is provided, we still need the size, so
        # calculate it based on bytes to end of content.
        spos = fp.tell()
        fp.seek(0, os.SEEK_END)
        self.size = fp.tell() - spos
        fp.seek(spos)
        size = self.size
    self.md5 = md5[0]
    self.base64md5 = md5[1]
    if self.name == None:
        self.name = self.md5
    if not replace:
        if self.bucket.lookup(self.name):
            return
    if res_upload_handler:
        res_upload_handler.send_file(self, fp, headers, cb, num_cb)
    else:
        # Not a resumable transfer so use basic send_file mechanism.
        self.send_file(fp, headers, cb, num_cb, size=size)
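A hedged usage sketch for this upload method; the bucket, key, and file names are placeholders, and it assumes valid Google Cloud Storage credentials are already configured for boto:

import boto
from boto.gs.resumable_upload_handler import ResumableUploadHandler

bucket = boto.connect_gs().get_bucket('example-bucket')  # placeholder bucket
key = bucket.new_key('backups/data.bin')                 # placeholder key name
handler = ResumableUploadHandler(tracker_file_name='/tmp/upload.tracker')
with open('data.bin', 'rb') as fp:
    # Note: the size= parameter cannot be combined with a resumable handler.
    key.set_contents_from_file(fp, res_upload_handler=handler)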
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_contents_from_string(self, s, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None): """ Store an object in S3 using the name of the Key object as the key in S3 and the string 's' as the contents. See set_contents_from_file method for details about the parameters. :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :type replace: bool :param replace: If True, replaces the contents of the file if it already exists. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the total size of the object to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.s3.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in S3. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. """
if isinstance(s, unicode):
    s = s.encode("utf-8")
fp = StringIO.StringIO(s)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                policy, md5)
fp.close()
return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def describe_jobflow(self, jobflow_id): """ Describes a single Elastic MapReduce job flow :type jobflow_id: str :param jobflow_id: The job flow id of interest """
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
    return jobflows[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def describe_jobflows(self, states=None, jobflow_ids=None, created_after=None, created_before=None): """ Retrieve all the Elastic MapReduce job flows on your account :type states: list :param states: A list of strings with job flow states wanted :type jobflow_ids: list :param jobflow_ids: A list of job flow IDs :type created_after: datetime :param created_after: Bound on job flow creation time :type created_before: datetime :param created_before: Bound on job flow creation time """
params = {}
if states:
    self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
    self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
if created_after:
    params['CreatedAfter'] = created_after.strftime(boto.utils.ISO8601)
if created_before:
    params['CreatedBefore'] = created_before.strftime(boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def terminate_jobflows(self, jobflow_ids): """ Terminate an Elastic MapReduce job flow :type jobflow_ids: list :param jobflow_ids: A list of job flow IDs """
params = {}
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
return self.get_status('TerminateJobFlows', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_jobflow_steps(self, jobflow_id, steps): """ Adds steps to a jobflow :type jobflow_id: str :param jobflow_id: The job flow id :type steps: list(boto.emr.Step) :param steps: A list of steps to add to the job """
if type(steps) != types.ListType:
    steps = [steps]
params = {}
params['JobFlowId'] = jobflow_id
# Step args
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
return self.get_object('AddJobFlowSteps', params,
                       RunJobFlowResponse, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_instance_groups(self, jobflow_id, instance_groups): """ Adds instance groups to a running cluster. :type jobflow_id: str :param jobflow_id: The id of the jobflow which will take the new instance groups :type instance_groups: list(boto.emr.InstanceGroup) :param instance_groups: A list of instance groups to add to the job """
if type(instance_groups) != types.ListType:
    instance_groups = [instance_groups]
params = {}
params['JobFlowId'] = jobflow_id
params.update(self._build_instance_group_list_args(instance_groups))
return self.get_object('AddInstanceGroups', params,
                       AddInstanceGroupsResponse, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def modify_instance_groups(self, instance_group_ids, new_sizes): """ Modify the number of nodes and configuration settings in an instance group. :type instance_group_ids: list(str) :param instance_group_ids: A list of the ID's of the instance groups to be modified :type new_sizes: list(int) :param new_sizes: A list of the new sizes for each instance group """
if type(instance_group_ids) != types.ListType:
    instance_group_ids = [instance_group_ids]
if type(new_sizes) != types.ListType:
    new_sizes = [new_sizes]
instance_groups = zip(instance_group_ids, new_sizes)
params = {}
for k, ig in enumerate(instance_groups):
    # could be wrong - the example amazon gives uses
    # InstanceRequestCount, while the api documentation
    # says InstanceCount
    params['InstanceGroups.member.%d.InstanceGroupId' % (k+1)] = ig[0]
    params['InstanceGroups.member.%d.InstanceCount' % (k+1)] = ig[1]
return self.get_object('ModifyInstanceGroups', params,
                       ModifyInstanceGroupsResponse, verb='POST')
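A short usage sketch for resizing instance groups; the region, group IDs, and target sizes are placeholders:

import boto.emr

conn = boto.emr.connect_to_region('us-east-1')
# Parallel lists: each group ID is paired with its new target size.
conn.modify_instance_groups(['ig-EXAMPLE1', 'ig-EXAMPLE2'], [10, 4])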
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_termination_protection(self, jobflow_id, termination_protection_status): """ Set termination protection on the specified Elastic MapReduce job flow :type jobflow_id: str :param jobflow_id: The job flow ID :type termination_protection_status: bool :param termination_protection_status: Termination protection status """
assert termination_protection_status in (True, False)
params = {}
params['TerminationProtected'] = (termination_protection_status and
                                  "true") or "false"
self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
return self.get_status('SetTerminationProtection', params, verb='POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_instance_group_args(self, instance_group): """ Takes an InstanceGroup; returns a dict that, when its keys are properly prefixed, can be used for describing InstanceGroups in RunJobFlow or AddInstanceGroups requests. """
params = {'InstanceCount': instance_group.num_instances,
          'InstanceRole': instance_group.role,
          'InstanceType': instance_group.type,
          'Name': instance_group.name,
          'Market': instance_group.market}
if instance_group.market == 'SPOT':
    params['BidPrice'] = instance_group.bidprice
return params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_instance_group_list_args(self, instance_groups): """ Takes a list of InstanceGroups, or a single InstanceGroup. Returns a comparable dict for use in making a RunJobFlow or AddInstanceGroups request. """
if type(instance_groups) != types.ListType:
    instance_groups = [instance_groups]
params = {}
for i, instance_group in enumerate(instance_groups):
    ig_dict = self._build_instance_group_args(instance_group)
    for key, value in ig_dict.iteritems():
        params['InstanceGroups.member.%d.%s' % (i+1, key)] = value
return params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_hosted_zones(self, start_marker=None, zone_list=None): """ Returns a Python data structure with information about all Hosted Zones defined for the AWS account. :param int start_marker: start marker to pass when fetching additional results after a truncated list :param list zone_list: a HostedZones list to prepend to results """
params = {}
if start_marker:
    params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
                             params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
    raise exception.DNSServerError(response.status,
                                   response.reason,
                                   body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
                              item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
    e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while e['ListHostedZonesResponse'].has_key('NextMarker'):
    next_marker = e['ListHostedZonesResponse']['NextMarker']
    zone_list = e['ListHostedZonesResponse']['HostedZones']
    e = self.get_all_hosted_zones(next_marker, zone_list)
return e
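A brief sketch of walking the structure returned above, assuming Route 53 credentials are configured for boto; the dictionary keys follow the list/item markers set up in the implementation:

import boto

conn = boto.connect_route53()
zones = conn.get_all_hosted_zones()
for zone in zones['ListHostedZonesResponse']['HostedZones']:
    print(zone['Name'], zone['Id'])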
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_change(self, change_id): """ Get information about a proposed set of changes, as submitted by the change_rrsets method. Returns a Python data structure with status information about the changes. :type change_id: str :param change_id: The unique identifier for the set of changes. This ID is returned in the response to the change_rrsets method. """
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
    raise exception.DNSServerError(response.status,
                                   response.reason,
                                   body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_best_instruction(addr, cpu_name, meta=None): """Given an instruction and meta information this attempts to find the best instruction for the frame. In some circumstances we can fix it up a bit to improve the accuracy. For more information see `symbolize_frame`. """
addr = rv = parse_addr(addr)
# In case we're not on the crashing frame we apply a simple heuristic:
# since we're most likely dealing with return addresses we just assume
# that the call is one instruction behind the current one.
if not meta or meta.get('frame_number') != 0:
    rv = get_previous_instruction(addr, cpu_name)
# In case registers are available we can check if the PC register
# does not match the given address we have from the first frame.
# If that is the case and we got one of a few signals that are likely
# it seems that going with one instruction back is actually the
# correct thing to do.
else:
    regs = meta.get('registers')
    ip = get_ip_register(regs, cpu_name)
    if ip is not None and ip != addr and \
       meta.get('signal') in (SIGILL, SIGBUS, SIGSEGV):
        rv = get_previous_instruction(addr, cpu_name)
# Don't ask me why we do this, but apparently on arm we get better
# hits if we look at the end of an instruction in the DWARF file than
# the beginning.
return round_to_instruction_end(rv, cpu_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list(self, prefix='', delimiter='', marker='', headers=None): """ List key objects within a bucket. This returns an instance of an BucketListResultSet that automatically handles all of the result paging, etc. from S3. You just need to keep iterating until there are no more results. Called with no arguments, this will return an iterator object across all keys within the bucket. The Key objects returned by the iterator are obtained by parsing the results of a GET on the bucket, also known as the List Objects request. The XML returned by this request contains only a subset of the information about each key. Certain metadata fields such as Content-Type and user metadata are not available in the XML. Therefore, if you want these additional metadata fields you will have to do a HEAD request on the Key in the bucket. :type prefix: string :param prefix: allows you to limit the listing to a particular prefix. For example, if you call the method with prefix='/foo/' then the iterator will only cycle through the keys that begin with the string '/foo/'. :type delimiter: string :param delimiter: can be used in conjunction with the prefix to allow you to organize and browse your keys hierarchically. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ for more details. :type marker: string :param marker: The "marker" of where you are in the result set :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """
return BucketListResultSet(self, prefix, delimiter, marker, headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_key(self, new_key_name, src_bucket_name, src_key_name, metadata=None, src_version_id=None, storage_class='STANDARD', preserve_acl=False, encrypt_key=False, headers=None, query_args=None): """ Create a new key in the bucket by copying another existing key. :type new_key_name: string :param new_key_name: The name of the new key :type src_bucket_name: string :param src_bucket_name: The name of the source bucket :type src_key_name: string :param src_key_name: The name of the source key :type src_version_id: string :param src_version_id: The version id for the key. This param is optional. If not specified, the newest version of the key will be copied. :type metadata: dict :param metadata: Metadata to be associated with new key. If metadata is supplied, it will replace the metadata of the source key being copied. If no metadata is supplied, the source key's metadata will be copied to the new key. :type storage_class: string :param storage_class: The storage class of the new key. By default, the new key will use the standard storage class. Possible values are: STANDARD | REDUCED_REDUNDANCY :type preserve_acl: bool :param preserve_acl: If True, the ACL from the source key will be copied to the destination key. If False, the destination key will have the default ACL. Note that preserving the ACL in the new key object will require two additional API calls to S3, one to retrieve the current ACL and one to set that ACL on the new object. If you don't care about the ACL, a value of False will be significantly more efficient. :type encrypt_key: bool :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type headers: dict :param headers: A dictionary of header name/value pairs. :type query_args: string :param query_args: A string of additional querystring arguments to append to the request :rtype: :class:`boto.s3.key.Key` or subclass :returns: An instance of the newly created key object """
headers = headers or {}
provider = self.connection.provider
src_key_name = boto.utils.get_utf8_value(src_key_name)
if preserve_acl:
    if self.name == src_bucket_name:
        src_bucket = self
    else:
        src_bucket = self.connection.get_bucket(src_bucket_name)
    acl = src_bucket.get_xml_acl(src_key_name)
if encrypt_key:
    headers[provider.server_side_encryption_header] = 'AES256'
src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
if src_version_id:
    src += '?versionId=%s' % src_version_id
headers[provider.copy_source_header] = str(src)
# make sure storage_class_header key exists before accessing it
if provider.storage_class_header and storage_class:
    headers[provider.storage_class_header] = storage_class
if metadata:
    headers[provider.metadata_directive_header] = 'REPLACE'
    headers = boto.utils.merge_meta(headers, metadata, provider)
elif not query_args:
    # Can't use this header with multi-part copy.
    headers[provider.metadata_directive_header] = 'COPY'
response = self.connection.make_request('PUT', self.name, new_key_name,
                                        headers=headers,
                                        query_args=query_args)
body = response.read()
if response.status == 200:
    key = self.new_key(new_key_name)
    h = handler.XmlHandler(key, self)
    xml.sax.parseString(body, h)
    if hasattr(key, 'Error'):
        raise provider.storage_copy_error(key.Code, key.Message, body)
    key.handle_version_headers(response)
    if preserve_acl:
        self.set_xml_acl(acl, new_key_name)
    return key
else:
    raise provider.storage_response_error(response.status,
                                          response.reason, body)
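A hedged usage sketch for a server-side copy; the bucket and key names are placeholders:

import boto

s3 = boto.connect_s3()
dst_bucket = s3.get_bucket('destination-bucket')  # placeholder name
new_key = dst_bucket.copy_key('reports/2012.csv',
                              'source-bucket', 'raw/2012.csv',
                              preserve_acl=True)  # costs two extra ACL calls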
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_subresource(self, subresource, value, key_name = '', headers=None, version_id=None): """ Set a subresource for a bucket or key. :type subresource: string :param subresource: The subresource to set. :type value: string :param value: The value of the subresource. :type key_name: string :param key_name: The key to operate on, or None to operate on the bucket. :type headers: dict :param headers: Additional HTTP headers to include in the request. :type version_id: string :param version_id: Optional. The version id of the key to operate on. If not specified, operate on the newest version. """
if not subresource:
    raise TypeError('set_subresource called with subresource=None')
query_args = subresource
if version_id:
    query_args += '&versionId=%s' % version_id
response = self.connection.make_request('PUT', self.name, key_name,
                                        data=value.encode('UTF-8'),
                                        query_args=query_args,
                                        headers=headers)
body = response.read()
if response.status != 200:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_subresource(self, subresource, key_name='', headers=None, version_id=None): """ Get a subresource for a bucket or key. :type subresource: string :param subresource: The subresource to get. :type key_name: string :param key_name: The key to operate on, or None to operate on the bucket. :type headers: dict :param headers: Additional HTTP headers to include in the request. :type version_id: string :param version_id: Optional. The version id of the key to operate on. If not specified, operate on the newest version. :rtype: string :returns: The value of the subresource. """
if not subresource:
    raise TypeError('get_subresource called with subresource=None')
query_args = subresource
if version_id:
    query_args += '&versionId=%s' % version_id
response = self.connection.make_request('GET', self.name, key_name,
                                        query_args=query_args,
                                        headers=headers)
body = response.read()
if response.status != 200:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
return body
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_location(self): """ Returns the LocationConstraint for the bucket. :rtype: str :return: The LocationConstraint for the bucket or the empty string if no constraint was specified when bucket was created. """
response = self.connection.make_request('GET', self.name,
                                        query_args='location')
body = response.read()
if response.status == 200:
    rs = ResultSet(self)
    h = handler.XmlHandler(rs, self)
    xml.sax.parseString(body, h)
    return rs.LocationConstraint
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_versioning_status(self, headers=None): """ Returns the current status of versioning on the bucket. :rtype: dict :returns: A dictionary containing a key named 'Versioning' that can have a value of either Enabled, Disabled, or Suspended. Also, if MFADelete has ever been enabled on the bucket, the dictionary will contain a key named 'MFADelete' which will have a value of either Enabled or Suspended. """
response = self.connection.make_request('GET', self.name,
                                        query_args='versioning',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    d = {}
    ver = re.search(self.VersionRE, body)
    if ver:
        d['Versioning'] = ver.group(1)
    mfa = re.search(self.MFADeleteRE, body)
    if mfa:
        d['MfaDelete'] = mfa.group(1)
    return d
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
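A quick sketch of reading the returned dictionary; note that, per the implementation above, each key only appears once the corresponding feature has been used on the bucket:

status = bucket.get_versioning_status()
if status.get('Versioning') == 'Enabled':
    # 'MfaDelete' is absent if MFA delete was never enabled.
    print('versioning on; MFA delete: %s' % status.get('MfaDelete', 'n/a'))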
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_lifecycle_config(self, headers=None): """ Returns the current lifecycle configuration on the bucket. :rtype: :class:`boto.s3.lifecycle.Lifecycle` :returns: A LifecycleConfig object that describes all current lifecycle rules in effect for the bucket. """
response = self.connection.make_request('GET', self.name,
                                        query_args='lifecycle',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    lifecycle = Lifecycle(self)
    h = handler.XmlHandler(lifecycle, self)
    xml.sax.parseString(body, h)
    return lifecycle
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure_website(self, suffix, error_key='', headers=None): """ Configure this bucket to act as a website :type suffix: str :param suffix: Suffix that is appended to a request that is for a "directory" on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character. :type error_key: str :param error_key: The object key name to use when a 4XX class error occurs. This is optional. """
if error_key:
    error_frag = self.WebsiteErrorFragment % error_key
else:
    error_frag = ''
body = self.WebsiteBody % (suffix, error_frag)
response = self.connection.make_request('PUT', self.name, data=body,
                                        query_args='website',
                                        headers=headers)
body = response.read()
if response.status == 200:
    return True
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_website_configuration(self, headers=None): """ Returns the current status of website configuration on the bucket. :rtype: dict :returns: A dictionary containing a Python representation of the XML response from S3. The overall structure is: * WebsiteConfiguration * IndexDocument * Suffix : suffix that is appended to request that is for a "directory" on the website endpoint * ErrorDocument * Key : name of object to serve when an error occurs """
response = self.connection.make_request('GET', self.name,
                                        query_args='website',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    e = boto.jsonresponse.Element()
    h = boto.jsonresponse.XmlHandler(e, None)
    h.parse(body)
    return e
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_website_configuration(self, headers=None): """ Removes all website configuration from the bucket. """
response = self.connection.make_request('DELETE', self.name,
                                        query_args='website',
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
    return True
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_website_endpoint(self): """ Returns the fully qualified hostname to use if you want to access this bucket as a website. This doesn't validate whether the bucket has been correctly configured as a website or not. """
l = [self.name]
l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
l.append('.'.join(self.connection.host.split('.')[-2:]))
return '.'.join(l)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_policy(self, policy, headers=None): """ Add or replace the JSON policy associated with the bucket. :type policy: str :param policy: The JSON policy as a string. """
response = self.connection.make_request('PUT', self.name, data=policy,
                                        query_args='policy',
                                        headers=headers)
body = response.read()
if response.status >= 200 and response.status <= 204:
    return True
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initiate_multipart_upload(self, key_name, headers=None, reduced_redundancy=False, metadata=None, encrypt_key=False): """ Start a multipart upload operation. :type key_name: string :param key_name: The name of the key that will ultimately result from this multipart upload operation. This will be exactly as the key appears in the bucket after the upload process has been completed. :type headers: dict :param headers: Additional HTTP headers to send and store with the resulting key in S3. :type reduced_redundancy: boolean :param reduced_redundancy: In multipart uploads, the storage class is specified when initiating the upload, not when uploading individual parts. So if you want the resulting key to use the reduced redundancy storage class set this flag when you initiate the upload. :type metadata: dict :param metadata: Any metadata that you would like to set on the key that results from the multipart upload. :type encrypt_key: bool :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. """
query_args = 'uploads'
provider = self.connection.provider
if headers is None:
    headers = {}
if reduced_redundancy:
    storage_class_header = provider.storage_class_header
    if storage_class_header:
        headers[storage_class_header] = 'REDUCED_REDUNDANCY'
    # TODO: what if the provider doesn't support reduced redundancy?
    # (see boto.s3.key.Key.set_contents_from_file)
if encrypt_key:
    headers[provider.server_side_encryption_header] = 'AES256'
if metadata is None:
    metadata = {}
headers = boto.utils.merge_meta(headers, metadata,
                                self.connection.provider)
response = self.connection.make_request('POST', self.name, key_name,
                                        query_args=query_args,
                                        headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
    resp = MultiPartUpload(self)
    h = handler.XmlHandler(resp, self)
    xml.sax.parseString(body, h)
    return resp
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def complete_multipart_upload(self, key_name, upload_id, xml_body, headers=None): """ Complete a multipart upload operation. """
query_args = 'uploadId=%s' % upload_id
if headers is None:
    headers = {}
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('POST', self.name, key_name,
                                        query_args=query_args,
                                        headers=headers, data=xml_body)
contains_error = False
body = response.read()
# Some errors will be reported in the body of the response
# even though the HTTP response code is 200. This check
# does a quick and dirty peek in the body for an error element.
if body.find('<Error>') > 0:
    contains_error = True
boto.log.debug(body)
if response.status == 200 and not contains_error:
    resp = CompleteMultiPartUpload(self)
    h = handler.XmlHandler(resp, self)
    xml.sax.parseString(body, h)
    return resp
else:
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
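A hedged end-to-end sketch connecting initiate_multipart_upload to this completion step; the MultiPartUpload helper returned by the initiate call builds the XML part list that complete_multipart_upload expects (file and key names are placeholders):

mp = bucket.initiate_multipart_upload('big/archive.tar')
with open('archive.tar', 'rb') as fp:
    # On S3, every part except the last must be at least 5 MB.
    mp.upload_part_from_file(fp, part_num=1)
mp.complete_upload()  # POSTs the assembled part list as xml_body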
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_http_connection(self, host, is_secure): """ Gets a connection from the pool for the named host. Returns None if there is no connection that can be reused. """
if is_secure:
    return AsyncHTTPSConnection(host, http_client=self._httpclient)
else:
    return AsyncHTTPConnection(host, http_client=self._httpclient)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verify(self, secret_key): """ Verifies the authenticity of a notification message. TODO: This is doing a form of authentication and this functionality should really be merged with the pluggable authentication mechanism at some point. """
verification_input = NotificationMessage.SERVICE_NAME
verification_input += NotificationMessage.OPERATION_NAME
verification_input += self.timestamp
h = hmac.new(key=secret_key, digestmod=sha)
h.update(verification_input)
signature_calc = base64.b64encode(h.digest())
return self.signature == signature_calc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, validate=False): """ Update the image's state information by making a call to fetch the current image attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the image the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """
rs = self.connection.get_all_images([self.id])
if len(rs) > 0:
    img = rs[0]
    if img.id == self.id:
        self._update(img)
elif validate:
    raise ValueError('%s is not a valid Image ID' % self.id)
return self.state
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_xml(self): """ Returns a string containing the XML version of the Lifecycle configuration as defined by S3. """
s = '<LifecycleConfiguration>'
for rule in self:
    s += rule.to_xml()
s += '</LifecycleConfiguration>'
return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def download(request):
    '''Saves image from URL and returns ID for use with AJAX script'''
    f = FileUpload()
    f.title = request.GET['title'] or 'untitled'
    f.description = request.GET['description']
    url = urllib.unquote(request.GET['photo'])
    file_content = urllib.urlopen(url).read()
    file_name = url.split('/')[-1]
    f.save_upload_file(file_name, file_content)
    f.save()
    return HttpResponse('%s' % (f.id))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def slug(cls): """ Return slug suitable for accessing this view in a URLconf. """
slug = cls.__name__.lower()
if slug.endswith('view'):
    slug = slug[:-4]
return slug
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def link_text(cls): """ Return link text for this view. """
link = cls.__name__
if link.endswith('View'):
    link = link[:-4]
return link
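A small, self-contained illustration of the two naming rules above; the view class name is hypothetical:

class DashboardView(object):
    @classmethod
    def slug(cls):
        s = cls.__name__.lower()
        return s[:-4] if s.endswith('view') else s

    @classmethod
    def link_text(cls):
        n = cls.__name__
        return n[:-4] if n.endswith('View') else n

assert DashboardView.slug() == 'dashboard'
assert DashboardView.link_text() == 'Dashboard'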
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, expected_value=None, return_values=None): """ Commits pending updates to Amazon DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name/value pairs before they were updated. Possible values are: None, 'ALL_OLD', 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. If 'ALL_NEW' is specified, then all the attributes of the new version of the item are returned. If 'UPDATED_NEW' is specified, the new versions of only the updated attributes are returned. """
return self.table.layer2.update_item(self, expected_value, return_values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, expected_value=None, return_values=None): """ Delete the item from DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before they were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is deleted, the content of the old item is returned. """
return self.table.layer2.delete_item(self, expected_value, return_values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ Connect to an EC2 Api endpoint. Additional arguments are passed through to connect_ec2. :type url: string :param url: A url for the ec2 api endpoint to connect to :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to Eucalyptus server """
from boto.ec2.regioninfo import RegionInfo
purl = urlparse.urlparse(url)
kwargs['port'] = purl.port
kwargs['host'] = purl.hostname
kwargs['path'] = purl.path
if 'is_secure' not in kwargs:
    kwargs['is_secure'] = (purl.scheme == "https")
kwargs['region'] = RegionInfo(name=purl.hostname,
                              endpoint=purl.hostname)
kwargs['aws_access_key_id'] = aws_access_key_id
kwargs['aws_secret_access_key'] = aws_secret_access_key
return connect_ec2(**kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Walrus', is_secure=False, **kwargs): """ Connect to a Walrus service. :type host: string :param host: the host name or ip address of the Walrus server :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to Walrus """
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
    aws_access_key_id = config.get('Credentials',
                                   'euca_access_key_id', None)
if not aws_secret_access_key:
    aws_secret_access_key = config.get('Credentials',
                                       'euca_secret_access_key', None)
if not host:
    host = config.get('Boto', 'walrus_host', None)
return S3Connection(aws_access_key_id, aws_secret_access_key,
                    host=host, port=port, path=path,
                    calling_format=OrdinaryCallingFormat(),
                    is_secure=is_secure, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect_ia(ia_access_key_id=None, ia_secret_access_key=None, is_secure=False, **kwargs): """ Connect to the Internet Archive via their S3-like API. :type ia_access_key_id: string :param ia_access_key_id: Your IA Access Key ID. This will also look in your boto config file for an entry in the Credentials section called "ia_access_key_id" :type ia_secret_access_key: string :param ia_secret_access_key: Your IA Secret Access Key. This will also look in your boto config file for an entry in the Credentials section called "ia_secret_access_key" :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to the Internet Archive """
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
access_key = config.get('Credentials', 'ia_access_key_id',
                        ia_access_key_id)
secret_key = config.get('Credentials', 'ia_secret_access_key',
                        ia_secret_access_key)
return S3Connection(access_key, secret_key,
                    host='s3.us.archive.org',
                    calling_format=OrdinaryCallingFormat(),
                    is_secure=is_secure, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def storage_uri(uri_str, default_scheme='file', debug=0, validate=True, bucket_storage_uri_class=BucketStorageUri, suppress_consec_slashes=True): """ Instantiate a StorageUri from a URI string. :type uri_str: string :param uri_str: URI naming bucket + optional object. :type default_scheme: string :param default_scheme: default scheme for scheme-less URIs. :type debug: int :param debug: debug level to pass in to boto connection (range 0..2). :type validate: bool :param validate: whether to check for bucket name validity. :type bucket_storage_uri_class: BucketStorageUri interface. :param bucket_storage_uri_class: Allows mocking for unit tests. :param suppress_consec_slashes: If provided, controls whether consecutive slashes will be suppressed in key paths. We allow validate to be disabled to allow caller to implement bucket-level wildcarding (outside the boto library; see gsutil). :rtype: :class:`boto.StorageUri` subclass :return: StorageUri subclass for given URI. ``uri_str`` must be one of the following formats: * gs://bucket/name * s3://bucket/name * gs://bucket * s3://bucket * filename The last example uses the default scheme ('file', unless overridden) """
# Manually parse URI components instead of using urlparse.urlparse because
# what we're calling URIs don't really fit the standard syntax for URIs
# (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
    # Check for common error: user specifies gs:bucket instead
    # of gs://bucket. Some URI parsers allow this, but it can cause
    # confusion for callers, so we don't.
    if uri_str.find(':') != -1:
        raise InvalidUriError('"%s" contains ":" instead of "://"' %
                              uri_str)
    scheme = default_scheme.lower()
    path = uri_str
else:
    scheme = uri_str[0:end_scheme_idx].lower()
    path = uri_str[end_scheme_idx + 3:]
if scheme not in ['file', 's3', 'gs']:
    raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
    # For file URIs we have no bucket name, and use the complete path
    # (minus 'file://') as the object name.
    is_stream = False
    if path == '-':
        is_stream = True
    return FileStorageUri(path, debug, is_stream)
else:
    path_parts = path.split('/', 1)
    bucket_name = path_parts[0]
    if (validate and bucket_name and
        # Disallow buckets violating charset or not [3..255] chars total.
        (not re.match('^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$', bucket_name)
         # Disallow buckets with individual DNS labels longer than 63.
         or re.search('[-_a-z0-9]{64}', bucket_name))):
        raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
    # If enabled, ensure the bucket name is valid, to avoid possibly
    # confusing other parts of the code. (For example if we didn't
    # catch bucket names containing ':', when a user tried to connect to
    # the server with that name they might get a confusing error about
    # non-integer port numbers.)
    object_name = ''
    if len(path_parts) > 1:
        object_name = path_parts[1]
    return bucket_storage_uri_class(
        scheme, bucket_name, object_name, debug,
        suppress_consec_slashes=suppress_consec_slashes)
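A brief sketch of the accepted URI forms; the bucket and file names are placeholders:

uri = storage_uri('gs://example-bucket/path/to/object')
print(uri.scheme, uri.bucket_name, uri.object_name)

local = storage_uri('somefile.txt')  # falls back to the default 'file' scheme
stream = storage_uri('file://-')     # '-' selects a stream (stdin/stdout) URI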
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def storage_uri_for_key(key): """Returns a StorageUri for the given key. :type key: :class:`boto.s3.key.Key` or subclass :param key: URI naming bucket + optional object. """
if not isinstance(key, boto.s3.key.Key):
    raise InvalidUriError('Requested key (%s) is not a subclass of '
                          'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_tags_users(self, id_): """ Get the users tagged with the tag specified by id_ """
return _get_request(_TAGS_USERS.format(c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, at=self.access_token))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_startup(self, id_): """ Get startup based on id """
return _get_request(_STARTUP.format(c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, at=self.access_token))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_startup_comments(self, id_): """ Retrieve the comments of a particular startup """
return _get_request(_STARTUP_C.format(c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, at=self.access_token))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_startups_filtered_by(self, filter_='raising'): """ Get startups matching the given filter; the default, 'raising', returns companies currently raising funding """
url = _STARTUP_RAISING.format(c_api=_C_API_BEGINNING, api=_API_VERSION, filter_=filter_, at=self.access_token) return _get_request(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_status_updates(self, startup_id): """ Get status updates of a startup """
return _get_request(_STATUS_U.format(c_api=_C_API_BEGINNING, api=_API_VERSION, startup_id=startup_id, at=self.access_token))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_search_for_slugs(self, slug): """ Search for a particular slug """
return _get_request(_SLUG_SEARCH.format(c_api=_C_API_BEGINNING, api=_API_VERSION, slug=_format_query(slug), at=self.access_token))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_reviews(self, user_id): """ Get reviews for a particular user """
url = _REVIEWS_USER.format(c_api=_C_API_BEGINNING, api=_API_VERSION, user_id=user_id, at=self.access_token) return _get_request(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_review_id(self, id_): """ Get a particular review by its id, independent of the user_id and startup_id """
return _get_request(_REVIEW_ID.format(c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, at=self.access_token))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def regions(): """ Get all available regions for the SDB service. :rtype: list :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances """
return [SDBRegionInfo(name='us-east-1', endpoint='sdb.amazonaws.com'), SDBRegionInfo(name='eu-west-1', endpoint='sdb.eu-west-1.amazonaws.com'), SDBRegionInfo(name='us-west-1', endpoint='sdb.us-west-1.amazonaws.com'), SDBRegionInfo(name='sa-east-1', endpoint='sdb.sa-east-1.amazonaws.com'), SDBRegionInfo(name='us-west-2', endpoint='sdb.us-west-2.amazonaws.com'), SDBRegionInfo(name='ap-northeast-1', endpoint='sdb.ap-northeast-1.amazonaws.com'), SDBRegionInfo(name='ap-southeast-1', endpoint='sdb.ap-southeast-1.amazonaws.com') ]
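A hedged example of selecting one of these regions and opening a connection; RegionInfo.connect() is assumed to behave as in mainline boto, where SDBRegionInfo wires connection_cls to SDBConnection:

eu = next(r for r in regions() if r.name == 'eu-west-1')
conn = eu.connect()   # returns an SDBConnection bound to eu-west-1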
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_init_script(self, file, name): """ Write the given script contents to the init.d directory under the given name and register it with update-rc.d """
f_path = os.path.join("/etc/init.d", name) f = open(f_path, "w") f.write(file) f.close() os.chmod(f_path, stat.S_IREAD| stat.S_IWRITE | stat.S_IEXEC) self.run("/usr/sbin/update-rc.d %s defaults" % name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_user(self, user): """ Create a user on the local system """
self.run("useradd -m %s" % user) usr = getpwnam(user) return usr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def sync_object(src_obj, dest_repo, export_context='migrate', overwrite=False, show_progress=False, requires_auth=False, omit_checksums=False, verify=False): '''Copy an object from one repository to another using the Fedora export functionality. :param src_obj: source :class:`~eulfedora.models.DigitalObject` to be copied :param dest_repo: destination :class:`~eulfedora.server.Repository` where the object will be copied to :param export_context: Fedora export format to use, one of "migrate" or "archive"; migrate is generally faster, but requires access from destination repository to source and may result in checksum errors for some content; archive exports take longer to process (default: migrate) :param overwrite: if an object with the same pid is already present in the destination repository, it will be removed only if overwrite is set to true (default: false) :param show_progress: if True, displays a progress bar with content size, progress, speed, and ETA (only applicable to archive exports) :param requires_auth: content datastreams require authentication, and should have credentials patched in (currently only supported in archive-xml export mode) (default: False) :param omit_checksums: scrubs contentDigest -- aka checksums -- from datastreams; helpful for datastreams with Redirect (R) or External (E) contexts (default: False) :returns: result of Fedora ingest on the destination repository on success ''' # NOTE: currently exceptions are expected to be handled by the # calling method; see repo-cp script for an example # if overwrite is not requested, check first and bail out dest_obj = dest_repo.get_object(src_obj.pid) if not overwrite and dest_obj.exists: logger.info('%s exists in destination repo and no overwrite; skipping', src_obj.pid) return False if show_progress and progressbar: # calculate rough estimate of object size size_estimate = estimate_object_size(src_obj, archive=(export_context in ['archive', 'archive-xml'])) # create a new progress bar with current pid and size widgets = [src_obj.pid, ' Estimated size: %s // ' % humanize_file_size(size_estimate), 'Read: ', progressbar.widgets.DataSize(), ' ', progressbar.widgets.AdaptiveTransferSpeed(), ' ', '| Uploaded: ', progressbar.widgets.DataSize(value='upload'), ' // ', # FileTransferSpeed('upload'), currently no way to track upload speed... 
progressbar.widgets.Timer(), ' | ', progressbar.widgets.AdaptiveETA()]

    class DownUpProgressBar(progressbar.ProgressBar):
        upload = 0

        def data(self):
            data = super(DownUpProgressBar, self).data()
            data['upload'] = self.upload
            return data

    pbar = DownUpProgressBar(widgets=widgets, max_value=size_estimate)
else:
    pbar = None

# migrate export can simply be read and uploaded to dest fedora
if export_context == 'migrate':
    response = src_obj.api.export(src_obj, context=export_context, stream=True)
    export_data = response.iter_content(4096*1024)

# archive export needs additional processing to handle large binary content
elif export_context in ['archive', 'archive-xml']:
    export = ArchiveExport(src_obj, dest_repo, progress_bar=pbar,
                           requires_auth=requires_auth,
                           xml_only=(export_context == 'archive-xml'),
                           verify=verify)
    # NOTE: should be possible to pass BytesIO to be read, but that is failing
    export_data = export.object_data().getvalue()

else:
    raise Exception('Unsupported export context %s' % export_context)

# wipe checksums from FOXML if flagged in options
if omit_checksums:
    checksum_re = re.compile(b'<foxml:contentDigest.+?/>')
    try:
        # export data is either a bytestring (archive export) ...
        export_data = checksum_re.sub(b'', export_data)
    except TypeError:
        # ... or a generator of byte chunks (migrate export)
        export_data = (checksum_re.sub(b'', chunk) for chunk in export_data)

if overwrite and dest_obj.exists:
    logger.debug('Overwriting %s in destination repository', src_obj.pid)
    dest_repo.purge_object(src_obj.pid)

result = dest_repo.ingest(export_data)
if pbar:
    pbar.finish()
return force_text(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def binarycontent_sections(chunk): '''Split a chunk of data into sections by start and end binary content tags.''' # using string split because it is significantly faster than regex. # use common text of start and end tags to split the text # (i.e. without < or </ tag beginning) binary_content_tag = BINARY_CONTENT_START[1:] if binary_content_tag not in chunk: # if no tags are present, don't do any extra work yield chunk else: # split on common portion of foxml:binaryContent sections = chunk.split(binary_content_tag) for sec in sections: extra = b'' # check the end of the section to determine start/end tag if sec.endswith(b'</'): extra = sec[-2:] yield sec[:-2] elif sec.endswith(b'<'): extra = sec[-1:] yield sec[:-1] else: yield sec if extra: # yield the actual binary content tag # (delimiter removed by split, but needed for processing) yield b''.join([extra, binary_content_tag])
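A demonstrative trace, assuming BINARY_CONTENT_START = b'<foxml:binaryContent>' as in the surrounding module; note how the delimiters removed by split() are reassembled and yielded back:

chunk = b'<foxml:datastreamVersion><foxml:binaryContent>QUJD</foxml:binaryContent>'
list(binarycontent_sections(chunk))
# -> [b'<foxml:datastreamVersion>', b'<foxml:binaryContent>',
#     b'QUJD', b'</foxml:binaryContent>', b'']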
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def estimate_object_size(obj, archive=True): '''Calculate a rough estimate of object size, based on the sizes of all versions of all datastreams. If archive is true, adjusts the size estimate of managed datastreams for base64 encoded data. ''' size_estimate = 250000 # initial rough estimate for foxml size for ds in obj.ds_list: dsobj = obj.getDatastreamObject(ds) for version in dsobj.history().versions: if archive and version.control_group == 'M': size_estimate += base64_size(version.size) else: size_estimate += version.size return size_estimate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def endswith_partial(text, partial_str): '''Check if the text ends with any partial version of the specified string.''' # at the end of the content # we don't care about complete overlap, so start checking # for matches without the last character test_str = partial_str[:-1] # look for progressively smaller segments while test_str: if text.endswith(test_str): return len(test_str) test_str = test_str[:-1] return False
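A worked example: when streaming chunks, a closing tag may straddle a chunk boundary; endswith_partial detects the dangling prefix so those bytes can be held back until the next chunk arrives:

tag = b'</foxml:binaryContent>'
chunk = b'...base64 data...</foxml:bin'
overlap = endswith_partial(chunk, tag)   # 11, the length of b'</foxml:bin'
if overlap:
    held_back = chunk[-overlap:]         # defer until the next chunk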
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def object_data(self): '''Process the archival export and return a buffer with foxml content for ingest into the destination repository. :returns: :class:`io.BytesIO` for ingest, with references to uploaded datastream content or content location urls ''' self.foxml_buffer = io.BytesIO() if self.progress_bar: self.progress_bar.start() previous_section = None while True: try: section = self.get_next_section() except StopIteration: break if section == BINARY_CONTENT_START: self.within_file = True # get datastream info from the end of the section just before this one # (needed to provide size to upload request) dsinfo = self.get_datastream_info(previous_section) if dsinfo: logger.info('Found encoded datastream %(id)s (%(mimetype)s, size %(size)s, %(type)s %(digest)s)', dsinfo) else: # error if datastream info is not found, because either # size or version date is required to handle content raise Exception('Failed to find datastream information for %s from \n%s' \ % (self.obj.pid, previous_section)) if self.xml_only and not \ dsinfo['mimetype'] in ['text/xml', 'application/rdf+xml', 'application/xml']: # possibly other mimetypes also? try: dsid = dsinfo['id'].split('.')[0] except ValueError: # if dsid doesn't include a .# (for versioning), # use the id as is. dsid = dsinfo['id'] if self.url_credentials: # if url credentials are set, parse the base fedora api # url so they can be inserted at the right place parsed_url = urlparse(self.obj.api.base_url) # reassemble base url, adding in credentials base_url = ''.join([parsed_url.scheme, '://', self.url_credentials, parsed_url.netloc, parsed_url.path]) else: base_url = self.obj.api.base_url # versioned datastream dissemination url content_location = '%sobjects/%s/datastreams/%s/content?asOfDateTime=%s' % \ (base_url, self.obj.pid, dsid, dsinfo['created']) else: upload_args = {} if self.progress_bar: def upload_callback(monitor): self.progress_bar.upload = monitor.bytes_read upload_args = {'callback': upload_callback} # use upload id as content location content_location = self.dest_repo.api.upload(self.encoded_datastream(), size=int(dsinfo['size']), **upload_args) self.foxml_buffer.write(force_bytes('<foxml:contentLocation REF="%s" TYPE="URL"/>' \ % content_location)) elif section == BINARY_CONTENT_END: # should not occur here; this section will be processed by # encoded_datastream method self.within_file = False elif self.within_file: # should not occur here; this section will be pulled by # encoded_datastream method # binary content within a file - ignore here # (handled by encoded_datastream method) continue else: # not start or end of binary content, and not # within a file, so yield as is (e.g., datastream tags # between small files) self.foxml_buffer.write(section) previous_section = section return self.foxml_buffer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_md5(fp, buf_size=8192, size=None): """ Compute MD5 hash on passed file and return results in a tuple of values. :type fp: file :param fp: File pointer to the file to MD5 hash. The file pointer will be reset to its current location before the method returns. :type buf_size: integer :param buf_size: Number of bytes per read request. :type size: int :param size: (optional) The maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where the file is being split in place into different parts. Fewer bytes may be available. :rtype: tuple :return: A tuple containing the hex digest version of the MD5 hash as the first element, the base64 encoded version of the plain digest as the second element and the data size as the third element. """
m = md5() spos = fp.tell() if size and size < buf_size: s = fp.read(size) else: s = fp.read(buf_size) while s: m.update(s) if size: size -= len(s) if size <= 0: break if size and size < buf_size: s = fp.read(size) else: s = fp.read(buf_size) hex_md5 = m.hexdigest() base64md5 = base64.encodestring(m.digest()) if base64md5[-1] == '\n': base64md5 = base64md5[0:-1] # data_size based on bytes read. data_size = fp.tell() - spos fp.seek(spos) return (hex_md5, base64md5, data_size)
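Usage sketch (the file path is illustrative); note that the pointer is rewound, so the same handle can be passed straight to an upload:

fp = open('/tmp/payload.bin', 'rb')
hex_md5, b64_md5, size = compute_md5(fp)
# hex_md5 -> ETag-style check; b64_md5 -> Content-MD5 header; size -> bytes hashed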
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def regions(): """ Get all available regions for the CloudWatch service. :rtype: list :return: A list of :class:`boto.RegionInfo` instances """
regions = [] for region_name in RegionData: region = RegionInfo(name=region_name, endpoint=RegionData[region_name], connection_cls=CloudWatchConnection) regions.append(region) return regions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_metric_statistics(self, period, start_time, end_time, metric_name, namespace, statistics, dimensions=None, unit=None): """ Get time-series data for one or more statistics of a given metric. :type period: integer :param period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60. The default value is 60. :type start_time: datetime :param start_time: The time stamp to use for determining the first datapoint to return. The value specified is inclusive; results include datapoints with the time stamp specified. :type end_time: datetime :param end_time: The time stamp to use for determining the last datapoint to return. The value specified is exclusive; results will include datapoints up to the time stamp specified. :type metric_name: string :param metric_name: The metric name. :type namespace: string :param namespace: The metric's namespace. :type statistics: list :param statistics: A list of statistics names Valid values: Average | Sum | SampleCount | Maximum | Minimum :type dimensions: dict :param dimensions: A dictionary of dimension key/values where the key is the dimension name and the value is either a scalar value or an iterator of values to be associated with that dimension. :rtype: list """
params = {'Period' : period, 'MetricName' : metric_name, 'Namespace' : namespace, 'StartTime' : start_time.isoformat(), 'EndTime' : end_time.isoformat()} self.build_list_params(params, statistics, 'Statistics.member.%d') if dimensions: self.build_dimension_param(dimensions, params) return self.get_list('GetMetricStatistics', params, [('member', Datapoint)])
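A hedged sketch of a call against a live connection; connect_to_region, the metric name, and the instance id are the usual boto/CloudWatch conventions but should be treated as assumptions here:

import datetime
import boto.ec2.cloudwatch
cw = boto.ec2.cloudwatch.connect_to_region('us-east-1')
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=1)
datapoints = cw.get_metric_statistics(300, start, end, 'CPUUtilization',
                                      'AWS/EC2', ['Average'],
                                      dimensions={'InstanceId': 'i-12345678'})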
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_metrics(self, next_token=None, dimensions=None, metric_name=None, namespace=None): """ Returns a list of the valid metrics for which there is recorded data available. :type next_token: str :param next_token: A maximum of 500 metrics will be returned at one time. If more results are available, the ResultSet returned will contain a non-Null next_token attribute. Passing that token as a parameter to list_metrics will retrieve the next page of metrics. :type dimensions: dict :param dimensions: A dictionary containing name/value pairs that will be used to filter the results. The key in the dictionary is the name of a Dimension. The value in the dictionary is either a scalar value of that Dimension name that you want to filter on, a list of values to filter on or None if you want all metrics with that Dimension name. :type metric_name: str :param metric_name: The name of the Metric to filter against. If None, all Metric names will be returned. :type namespace: str :param namespace: A Metric namespace to filter against (e.g. AWS/EC2). If None, Metrics from all namespaces will be returned. """
params = {} if next_token: params['NextToken'] = next_token if dimensions: self.build_dimension_param(dimensions, params) if metric_name: params['MetricName'] = metric_name if namespace: params['Namespace'] = namespace return self.get_list('ListMetrics', params, [('member', Metric)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_metric_data(self, namespace, name, value=None, timestamp=None, unit=None, dimensions=None, statistics=None): """ Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch associates the data points with the specified metric. If the specified metric does not exist, Amazon CloudWatch creates the metric. If a list is specified for some, but not all, of the arguments, the remaining arguments are repeated a corresponding number of times. :type namespace: str :param namespace: The namespace of the metric. :type name: str or list :param name: The name of the metric. :type value: float or list :param value: The value for the metric. :type timestamp: datetime or list :param timestamp: The time stamp used for the metric. If not specified, the default value is set to the time the metric data was received. :type unit: string or list :param unit: The unit of the metric. Valid Values: Seconds | Microseconds | Milliseconds | Bytes | Kilobytes | Megabytes | Gigabytes | Terabytes | Bits | Kilobits | Megabits | Gigabits | Terabits | Percent | Count | Bytes/Second | Kilobytes/Second | Megabytes/Second | Gigabytes/Second | Terabytes/Second | Bits/Second | Kilobits/Second | Megabits/Second | Gigabits/Second | Terabits/Second | Count/Second | None :type dimensions: dict :param dimensions: Add extra name value pairs to associate with the metric, i.e.: {'name1': value1, 'name2': (value2, value3)} :type statistics: dict or list :param statistics: Use a statistic set instead of a value, for example:: {'maximum': 30, 'minimum': 1, 'samplecount': 100, 'sum': 10000} """
params = {'Namespace': namespace} self.build_put_params(params, name, value=value, timestamp=timestamp, unit=unit, dimensions=dimensions, statistics=statistics) return self.get_status('PutMetricData', params)
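A sketch of publishing a plain value and a statistic set, given a CloudWatchConnection cw as in the earlier example; the namespace, metric names, and dimension values are hypothetical:

cw.put_metric_data('MyApp/Queue', 'Depth', value=42, unit='Count',
                   dimensions={'QueueName': 'jobs'})
cw.put_metric_data('MyApp/Queue', 'Latency',
                   statistics={'maximum': 30, 'minimum': 1,
                               'samplecount': 100, 'sum': 10000})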
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, alarm_names=None, max_records=None, state_value=None, next_token=None): """ Retrieves alarms with the specified names. If no name is specified, all alarms for the user are returned. Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action. :type action_prefix: string :param action_prefix: The action name prefix. :type alarm_name_prefix: string :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot be specified if this parameter is specified. :type alarm_names: list :param alarm_names: A list of alarm names to retrieve information for. :type max_records: int :param max_records: The maximum number of alarm descriptions to retrieve. :type state_value: string :param state_value: The state value to be used in matching alarms. :type next_token: string :param next_token: The token returned by a previous call to indicate that there is more data. :rtype list """
params = {} if action_prefix: params['ActionPrefix'] = action_prefix if alarm_name_prefix: params['AlarmNamePrefix'] = alarm_name_prefix elif alarm_names: self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') if max_records: params['MaxRecords'] = max_records if next_token: params['NextToken'] = next_token if state_value: params['StateValue'] = state_value return self.get_list('DescribeAlarms', params, [('MetricAlarms', MetricAlarms)])[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def describe_alarm_history(self, alarm_name=None, start_date=None, end_date=None, max_records=None, history_item_type=None, next_token=None): """ Retrieves history for the specified alarm. Filter alarms by date range or item type. If an alarm name is not specified, Amazon CloudWatch returns histories for all of the owner's alarms. Amazon CloudWatch retains the history of deleted alarms for a period of six weeks. If an alarm has been deleted, its history can still be queried. :type alarm_name: string :param alarm_name: The name of the alarm. :type start_date: datetime :param start_date: The starting date to retrieve alarm history. :type end_date: datetime :param end_date: The ending date to retrieve alarm history. :type history_item_type: string :param history_item_type: The type of alarm histories to retrieve (ConfigurationUpdate | StateUpdate | Action) :type max_records: int :param max_records: The maximum number of alarm descriptions to retrieve. :type next_token: string :param next_token: The token returned by a previous call to indicate that there is more data. :rtype list """
params = {} if alarm_name: params['AlarmName'] = alarm_name if start_date: params['StartDate'] = start_date.isoformat() if end_date: params['EndDate'] = end_date.isoformat() if history_item_type: params['HistoryItemType'] = history_item_type if max_records: params['MaxRecords'] = max_records if next_token: params['NextToken'] = next_token return self.get_list('DescribeAlarmHistory', params, [('member', AlarmHistoryItem)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def describe_alarms_for_metric(self, metric_name, namespace, period=None, statistic=None, dimensions=None, unit=None): """ Retrieves all alarms for a single metric. Specify a statistic, period, or unit to filter the set of alarms further. :type metric_name: string :param metric_name: The name of the metric :type namespace: string :param namespace: The namespace of the metric. :type period: int :param period: The period in seconds over which the statistic is applied. :type statistic: string :param statistic: The statistic for the metric. :type dimensions: dict :param dimensions: A dictionary containing name/value pairs that will be used to filter the results. The key in the dictionary is the name of a Dimension. The value in the dictionary is either a scalar value of that Dimension name that you want to filter on, a list of values to filter on or None if you want all metrics with that Dimension name. :type unit: string :param unit: The unit for the metric. :rtype list """
params = {'MetricName' : metric_name, 'Namespace' : namespace} if period: params['Period'] = period if statistic: params['Statistic'] = statistic if dimensions: self.build_dimension_param(dimensions, params) if unit: params['Unit'] = unit return self.get_list('DescribeAlarmsForMetric', params, [('member', MetricAlarm)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_metric_alarm(self, alarm): """ Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric. Optionally, this operation can associate one or more Amazon Simple Notification Service resources with the alarm. When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. Any actions associated with the StateValue are then executed. When updating an existing alarm, its StateValue is left unchanged. :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm :param alarm: MetricAlarm object. """
params = { 'AlarmName' : alarm.name, 'MetricName' : alarm.metric, 'Namespace' : alarm.namespace, 'Statistic' : alarm.statistic, 'ComparisonOperator' : alarm.comparison, 'Threshold' : alarm.threshold, 'EvaluationPeriods' : alarm.evaluation_periods, 'Period' : alarm.period, } if alarm.actions_enabled is not None: params['ActionsEnabled'] = alarm.actions_enabled if alarm.alarm_actions: self.build_list_params(params, alarm.alarm_actions, 'AlarmActions.member.%s') if alarm.description: params['AlarmDescription'] = alarm.description if alarm.dimensions: self.build_dimension_param(alarm.dimensions, params) if alarm.insufficient_data_actions: self.build_list_params(params, alarm.insufficient_data_actions, 'InsufficientDataActions.member.%s') if alarm.ok_actions: self.build_list_params(params, alarm.ok_actions, 'OKActions.member.%s') if alarm.unit: params['Unit'] = alarm.unit alarm.connection = self return self.get_status('PutMetricAlarm', params)
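A hedged sketch of building and registering an alarm; the MetricAlarm keyword names follow boto's alarm module but the specific values (instance id, SNS topic ARN) are assumptions:

from boto.ec2.cloudwatch.alarm import MetricAlarm
alarm = MetricAlarm(name='cpu-high', metric='CPUUtilization', namespace='AWS/EC2',
                    statistic='Average', comparison='>', threshold=80.0,
                    period=300, evaluation_periods=2,
                    dimensions={'InstanceId': 'i-12345678'},
                    alarm_actions=['arn:aws:sns:us-east-1:123456789012:ops'])
cw.put_metric_alarm(alarm)   # cw: a CloudWatchConnection as above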
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_alarms(self, alarms): """ Deletes all specified alarms. In the event of an error, no alarms are deleted. :type alarms: list :param alarms: List of alarm names. """
params = {} self.build_list_params(params, alarms, 'AlarmNames.member.%s') return self.get_status('DeleteAlarms', params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_alarm_actions(self, alarm_names): """ Enables actions for the specified alarms. :type alarm_names: list :param alarm_names: List of alarm names. """
params = {} self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') return self.get_status('EnableAlarmActions', params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disable_alarm_actions(self, alarm_names): """ Disables actions for the specified alarms. :type alarm_names: list :param alarm_names: List of alarm names. """
params = {} self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') return self.get_status('DisableAlarmActions', params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(self): """ Determine how long until the next scheduled time for a Task. Returns the number of seconds until the next scheduled time or zero if the task needs to be run immediately. If it's an hourly task and it's never been run, run it now. If it's a daily task and it's never been run and the hour is right, run it now. """
boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))

if self.hourly and not self.last_executed:
    return 0

if self.daily and not self.last_executed:
    if int(self.hour) == self.now.hour:
        return 0
    else:
        return abs(int(self.hour) - self.now.hour) * 60 * 60

delta = self.now - self.last_executed
if self.hourly:
    if delta.seconds >= 60*60:
        return 0
    else:
        return 60*60 - delta.seconds
else:
    if int(self.hour) == self.now.hour:
        if delta.days >= 1:
            return 0
        else:
            return 82800 # 23 hours, just to be safe
    else:
        return abs(int(self.hour) - self.now.hour) * 60 * 60
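A worked example of the rescheduling arithmetic: a daily Task with hour='5' that has never run, checked at 09:00, is deferred by the absolute hour gap:

hour, now_hour = 5, 9
delay = abs(hour - now_hour) * 60 * 60
assert delay == 14400   # check() would return four hours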
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def __buyIfMeet(self, tick): ''' place buy order if conditions meet ''' # place short sell order ''' if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()): if tick.close/self.__previousMovingLowWeek < 0.95: return if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1): # assume no commission fee for now self.__placeSellShortOrder(tick) elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue(): # assume no commission fee for now self.__placeSellShortOrder(tick) ''' # place buy order if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()): if tick.close / self.__previousMovingLowWeek > 1.05: return if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1): # assume no commission fee for now self.__placeBuyOrder(tick) elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1): # assume no commission fee for now self.__placeBuyOrder(tick)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def __placeSellShortOrder(self, tick): ''' place short sell order''' share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close)) sellShortOrder=Order(accountId=self.__strategy.accountId, action=Action.SELL_SHORT, is_market=True, security=self.__security, share=share) if self.__strategy.placeOrder(sellShortOrder): self.__buyOrder=sellShortOrder # place stop order stopOrder=Order(accountId=self.__strategy.accountId, action=Action.BUY_TO_COVER, is_stop=True, security=self.__security, price=tick.close * 1.05, share=0 - share) self.__placeStopOrder(stopOrder)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def __placeStopOrder(self, order): ''' place stop order ''' orderId=self.__strategy.placeOrder(order) if orderId: self.__stopOrderId=orderId self.__stopOrder=order else: LOG.error("Can't place stop order %s" % order)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def __adjustStopOrder(self, tick): ''' update stop order if needed ''' if not self.__stopOrderId: return if self.__stopOrder.action == Action.SELL: orgStopPrice=self.__buyOrder.price * 0.95 newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85) newStopPrice=min(newStopPrice, tick.close * 0.95) if newStopPrice > self.__stopOrder.price: self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId) stopOrder=Order(accountId=self.__strategy.accountId, action=Action.SELL, is_stop=True, security=self.__security, price=newStopPrice, share=self.__stopOrder.share) self.__placeStopOrder(stopOrder) ''' elif self.__stopOrder.action == Action.BUY_TO_COVER: orgStopPrice=self.__buyOrder.price * 1.05 newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15) newStopPrice=max(newStopPrice, tick.close * 1.05) if newStopPrice < self.__stopOrder.price: self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId) stopOrder=Order(accountId=self.__strategy.accountId, action=Action.BUY_TO_COVER, type=Type.STOP, security=self.__security, price=newStopPrice, share=self.__stopOrder.share) self.__placeStopOrder(stopOrder) '''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def __updatePreviousState(self, tick): ''' update previous state ''' self.__previousTick=tick self.__previousSmaShort=self.__smaShort.getLastValue() self.__previousSmaMid=self.__smaMid.getLastValue() self.__previousSmaLong=self.__smaLong.getLastValue() self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue() self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue() self.__previousMovingLowShort=self.__movingLowShort.getLastValue() self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _inc(self, val): """Increment a single value"""
assert len(val) == self.sequence_length
return self.sequence_string[(self.sequence_string.index(val) + 1) % len(self.sequence_string)]
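A minimal illustration of the modular wrap-around (names mirror the attributes above):

sequence_string = '0123456789'
val = '9'
nxt = sequence_string[(sequence_string.index(val) + 1) % len(sequence_string)]
assert nxt == '0'   # '9' rolls over to '0'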
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self): """Get the value"""
val = self.db.get_attributes(self.id, consistent_read=True)
if val:
    if val.has_key('timestamp'):
        self.timestamp = val['timestamp']
    if val.has_key('current_value'):
        self._value = self.item_type(val['current_value'])
    if val.has_key("last_value") and val['last_value'] is not None:
        self.last_value = self.item_type(val['last_value'])
return self._value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _connect(self): """Connect to our domain"""
if not self._db: import boto sdb = boto.connect_sdb() if not self.domain_name: self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default")) try: self._db = sdb.get_domain(self.domain_name) except SDBResponseError, e: if e.status == 400: self._db = sdb.create_domain(self.domain_name) else: raise return self._db
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_credential_file(self, path): """Load a credential file laid out like the one used by the AWS Java utilities"""
c_data = StringIO.StringIO() c_data.write("[Credentials]\n") for line in open(path, "r").readlines(): c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key")) c_data.seek(0) self.readfp(c_data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_option(self, path, section, option, value): """ Write the specified Section.Option to the config file specified by path. Replace any previous value. If the path doesn't exist, create it. Also add the option to the in-memory config. """
config = ConfigParser.SafeConfigParser() config.read(path) if not config.has_section(section): config.add_section(section) config.set(section, option, value) fp = open(path, 'w') config.write(fp) fp.close() if not self.has_section(section): self.add_section(section) self.set(section, option, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clone_replace_name(self, new_name): """Instantiate a BucketStorageUri from the current BucketStorageUri, but replacing the object_name. @type new_name: string @param new_name: new object name """
if not self.bucket_name: raise InvalidUriError('clone_replace_name() on bucket-less URI %s' % self.uri) return BucketStorageUri( self.scheme, self.bucket_name, new_name, self.debug, suppress_consec_slashes=self.suppress_consec_slashes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_acl(self, validate=True, headers=None, version_id=None): """returns a bucket's acl"""
if not self.bucket_name: raise InvalidUriError('get_acl on bucket-less URI (%s)' % self.uri) bucket = self.get_bucket(validate, headers) # This works for both bucket- and object- level ACLs (former passes # key_name=None): acl = bucket.get_acl(self.object_name, headers, version_id) self.check_response(acl, 'acl', self.uri) return acl
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_acl(self, acl_or_str, key_name='', validate=True, headers=None, version_id=None): """sets or updates a bucket's acl"""
if not self.bucket_name: raise InvalidUriError('set_acl on bucket-less URI (%s)' % self.uri) self.get_bucket(validate, headers).set_acl(acl_or_str, key_name, headers, version_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_appl(): """Prints stock prices for Apple (AAPL)."""
# query via the constructor
yql = YQL('AAPL', '2014-01-01', '2014-01-10')
for item in yql:
    print item.get('date'), item.get('price')
# re-query the same instance via select()
yql.select('AAPL', '2014-01-01', '2014-01-10')
for item in yql:
    print item.get('date'), item.get('price')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_googl(): """Prints stock prices for Google (GOOGL)."""
# query via the constructor
yql = YQL('GOOGL', '2014-01-01', '2014-01-10')
for item in yql:
    print item.get('date'), item.get('price')
# re-query the same instance via select()
yql.select('GOOGL', '2014-01-01', '2014-01-10')
for item in yql:
    print item.get('date'), item.get('price')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, validate=False): """ Update the data associated with this volume by querying EC2. :type validate: bool :param validate: By default, if EC2 returns no data about the volume the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """
# Check the resultset since Eucalyptus ignores the volumeId param unfiltered_rs = self.connection.get_all_volumes([self.id]) rs = [ x for x in unfiltered_rs if x.id == self.id ] if len(rs) > 0: self._update(rs[0]) elif validate: raise ValueError('%s is not a valid Volume ID' % self.id) return self.status
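A hedged polling sketch built on update(); the sleep interval and target status are illustrative:

import time
while vol.update() != 'available':   # vol: a boto Volume instance
    time.sleep(5)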
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attach(self, instance_id, device): """ Attach this EBS volume to an EC2 instance. :type instance_id: str :param instance_id: The ID of the EC2 instance to which it will be attached. :type device: str :param device: The device on the instance through which the volume will be exposed (e.g. /dev/sdh) :rtype: bool :return: True if successful """
return self.connection.attach_volume(self.id, instance_id, device)