Dataset columns: code (string, 75 to 104k characters), docstring (string, 1 to 46.9k characters), text (string, 164 to 112k characters).
def load_table(self, table):
    """ Load resources as specified by given table into our db. """
    region = table.database if table.database else self.default_region
    resource_name, collection_name = table.table.split('_', 1)
    # we use underscore "_" instead of dash "-" for region names, but boto3 needs dashes
    boto_region_name = region.replace('_', '-')
    resource = self.boto3_session.resource(resource_name, region_name=boto_region_name)
    if not hasattr(resource, collection_name):
        raise QueryError(
            'Unknown collection <{0}> of resource <{1}>'.format(collection_name, resource_name))
    self.attach_region(region)
    self.refresh_table(region, table.table, resource, getattr(resource, collection_name))
Load resources as specified by given table into our db.
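A quick sketch (not from the source) of the naming convention this method relies on; the table and region values below are hypothetical:

# The table name "<resource>_<collection>" is split on the first underscore,
# and underscores in the region name are turned back into dashes for boto3.
table_name = 'ec2_instances'
resource_name, collection_name = table_name.split('_', 1)   # ('ec2', 'instances')
boto_region_name = 'us_east_1'.replace('_', '-')            # 'us-east-1'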
def _get_invalid_info(granule_data):
    """Get a detailed report of the missing data.

    N/A: not applicable
    MISS: required value missing at time of processing
    OBPT: onboard pixel trim (overlapping/bow-tie pixel removed during SDR processing)
    OGPT: on-ground pixel trim (overlapping/bow-tie pixel removed during EDR processing)
    ERR: error occurred during processing / non-convergence
    ELINT: ellipsoid intersect failed / instrument line-of-sight does not intersect the Earth’s surface
    VDNE: value does not exist / processing algorithm did not execute
    SOUB: scaled out-of-bounds / solution not within allowed range
    """
    if issubclass(granule_data.dtype.type, np.integer):
        msg = ("na:" + str((granule_data == 65535).sum()) +
               " miss:" + str((granule_data == 65534).sum()) +
               " obpt:" + str((granule_data == 65533).sum()) +
               " ogpt:" + str((granule_data == 65532).sum()) +
               " err:" + str((granule_data == 65531).sum()) +
               " elint:" + str((granule_data == 65530).sum()) +
               " vdne:" + str((granule_data == 65529).sum()) +
               " soub:" + str((granule_data == 65528).sum()))
    elif issubclass(granule_data.dtype.type, np.floating):
        msg = ("na:" + str((granule_data == -999.9).sum()) +
               " miss:" + str((granule_data == -999.8).sum()) +
               " obpt:" + str((granule_data == -999.7).sum()) +
               " ogpt:" + str((granule_data == -999.6).sum()) +
               " err:" + str((granule_data == -999.5).sum()) +
               " elint:" + str((granule_data == -999.4).sum()) +
               " vdne:" + str((granule_data == -999.3).sum()) +
               " soub:" + str((granule_data == -999.2).sum()))
    return msg
Get a detailed report of the missing data. N/A: not applicable MISS: required value missing at time of processing OBPT: onboard pixel trim (overlapping/bow-tie pixel removed during SDR processing) OGPT: on-ground pixel trim (overlapping/bow-tie pixel removed during EDR processing) ERR: error occurred during processing / non-convergence ELINT: ellipsoid intersect failed / instrument line-of-sight does not intersect the Earth’s surface VDNE: value does not exist / processing algorithm did not execute SOUB: scaled out-of-bounds / solution not within allowed range
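A minimal sketch of what the report looks like, assuming the helper is importable and numpy is available; the array values are made up:

import numpy as np

granule = np.array([[100, 65535, 65531],
                    [65529, 200, 65535]], dtype=np.uint16)
print(_get_invalid_info(granule))
# -> "na:2 miss:0 obpt:0 ogpt:0 err:1 elint:0 vdne:1 soub:0"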
def encode(self):
    """Encodes this SeqCmd to binary and returns a bytearray."""
    return self.attrs.encode() + self.delay.encode() + self.cmd.encode()
Encodes this SeqCmd to binary and returns a bytearray.
def utctimetuple(self):
    "Return UTC time tuple compatible with time.gmtime()."
    y, m, d = self.year, self.month, self.day
    hh, mm, ss = self.hour, self.minute, self.second
    offset = self._utcoffset()
    if offset:  # neither None nor 0
        mm -= offset
        y, m, d, hh, mm, ss, _ = _normalize_datetime(
            y, m, d, hh, mm, ss, 0, ignore_overflow=True)
    return _build_struct_time(y, m, d, hh, mm, ss, 0)
Return UTC time tuple compatible with time.gmtime().
def as_search_document_update(self, *, index, update_fields):
    """
    Return a partial update document based on which fields have been updated.

    If an object is saved with the `update_fields` argument passed
    through, then it is assumed that this is a 'partial update'. In
    this scenario we need a {property: value} dictionary containing
    just the fields we want to update.

    This method handles two possible update strategies - 'full' or 'partial'.
    The default 'full' strategy simply returns the value of `as_search_document`
    - thereby replacing the entire document each time. The 'partial' strategy is
    more intelligent - it will determine whether the fields passed are in the
    search document mapping, and return a partial update document that contains
    only those that are. In addition, if any field that _is_ included cannot be
    automatically serialized (e.g. a RelatedField object), then this method will
    raise a ValueError. In this scenario, you should override this method in
    your subclass.

    >>> def as_search_document_update(self, index, update_fields):
    ...     if 'user' in update_fields:
    ...         update_fields.remove('user')
    ...         doc = super().as_search_document_update(index, update_fields)
    ...         doc['user'] = self.user.get_full_name()
    ...         return doc
    ...     return super().as_search_document_update(index, update_fields)

    You may also wish to subclass this method to perform field-specific logic
    - in this example if only the timestamp is being saved, then ignore the
    update if the timestamp is later than a certain time.

    >>> def as_search_document_update(self, index, update_fields):
    ...     if update_fields == ['timestamp']:
    ...         if self.timestamp > today():
    ...             return {}
    ...     return super().as_search_document_update(index, update_fields)

    """
    if UPDATE_STRATEGY == UPDATE_STRATEGY_FULL:
        return self.as_search_document(index=index)

    if UPDATE_STRATEGY == UPDATE_STRATEGY_PARTIAL:
        # in partial mode we update the intersection of update_fields and
        # properties found in the mapping file.
        return {
            k: getattr(self, k)
            for k in self.clean_update_fields(
                index=index, update_fields=update_fields
            )
        }
Return a partial update document based on which fields have been updated. If an object is saved with the `update_fields` argument passed through, then it is assumed that this is a 'partial update'. In this scenario we need a {property: value} dictionary containing just the fields we want to update. This method handles two possible update strategies - 'full' or 'partial'. The default 'full' strategy simply returns the value of `as_search_document` - thereby replacing the entire document each time. The 'partial' strategy is more intelligent - it will determine whether the fields passed are in the search document mapping, and return a partial update document that contains only those that are. In addition, if any field that _is_ included cannot be automatically serialized (e.g. a RelatedField object), then this method will raise a ValueError. In this scenario, you should override this method in your subclass. >>> def as_search_document_update(self, index, update_fields): ... if 'user' in update_fields: ... update_fields.remove('user') ... doc = super().as_search_document_update(index, update_fields) ... doc['user'] = self.user.get_full_name() ... return doc ... return super().as_search_document_update(index, update_fields) You may also wish to subclass this method to perform field-specific logic - in this example if only the timestamp is being saved, then ignore the update if the timestamp is later than a certain time. >>> def as_search_document_update(self, index, update_fields): ... if update_fields == ['timestamp']: ... if self.timestamp > today(): ... return {} ... return super().as_search_document_update(index, update_fields)
async def request(self, request):
    """
    Execute a STUN transaction and return the response.
    """
    assert request.transaction_id not in self.transactions

    if self.integrity_key:
        self.__add_authentication(request)

    transaction = stun.Transaction(request, self.server, self)
    self.transactions[request.transaction_id] = transaction
    try:
        return await transaction.run()
    finally:
        del self.transactions[request.transaction_id]
Execute a STUN transaction and return the response.
def list_projects_search(self, searchstring):
    """List projects with searchstring."""
    log.debug('List all projects with: %s' % searchstring)
    return self.collection('projects/search/%s.json' % quote_plus(searchstring))
List projects with searchstring.
def gravitational_force(position_a, mass_a, position_b, mass_b):
    """Returns the gravitational force between the two bodies a and b."""
    distance = distance_between(position_a, position_b)

    # Calculate the direction and magnitude of the force.
    angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0])
    magnitude = G * mass_a * mass_b / (distance**2)

    # Find the x and y components of the force.
    # Determine sign based on which one is the larger body.
    sign = -1 if mass_b > mass_a else 1
    x_force = sign * magnitude * math.cos(angle)
    y_force = sign * magnitude * math.sin(angle)
    return x_force, y_force
Returns the gravitational force between the two bodies a and b.
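A usage sketch, not from the source; distance_between() and the constant G are assumed to be defined in the same module as gravitational_force(), and the positions/masses below are illustrative:

earth_pos, earth_mass = (0.0, 0.0), 5.972e24      # position in metres, mass in kg
moon_pos, moon_mass = (3.844e8, 0.0), 7.348e22
fx, fy = gravitational_force(earth_pos, earth_mass, moon_pos, moon_mass)
# fx, fy are the x and y components of the force between the two bodies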
def is_frameshift_len(mut_df):
    """Simply returns a series indicating whether each corresponding
    mutation is a frameshift.

    This is based on the length of the indel. Thus may be fooled by
    frameshifts at exon-intron boundaries or other odd cases.

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input file as a dataframe in standard format

    Returns
    -------
    is_fs : pd.Series
        pandas series indicating if mutations are frameshifts
    """
    # calculate length, 0-based coordinates
    # indel_len = mut_df['End_Position'] - mut_df['Start_Position']
    if 'indel len' in mut_df.columns:
        indel_len = mut_df['indel len']
    else:
        indel_len = compute_indel_length(mut_df)

    # only non-multiples of 3 are frameshifts
    is_fs = (indel_len % 3) > 0

    # make sure no single base substitutions are counted
    is_indel = (mut_df['Reference_Allele'] == '-') | (mut_df['Tumor_Allele'] == '-')
    is_fs[~is_indel] = False
    return is_fs
Simply returns a series indicating whether each corresponding mutation is a frameshift. This is based on the length of the indel. Thus may be fooled by frameshifts at exon-intron boundaries or other odd cases. Parameters ---------- mut_df : pd.DataFrame mutation input file as a dataframe in standard format Returns ------- is_fs : pd.Series pandas series indicating if mutations are frameshifts
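An illustrative call with a toy dataframe (not from the source); the column names follow what the function reads, and pandas is assumed to be imported as pd:

import pandas as pd

muts = pd.DataFrame({
    'Reference_Allele': ['-', 'A', '-'],
    'Tumor_Allele':     ['AT', 'G', 'CCC'],
    'indel len':        [2, 0, 3],
})
print(is_frameshift_len(muts))
# 0     True   (2-base insertion -> frameshift)
# 1    False   (substitution, not an indel)
# 2    False   (3-base insertion keeps the reading frame)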
def is_published(self):
    """Return True if a record is published.

    We say that a record is published if it is citeable, which means
    that it has enough information in a ``publication_info``, or if
    we know its DOI and a ``journal_title``, which means it is in
    press.

    Returns:
        bool: whether the record is published.

    Examples:
        >>> record = {
        ...     'dois': [
        ...         {'value': '10.1016/0029-5582(61)90469-2'},
        ...     ],
        ...     'publication_info': [
        ...         {'journal_title': 'Nucl.Phys.'},
        ...     ],
        ... }
        >>> LiteratureReader(record).is_published
        True

    """
    citeable = 'publication_info' in self.record and \
        is_citeable(self.record['publication_info'])
    submitted = 'dois' in self.record and any(
        'journal_title' in el
        for el in force_list(self.record.get('publication_info'))
    )
    return citeable or submitted
Return True if a record is published. We say that a record is published if it is citeable, which means that it has enough information in a ``publication_info``, or if we know its DOI and a ``journal_title``, which means it is in press. Returns: bool: whether the record is published. Examples: >>> record = { ... 'dois': [ ... {'value': '10.1016/0029-5582(61)90469-2'}, ... ], ... 'publication_info': [ ... {'journal_title': 'Nucl.Phys.'}, ... ], ... } >>> LiteratureReader(record).is_published True
def _pyuncompress_sqlitecurve(sqlitecurve, force=False):
    '''This just uncompresses the sqlitecurve. Should be independent of OS.
    '''
    outfile = sqlitecurve.replace('.gz', '')

    try:
        if os.path.exists(outfile) and not force:
            return outfile
        else:
            with gzip.open(sqlitecurve, 'rb') as infd:
                with open(outfile, 'wb') as outfd:
                    shutil.copyfileobj(infd, outfd)

            # do not remove the input file yet
            if os.path.exists(outfile):
                return outfile

    except Exception as e:
        return None
This just uncompresses the sqlitecurve. Should be independent of OS.
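A usage sketch with a made-up path; gzip, os and shutil are assumed to be imported by the module that defines this helper:

lcpath = _pyuncompress_sqlitecurve('/data/lcs/HAT-123-0001234-sqlitecurve.sqlite.gz')
if lcpath is not None:
    print('uncompressed light curve at', lcpath)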
def handle_splits(self, splits):
    """Processes a list of splits by modifying any positions as needed.

    Parameters
    ----------
    splits: list
        A list of splits. Each split is a tuple of (asset, ratio).

    Returns
    -------
    int: The leftover cash from fractional shares after modifying each
        position.
    """
    total_leftover_cash = 0

    for asset, ratio in splits:
        if asset in self.positions:
            self._dirty_stats = True

            # Make the position object handle the split. It returns the
            # leftover cash from a fractional share, if there is any.
            position = self.positions[asset]
            leftover_cash = position.handle_split(asset, ratio)
            total_leftover_cash += leftover_cash

    return total_leftover_cash
Processes a list of splits by modifying any positions as needed. Parameters ---------- splits: list A list of splits. Each split is a tuple of (asset, ratio). Returns ------- int: The leftover cash from fractional shares after modifying each position.
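A sketch of the expected input shape, purely illustrative: the asset objects, the tracker instance, and the ratio values below are hypothetical.

splits = [(aapl_asset, 0.5), (msft_asset, 0.25)]   # (asset, ratio) pairs
leftover = tracker.handle_splits(splits)           # leftover cash from fractional shares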
def get_certificate_issuer(self, certificate_issuer_id, **kwargs):  # noqa: E501
    """Get certificate issuer by ID.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required)
    :return: CertificateIssuerInfo
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs)  # noqa: E501
    else:
        (data) = self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs)  # noqa: E501
        return data
Get certificate issuer by ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required) :return: CertificateIssuerInfo If the method is called asynchronously, returns the request thread.
def delete_stack(self, stack_name):
    """Teardown a stack."""
    get_stack(stack_name)
    CLIENT.delete_stack(
        StackName=stack_name
    )
    DELETE_WAITER.wait(StackName=stack_name)
Teardown a stack.
def collect(self): """ Collect xfs stats. For an explanation of the following metrics visit http://xfs.org/index.php/Runtime_Stats https://github.com/torvalds/linux/blob/master/fs/xfs/xfs_stats.h """ data_structure = { 'extent_alloc': ( 'alloc_extent', 'alloc_block', 'free_extent', 'free_block' ), 'abt': ( 'lookup', 'compare', 'insrec', 'delrec' ), 'blk_map': ( 'read_ops', 'write_ops', 'unmap', 'add_exlist', 'del_exlist', 'look_exlist', 'cmp_exlist' ), 'bmbt': ( 'lookup', 'compare', 'insrec', 'delrec' ), 'dir': ( 'lookup', 'create', 'remove', 'getdents' ), 'trans': ( 'sync', 'async', 'empty' ), 'ig': ( 'ig_attempts', 'ig_found', 'ig_frecycle', 'ig_missed', 'ig_dup', 'ig_reclaims', 'ig_attrchg' ), 'log': ( 'writes', 'blocks', 'noiclogs', 'force', 'force_sleep' ), 'push_ail': ( 'try_logspace', 'sleep_logspace', 'pushes', 'success', 'pushbuf', 'pinned', 'locked', 'flushing', 'restarts', 'flush' ), 'xstrat': ( 'quick', 'split' ), 'rw': ( 'write_calls', 'read_calls' ), 'attr': ( 'get', 'set', 'remove', 'list' ), 'icluster': ( 'iflush_count', 'icluster_flushcnt', 'icluster_flushinode' ), 'vnodes': ( 'vn_active', 'vn_alloc', 'vn_get', 'vn_hold', 'vn_rele', 'vn_reclaim', 'vn_remove', 'vn_free' ), 'buf': ( 'xb_get', 'xb_create', 'xb_get_locked', 'xb_get_locked_waited', 'xb_busy_locked', 'xb_miss_locked', 'xb_page_retries', 'xb_page_found', 'xb_get_read' ), 'abtb2': ( 'xs_abtb_2_lookup', 'xs_abtb_2_compare', 'xs_abtb_2_insrec', 'xs_abtb_2_delrec', 'xs_abtb_2_newroot', 'xs_abtb_2_killroot', 'xs_abtb_2_increment', 'xs_abtb_2_decrement', 'xs_abtb_2_lshift', 'xs_abtb_2_rshift', 'xs_abtb_2_split', 'xs_abtb_2_join', 'xs_abtb_2_alloc', 'xs_abtb_2_free', 'xs_abtb_2_moves' ), 'abtc2': ( 'xs_abtc_2_lookup', 'xs_abtc_2_compare', 'xs_abtc_2_insrec', 'xs_abtc_2_delrec', 'xs_abtc_2_newroot', 'xs_abtc_2_killroot', 'xs_abtc_2_increment', 'xs_abtc_2_decrement', 'xs_abtc_2_lshift', 'xs_abtc_2_rshift', 'xs_abtc_2_split', 'xs_abtc_2_join', 'xs_abtc_2_alloc', 'xs_abtc_2_free', 'xs_abtc_2_moves' ), 'bmbt2': ( 'xs_bmbt_2_lookup', 'xs_bmbt_2_compare', 'xs_bmbt_2_insrec', 'xs_bmbt_2_delrec', 'xs_bmbt_2_newroot', 'xs_bmbt_2_killroot', 'xs_bmbt_2_increment', 'xs_bmbt_2_decrement', 'xs_bmbt_2_lshift', 'xs_bmbt_2_rshift', 'xs_bmbt_2_split', 'xs_bmbt_2_join', 'xs_bmbt_2_alloc', 'xs_bmbt_2_free', 'xs_bmbt_2_moves' ), 'ibt2': ( 'lookup', 'compare', 'insrec', 'delrec', 'newroot', 'killroot', 'increment', 'decrement', 'lshift', 'rshift', 'split', 'join', 'alloc', 'free', 'moves' ), 'fibt2': ( 'lookup', 'compare', 'insrec', 'delrec', 'newroot', 'killroot', 'increment', 'decrement', 'lshift', 'rshift', 'split', 'join', 'alloc', 'free', 'moves' ), 'qm': ( 'xs_qm_dquot', 'xs_qm_dquot_unused' ), 'xpc': ( 'xs_xstrat_bytes', 'xs_write_bytes', 'xs_read_bytes' ), 'debug': ( 'debug', ) } f = open(self.PROC) new_stats = f.readlines() f.close() stats = {} for line in new_stats: items = line.rstrip().split() stats[items[0]] = [int(a) for a in items[1:]] for key in stats.keys(): for item in enumerate(data_structure[key]): metric_name = '.'.join([key, item[1]]) value = stats[key][item[0]] self.publish_counter(metric_name, value)
Collect xfs stats. For an explanation of the following metrics visit http://xfs.org/index.php/Runtime_Stats https://github.com/torvalds/linux/blob/master/fs/xfs/xfs_stats.h
def print_user(self, user):
    '''print a relational database user
    '''
    status = "active"
    token = user.token

    if token in ['finished', 'revoked']:
        status = token

    if token is None:
        token = ''

    subid = "%s\t%s[%s]" % (user.id, token, status)
    print(subid)
    return subid
print a relational database user
def list_nodes(full=False, call=None):
    '''
    list of nodes, keeping only a brief listing

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    ret = {}
    if POLL_ALL_LOCATIONS:
        for location in JOYENT_LOCATIONS:
            result = query(command='my/machines', location=location,
                           method='GET')
            if result[0] in VALID_RESPONSE_CODES:
                nodes = result[1]
                for node in nodes:
                    if 'name' in node:
                        node['location'] = location
                        ret[node['name']] = reformat_node(item=node, full=full)
            else:
                log.error('Invalid response when listing Joyent nodes: %s', result[1])
    else:
        location = get_location()
        result = query(command='my/machines', location=location,
                       method='GET')
        nodes = result[1]
        for node in nodes:
            if 'name' in node:
                node['location'] = location
                ret[node['name']] = reformat_node(item=node, full=full)

    return ret
list of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q
def send_audio(chat_id, audio, caption=None, duration=None, performer=None, title=None, reply_to_message_id=None, reply_markup=None, disable_notification=False, parse_mode=None, **kwargs): """ Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future. For backward compatibility, when the fields title and performer are both empty and the mime-type of the file to be sent is not audio/mpeg, the file will be sent as a playable voice message. For this to work, the audio must be in an .ogg file encoded with OPUS. This behavior will be phased out in the future. For sending voice messages, use the sendVoice method instead. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data. :param caption: Audio caption, 0-200 characters :param duration: Duration of the audio in seconds :param performer: Performer :param title: Track name :param reply_to_message_id: If the message is a reply, ID of the original message :param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard, :param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound. Other apps coming soon. :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type chat_id: int or str :type audio: InputFile or str :type caption: str :type duration: int :type performer: str :type title: str :type reply_to_message_id: int :type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply :type parse_mode: str :returns: On success, the sent Message is returned. :rtype: TelegramBotRPCRequest """ files = None if isinstance(audio, InputFile): files = [audio] audio = None elif not isinstance(audio, str): raise Exception('audio must be instance of InputFile or str') # required args params = dict( chat_id=chat_id, audio=audio ) # optional args params.update( _clean_params( caption=caption, duration=duration, performer=performer, title=title, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, disable_notification=disable_notification, parse_mode=parse_mode, ) ) return TelegramBotRPCRequest('sendAudio', params=params, files=files, on_result=Message.from_result, **kwargs)
Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future. For backward compatibility, when the fields title and performer are both empty and the mime-type of the file to be sent is not audio/mpeg, the file will be sent as a playable voice message. For this to work, the audio must be in an .ogg file encoded with OPUS. This behavior will be phased out in the future. For sending voice messages, use the sendVoice method instead. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data. :param caption: Audio caption, 0-200 characters :param duration: Duration of the audio in seconds :param performer: Performer :param title: Track name :param reply_to_message_id: If the message is a reply, ID of the original message :param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard, :param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound. Other apps coming soon. :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type chat_id: int or str :type audio: InputFile or str :type caption: str :type duration: int :type performer: str :type title: str :type reply_to_message_id: int :type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply :type parse_mode: str :returns: On success, the sent Message is returned. :rtype: TelegramBotRPCRequest
def add_reader(self, fd, callback):
    " Add read file descriptor to the event loop. "
    fd = fd_to_int(fd)
    self._read_fds[fd] = callback
    self.selector.register(fd)
Add read file descriptor to the event loop.
def _join_disease(query, disease_definition, disease_id, disease_name):
    """helper function to add a query join to Disease model

    :param sqlalchemy.orm.query.Query query: SQL Alchemy query
    :param disease_definition:
    :param str disease_id: see :attr:`models.Disease.disease_id`
    :param disease_name:
    :rtype: sqlalchemy.orm.query.Query
    """
    if disease_definition or disease_id or disease_name:
        query = query.join(models.Disease)

        if disease_definition:
            query = query.filter(models.Disease.definition.like(disease_definition))

        if disease_id:
            query = query.filter(models.Disease.disease_id == disease_id)

        if disease_name:
            query = query.filter(models.Disease.disease_name.like(disease_name))

    return query
helper function to add a query join to Disease model :param sqlalchemy.orm.query.Query query: SQL Alchemy query :param disease_definition: :param str disease_id: see :attr:`models.Disease.disease_id` :param disease_name: :rtype: sqlalchemy.orm.query.Query
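A hypothetical call site, not from the source; the session and models.Association are assumptions, and the '%' wildcards follow SQL LIKE semantics:

query = session.query(models.Association)
query = _join_disease(query, None, None, '%carcinoma%')
results = query.all()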
def receive_data(socket):
    """Receive an answer from the daemon and return the response.

    Args:
        socket (socket.socket): A socket that is connected to the daemon.

    Returns:
        dict or string: The unpickled answer.
    """
    answer = b""
    while True:
        packet = socket.recv(4096)
        if not packet:
            break
        answer += packet

    response = pickle.loads(answer)
    socket.close()
    return response
Receive an answer from the daemon and return the response. Args: socket (socket.socket): A socket that is connected to the daemon. Returns: dict or string: The unpickled answer.
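A minimal sketch of the client side this helper pairs with: connect to the daemon, send a pickled request, then read back the pickled reply. The socket path and payload are made up for illustration.

import pickle
import socket

client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect('/tmp/daemon.sock')
client.sendall(pickle.dumps({'mode': 'status'}))
client.shutdown(socket.SHUT_WR)   # let the loop in receive_data() see end-of-stream
response = receive_data(client)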
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
    '''
    Returns the rate of events with M > mag_value

    :param float slip:
        Slip rate in mm/yr
    :param float mmax:
        Maximum magnitude
    :param float mag_value:
        Magnitude value
    :param float bbar:
        \bar{b} parameter (effectively = b * log(10.))
    :param float dbar:
        \bar{d} parameter
    :param float beta:
        Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
    '''
    delta_m = (mmax - mag_value)
    a_1 = self._get_a1_value(bbar, dbar, slip / 10., beta, mmax)
    return a_1 * np.exp(bbar * delta_m) * (delta_m > 0.0)
Returns the rate of events with M > mag_value :param float slip: Slip rate in mm/yr :param float mmax: Maximum magnitude :param float mag_value: Magnitude value :param float bbar: \bar{b} parameter (effectively = b * log(10.)) :param float dbar: \bar{d} parameter :param float beta: Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
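Read as a formula, the return statement evaluates (with A_1 the value produced by self._get_a1_value):

N(M > m) =
\begin{cases}
  A_1 \, e^{\bar{b}\,(m_{\max} - m)}, & m < m_{\max} \\
  0, & m \ge m_{\max}
\end{cases}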
def edit(self):
    """
    Edit the SSH Key
    """
    input_params = {
        "name": self.name,
        "public_key": self.public_key,
    }

    data = self.get_data(
        "account/keys/%s" % self.id,
        type=PUT,
        params=input_params
    )

    if data:
        self.id = data['ssh_key']['id']
Edit the SSH Key
def open(self, *args, **kwargs):
    """
    Works exactly like the Telnet.open() call from the telnetlib
    module, except SSL/TLS may be transparently negotiated.
    """
    Telnet.open(self, *args, **kwargs)
    if self.force_ssl:
        self._start_tls()
Works exactly like the Telnet.open() call from the telnetlib module, except SSL/TLS may be transparently negotiated.
def geometric_progression_for_stepsize(x, update, dist, decision_function,
                                       current_iteration):
    """ Geometric progression to search for stepsize.
    Keep decreasing stepsize by half until reaching
    the desired side of the boundary.
    """
    epsilon = dist / np.sqrt(current_iteration)
    while True:
        updated = x + epsilon * update
        success = decision_function(updated[None])[0]
        if success:
            break
        else:
            epsilon = epsilon / 2.0

    return epsilon
Geometric progression to search for stepsize. Keep decreasing stepsize by half until reaching the desired side of the boundary.
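A toy illustration with a made-up linear decision function; numpy is assumed, and the points are chosen so the first trial step already succeeds:

import numpy as np

def decision_function(batch):
    # "success" means the first coordinate lies beyond 1.0
    return batch[:, 0] > 1.0

x = np.array([0.9, 0.0])
update = np.array([1.0, 0.0])            # unit direction towards the boundary
eps = geometric_progression_for_stepsize(x, update, dist=2.0,
                                         decision_function=decision_function,
                                         current_iteration=1)
# epsilon starts at 2.0 / sqrt(1) = 2.0; x + 2.0 * update = [2.9, 0.0] already
# succeeds, so the loop exits immediately and eps == 2.0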
def cublasDsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for real symmetric-banded matrix. """ status = _libcublas.cublasDsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k, ctypes.byref(ctypes.c_double(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_double(beta)), int(y), incy) cublasCheckStatus(status)
Matrix-vector product for real symmetric-banded matrix.
Below is the the instruction that describes the task: ### Input: Matrix-vector product for real symmetric-banded matrix. ### Response: def cublasDsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for real symmetric-banded matrix. """ status = _libcublas.cublasDsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k, ctypes.byref(ctypes.c_double(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_double(beta)), int(y), incy) cublasCheckStatus(status)
def create(): """Create a new database with information about the films in the specified directory or directories.""" if not all(map(os.path.isdir, ARGS.directory)): exit('Error: One or more of the specified directories does not exist.') with sqlite3.connect(ARGS.database) as connection: connection.text_factory = str cursor = connection.cursor() cursor.execute('DROP TABLE IF EXISTS Movies') cursor.execute('''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT, files BLOB)''') for dir in ARGS.directory: cursor.executemany('INSERT INTO Movies VALUES(?, ?, ?, ?)', local_data(dir))
Create a new database with information about the films in the specified directory or directories.
Below is the the instruction that describes the task: ### Input: Create a new database with information about the films in the specified directory or directories. ### Response: def create(): """Create a new database with information about the films in the specified directory or directories.""" if not all(map(os.path.isdir, ARGS.directory)): exit('Error: One or more of the specified directories does not exist.') with sqlite3.connect(ARGS.database) as connection: connection.text_factory = str cursor = connection.cursor() cursor.execute('DROP TABLE IF EXISTS Movies') cursor.execute('''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT, files BLOB)''') for dir in ARGS.directory: cursor.executemany('INSERT INTO Movies VALUES(?, ?, ?, ?)', local_data(dir))
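The same sqlite3 pattern against an in-memory database, with one invented row in place of the scanned film directories.

import sqlite3

rows = [("Alien", "/films/alien", "1.2G", "alien.mkv")]
with sqlite3.connect(":memory:") as connection:
    cursor = connection.cursor()
    cursor.execute("DROP TABLE IF EXISTS Movies")
    cursor.execute("CREATE TABLE Movies(name TEXT, path TEXT, size TEXT, files BLOB)")
    cursor.executemany("INSERT INTO Movies VALUES(?, ?, ?, ?)", rows)
    print(cursor.execute("SELECT name, size FROM Movies").fetchall())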
def send(self, msg): """Queue `msg` for sending to Nvim.""" debug('sent %s', msg) self.loop.send(self._packer.pack(msg))
Queue `msg` for sending to Nvim.
Below is the instruction that describes the task:
### Input:
Queue `msg` for sending to Nvim.
### Response:
def send(self, msg):
    """Queue `msg` for sending to Nvim."""
    debug('sent %s', msg)
    self.loop.send(self._packer.pack(msg))
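A sketch of just the serialization step, assuming self._packer is a msgpack packer (the usual choice for Nvim's msgpack-RPC protocol) and that the msgpack package is installed.

import msgpack

# A msgpack-RPC request is a plain list; packb() turns it into the bytes
# that would be written to the Nvim socket.
msg = [0, 1, "nvim_get_current_line", []]
packed = msgpack.packb(msg)
print(len(packed), msgpack.unpackb(packed))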
def protect(self, password=None, read_protect=False, protect_from=0): """The implementation of :meth:`nfc.tag.Tag.protect` for a generic type 1 tag is limited to setting the NDEF data read-only for tags that are already NDEF formatted. """ return super(Type1Tag, self).protect( password, read_protect, protect_from)
The implementation of :meth:`nfc.tag.Tag.protect` for a generic type 1 tag is limited to setting the NDEF data read-only for tags that are already NDEF formatted.
Below is the the instruction that describes the task: ### Input: The implementation of :meth:`nfc.tag.Tag.protect` for a generic type 1 tag is limited to setting the NDEF data read-only for tags that are already NDEF formatted. ### Response: def protect(self, password=None, read_protect=False, protect_from=0): """The implementation of :meth:`nfc.tag.Tag.protect` for a generic type 1 tag is limited to setting the NDEF data read-only for tags that are already NDEF formatted. """ return super(Type1Tag, self).protect( password, read_protect, protect_from)
def getElementsByName(self, name): """ get element with given name, return list of element objects regarding to 'name' :param name: element name, case sensitive, if elements are auto-generated from LteParser, the name should be lower cased. """ try: return filter(lambda x: x.name == name, self._lattice_eleobjlist) except: return []
get element with given name, return list of element objects regarding to 'name' :param name: element name, case sensitive, if elements are auto-generated from LteParser, the name should be lower cased.
Below is the the instruction that describes the task: ### Input: get element with given name, return list of element objects regarding to 'name' :param name: element name, case sensitive, if elements are auto-generated from LteParser, the name should be lower cased. ### Response: def getElementsByName(self, name): """ get element with given name, return list of element objects regarding to 'name' :param name: element name, case sensitive, if elements are auto-generated from LteParser, the name should be lower cased. """ try: return filter(lambda x: x.name == name, self._lattice_eleobjlist) except: return []
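A standalone version of the same name lookup. Note that on Python 3 filter() returns an iterator, so wrapping it in list() gives the list the docstring promises.

from collections import namedtuple

Element = namedtuple("Element", "name")
lattice = [Element("q1"), Element("b1"), Element("q1")]   # invented elements
matches = list(filter(lambda x: x.name == "q1", lattice))
print(matches)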
def isPe64(self): """ Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE64: return True return False
Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}.
Below is the the instruction that describes the task: ### Input: Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}. ### Response: def isPe64(self): """ Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE64: return True return False
def permission_required(perm, login_url=None, raise_exception=False): """ Re-implementation of the permission_required decorator, honors settings. If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return ``True``, otherwise it will check for the permission as usual. """ def check_perms(user): if not getattr(settings, 'DASHBOARD_REQUIRE_LOGIN', app_settings.REQUIRE_LOGIN): return True # First check if the user has the permission (even anon users) if user.has_perm(perm): return True # In case the 403 handler should be called raise the exception if raise_exception: # pragma: no cover raise PermissionDenied # As the last resort, show the login form return False return user_passes_test(check_perms, login_url=login_url)
Re-implementation of the permission_required decorator, honors settings. If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return ``True``, otherwise it will check for the permission as usual.
Below is the the instruction that describes the task: ### Input: Re-implementation of the permission_required decorator, honors settings. If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return ``True``, otherwise it will check for the permission as usual. ### Response: def permission_required(perm, login_url=None, raise_exception=False): """ Re-implementation of the permission_required decorator, honors settings. If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return ``True``, otherwise it will check for the permission as usual. """ def check_perms(user): if not getattr(settings, 'DASHBOARD_REQUIRE_LOGIN', app_settings.REQUIRE_LOGIN): return True # First check if the user has the permission (even anon users) if user.has_perm(perm): return True # In case the 403 handler should be called raise the exception if raise_exception: # pragma: no cover raise PermissionDenied # As the last resort, show the login form return False return user_passes_test(check_perms, login_url=login_url)
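A framework-free sketch of the same decorator idea, with a plain dict standing in for the user object and a require_login flag standing in for DASHBOARD_REQUIRE_LOGIN.

from functools import wraps

def permission_required(perm, require_login=True):
    def decorator(view):
        @wraps(view)
        def wrapper(user, *args, **kwargs):
            # Mirror the short-circuit: if login is not required, always allow
            if not require_login or perm in user.get("permissions", set()):
                return view(user, *args, **kwargs)
            raise PermissionError("403 Forbidden")
        return wrapper
    return decorator

@permission_required("dashboard.view")
def dashboard(user):
    return "ok"

print(dashboard({"permissions": {"dashboard.view"}}))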
def call_modules(auto_discover=()): """ this is called in project urls.py for registering desired modules (eg.: admin.py) """ for app in settings.INSTALLED_APPS: modules = set(auto_discover) if app in INSTALLED_APPS_REGISTER: modules.update(INSTALLED_APPS_REGISTER[app]) for module in modules: mod = import_module(app) try: import_module('%s.%s' % (app, module)) inst = getattr(mod, '__install__', lambda: None) inst() except: if module_has_submodule(mod, module): raise app_modules_loaded.send(sender=None)
this is called in project urls.py for registering desired modules (eg.: admin.py)
Below is the the instruction that describes the task: ### Input: this is called in project urls.py for registering desired modules (eg.: admin.py) ### Response: def call_modules(auto_discover=()): """ this is called in project urls.py for registering desired modules (eg.: admin.py) """ for app in settings.INSTALLED_APPS: modules = set(auto_discover) if app in INSTALLED_APPS_REGISTER: modules.update(INSTALLED_APPS_REGISTER[app]) for module in modules: mod = import_module(app) try: import_module('%s.%s' % (app, module)) inst = getattr(mod, '__install__', lambda: None) inst() except: if module_has_submodule(mod, module): raise app_modules_loaded.send(sender=None)
def prepare_synteny(tourfile, lastfile, odir, p, opts): """ Prepare synteny plots for movie(). """ qbedfile, sbedfile = get_bed_filenames(lastfile, p, opts) qbedfile = op.abspath(qbedfile) sbedfile = op.abspath(sbedfile) qbed = Bed(qbedfile, sorted=False) contig_to_beds = dict(qbed.sub_beds()) # Create a separate directory for the subplots and movie mkdir(odir, overwrite=True) os.chdir(odir) logging.debug("Change into subdir `{}`".format(odir)) # Make anchorsfile anchorsfile = ".".join(op.basename(lastfile).split(".", 2)[:2]) \ + ".anchors" fw = open(anchorsfile, "w") for b in Blast(lastfile): print("\t".join((gene_name(b.query), gene_name(b.subject), str(int(b.score)))), file=fw) fw.close() # Symlink sbed symlink(sbedfile, op.basename(sbedfile)) return anchorsfile, qbedfile, contig_to_beds
Prepare synteny plots for movie().
Below is the the instruction that describes the task: ### Input: Prepare synteny plots for movie(). ### Response: def prepare_synteny(tourfile, lastfile, odir, p, opts): """ Prepare synteny plots for movie(). """ qbedfile, sbedfile = get_bed_filenames(lastfile, p, opts) qbedfile = op.abspath(qbedfile) sbedfile = op.abspath(sbedfile) qbed = Bed(qbedfile, sorted=False) contig_to_beds = dict(qbed.sub_beds()) # Create a separate directory for the subplots and movie mkdir(odir, overwrite=True) os.chdir(odir) logging.debug("Change into subdir `{}`".format(odir)) # Make anchorsfile anchorsfile = ".".join(op.basename(lastfile).split(".", 2)[:2]) \ + ".anchors" fw = open(anchorsfile, "w") for b in Blast(lastfile): print("\t".join((gene_name(b.query), gene_name(b.subject), str(int(b.score)))), file=fw) fw.close() # Symlink sbed symlink(sbedfile, op.basename(sbedfile)) return anchorsfile, qbedfile, contig_to_beds
def hpx_to_coords(h, shape): """ Generate an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.""" x, z = hpx_to_axes(h, shape) x = np.sqrt(x[0:-1] * x[1:]) z = z[:-1] + 0.5 x = np.ravel(np.ones(shape) * x[:, np.newaxis]) z = np.ravel(np.ones(shape) * z[np.newaxis, :]) return np.vstack((x, z))
Generate an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.
Below is the the instruction that describes the task: ### Input: Generate an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map. ### Response: def hpx_to_coords(h, shape): """ Generate an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.""" x, z = hpx_to_axes(h, shape) x = np.sqrt(x[0:-1] * x[1:]) z = z[:-1] + 0.5 x = np.ravel(np.ones(shape) * x[:, np.newaxis]) z = np.ravel(np.ones(shape) * z[np.newaxis, :]) return np.vstack((x, z))
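The broadcasting trick from hpx_to_coords in isolation: pair every x bin centre with every z value, then stack them into a 2 x N coordinate array. The input values are invented.

import numpy as np

x = np.array([1.0, 2.0, 3.0])
z = np.array([0.5, 1.5])
shape = (x.size, z.size)
xx = np.ravel(np.ones(shape) * x[:, np.newaxis])   # repeat each x across the z axis
zz = np.ravel(np.ones(shape) * z[np.newaxis, :])   # tile z across the x axis
print(np.vstack((xx, zz)))                         # shape (2, 6)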
def getloansurl(idcred, *args, **kwargs): """Request Loans URL. If idcred is set, you'll get a response adequate for a MambuLoan object. If not set, you'll get a response adequate for a MambuLoans object. See mambuloan module and pydoc for further information. Currently implemented filter parameters: * fullDetails * accountState * branchId * centreId * creditOfficerUsername * limit * offset See Mambu official developer documentation for further details, and info on parameters that may be implemented here in the future. """ getparams = [] if kwargs: try: if kwargs["fullDetails"] == True: getparams.append("fullDetails=true") else: getparams.append("fullDetails=false") except Exception as ex: pass try: getparams.append("accountState=%s" % kwargs["accountState"]) except Exception as ex: pass try: getparams.append("branchId=%s" % kwargs["branchId"]) except Exception as ex: pass try: getparams.append("centreId=%s" % kwargs["centreId"]) except Exception as ex: pass try: getparams.append("creditOfficerUsername=%s" % kwargs["creditOfficerUsername"]) except Exception as ex: pass try: getparams.append("offset=%s" % kwargs["offset"]) except Exception as ex: pass try: getparams.append("limit=%s" % kwargs["limit"]) except Exception as ex: pass idcredparam = "" if idcred == "" else "/"+idcred url = getmambuurl(*args,**kwargs) + "loans" + idcredparam + ("" if len(getparams) == 0 else "?" + "&".join(getparams) ) return url
Request Loans URL. If idcred is set, you'll get a response adequate for a MambuLoan object. If not set, you'll get a response adequate for a MambuLoans object. See mambuloan module and pydoc for further information. Currently implemented filter parameters: * fullDetails * accountState * branchId * centreId * creditOfficerUsername * limit * offset See Mambu official developer documentation for further details, and info on parameters that may be implemented here in the future.
Below is the the instruction that describes the task: ### Input: Request Loans URL. If idcred is set, you'll get a response adequate for a MambuLoan object. If not set, you'll get a response adequate for a MambuLoans object. See mambuloan module and pydoc for further information. Currently implemented filter parameters: * fullDetails * accountState * branchId * centreId * creditOfficerUsername * limit * offset See Mambu official developer documentation for further details, and info on parameters that may be implemented here in the future. ### Response: def getloansurl(idcred, *args, **kwargs): """Request Loans URL. If idcred is set, you'll get a response adequate for a MambuLoan object. If not set, you'll get a response adequate for a MambuLoans object. See mambuloan module and pydoc for further information. Currently implemented filter parameters: * fullDetails * accountState * branchId * centreId * creditOfficerUsername * limit * offset See Mambu official developer documentation for further details, and info on parameters that may be implemented here in the future. """ getparams = [] if kwargs: try: if kwargs["fullDetails"] == True: getparams.append("fullDetails=true") else: getparams.append("fullDetails=false") except Exception as ex: pass try: getparams.append("accountState=%s" % kwargs["accountState"]) except Exception as ex: pass try: getparams.append("branchId=%s" % kwargs["branchId"]) except Exception as ex: pass try: getparams.append("centreId=%s" % kwargs["centreId"]) except Exception as ex: pass try: getparams.append("creditOfficerUsername=%s" % kwargs["creditOfficerUsername"]) except Exception as ex: pass try: getparams.append("offset=%s" % kwargs["offset"]) except Exception as ex: pass try: getparams.append("limit=%s" % kwargs["limit"]) except Exception as ex: pass idcredparam = "" if idcred == "" else "/"+idcred url = getmambuurl(*args,**kwargs) + "loans" + idcredparam + ("" if len(getparams) == 0 else "?" + "&".join(getparams) ) return url
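Not how the module above builds its query string, just a compact alternative sketch using urllib.parse.urlencode; the base URL and filter values are hypothetical.

from urllib.parse import urlencode

def loans_url(base, idcred="", **filters):
    params = {k: v for k, v in filters.items() if v is not None}
    url = base + "loans" + ("/" + idcred if idcred else "")
    return url + ("?" + urlencode(params) if params else "")

print(loans_url("https://example.mambu.com/api/", "12345",
                fullDetails="true", limit=50, offset=None))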
def requires_permission(permission): """ View decorator that requires a certain permission to be present in ``current_auth.permissions`` before the view is allowed to proceed. Aborts with ``403 Forbidden`` if the permission is not present. The decorated view will have an ``is_available`` method that can be called to perform the same test. :param permission: Permission that is required. If a collection type is provided, any one permission must be available """ def inner(f): def is_available_here(): if not hasattr(current_auth, 'permissions'): return False elif is_collection(permission): return bool(current_auth.permissions.intersection(permission)) else: return permission in current_auth.permissions def is_available(context=None): result = is_available_here() if result and hasattr(f, 'is_available'): # We passed, but we're wrapping another test, so ask there as well return f.is_available(context) return result @wraps(f) def wrapper(*args, **kwargs): add_auth_attribute('login_required', True) if not is_available_here(): abort(403) return f(*args, **kwargs) wrapper.requires_permission = permission wrapper.is_available = is_available return wrapper return inner
View decorator that requires a certain permission to be present in ``current_auth.permissions`` before the view is allowed to proceed. Aborts with ``403 Forbidden`` if the permission is not present. The decorated view will have an ``is_available`` method that can be called to perform the same test. :param permission: Permission that is required. If a collection type is provided, any one permission must be available
Below is the the instruction that describes the task: ### Input: View decorator that requires a certain permission to be present in ``current_auth.permissions`` before the view is allowed to proceed. Aborts with ``403 Forbidden`` if the permission is not present. The decorated view will have an ``is_available`` method that can be called to perform the same test. :param permission: Permission that is required. If a collection type is provided, any one permission must be available ### Response: def requires_permission(permission): """ View decorator that requires a certain permission to be present in ``current_auth.permissions`` before the view is allowed to proceed. Aborts with ``403 Forbidden`` if the permission is not present. The decorated view will have an ``is_available`` method that can be called to perform the same test. :param permission: Permission that is required. If a collection type is provided, any one permission must be available """ def inner(f): def is_available_here(): if not hasattr(current_auth, 'permissions'): return False elif is_collection(permission): return bool(current_auth.permissions.intersection(permission)) else: return permission in current_auth.permissions def is_available(context=None): result = is_available_here() if result and hasattr(f, 'is_available'): # We passed, but we're wrapping another test, so ask there as well return f.is_available(context) return result @wraps(f) def wrapper(*args, **kwargs): add_auth_attribute('login_required', True) if not is_available_here(): abort(403) return f(*args, **kwargs) wrapper.requires_permission = permission wrapper.is_available = is_available return wrapper return inner
def emit(self, span_datas): """ :type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit """ envelopes = [self.span_data_to_envelope(sd) for sd in span_datas] result = self._transmit(envelopes) if result > 0: self.storage.put(envelopes, result)
:type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit
Below is the the instruction that describes the task: ### Input: :type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit ### Response: def emit(self, span_datas): """ :type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit """ envelopes = [self.span_data_to_envelope(sd) for sd in span_datas] result = self._transmit(envelopes) if result > 0: self.storage.put(envelopes, result)
def __substitute_objects(self, value, context_dict): """ recursively substitute value with the context_dict """ if type(value) == dict: return dict([(k, self.__substitute_objects(v, context_dict)) for k, v in value.items()]) elif type(value) == str: try: return value % context_dict except KeyError: e = sys.exc_info()[1] logger.warn("Could not specialize %s! Error: %s" % (value, e)) return value else: return value
recursively substitute value with the context_dict
Below is the the instruction that describes the task: ### Input: recursively substitute value with the context_dict ### Response: def __substitute_objects(self, value, context_dict): """ recursively substitute value with the context_dict """ if type(value) == dict: return dict([(k, self.__substitute_objects(v, context_dict)) for k, v in value.items()]) elif type(value) == str: try: return value % context_dict except KeyError: e = sys.exc_info()[1] logger.warn("Could not specialize %s! Error: %s" % (value, e)) return value else: return value
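The same substitution logic without the class plumbing: '%' formatting against a context dict, applied recursively to nested dictionaries. The config values are invented.

def substitute(value, context):
    if isinstance(value, dict):
        return {k: substitute(v, context) for k, v in value.items()}
    if isinstance(value, str):
        try:
            return value % context
        except KeyError:
            return value        # leave unknown placeholders untouched
    return value

cfg = {"host": "%(env)s.example.com", "nested": {"path": "/srv/%(app)s"}}
print(substitute(cfg, {"env": "prod", "app": "web"}))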
def visitAnnotation(self, ctx: ShExDocParser.AnnotationContext): """ annotation: '//' predicate (iri | literal) """ # Annotations apply to the expression, NOT the shape (!) annot = Annotation(self.context.predicate_to_IRI(ctx.predicate())) if ctx.iri(): annot.object = self.context.iri_to_iriref(ctx.iri()) else: annot.object = self.context.literal_to_ObjectLiteral(ctx.literal()) self.annotations.append(annot)
annotation: '//' predicate (iri | literal)
Below is the the instruction that describes the task: ### Input: annotation: '//' predicate (iri | literal) ### Response: def visitAnnotation(self, ctx: ShExDocParser.AnnotationContext): """ annotation: '//' predicate (iri | literal) """ # Annotations apply to the expression, NOT the shape (!) annot = Annotation(self.context.predicate_to_IRI(ctx.predicate())) if ctx.iri(): annot.object = self.context.iri_to_iriref(ctx.iri()) else: annot.object = self.context.literal_to_ObjectLiteral(ctx.literal()) self.annotations.append(annot)
def filter_url(pkg_type, url): """ Returns URL of specified file type 'source', 'egg', or 'all' """ bad_stuff = ["?modtime", "#md5="] for junk in bad_stuff: if junk in url: url = url.split(junk)[0] break #pkg_spec==dev (svn) if url.endswith("-dev"): url = url.split("#egg=")[0] if pkg_type == "all": return url elif pkg_type == "source": valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"] for extension in valid_source_types: if url.lower().endswith(extension): return url elif pkg_type == "egg": if url.lower().endswith(".egg"): return url
Returns URL of specified file type 'source', 'egg', or 'all'
Below is the the instruction that describes the task: ### Input: Returns URL of specified file type 'source', 'egg', or 'all' ### Response: def filter_url(pkg_type, url): """ Returns URL of specified file type 'source', 'egg', or 'all' """ bad_stuff = ["?modtime", "#md5="] for junk in bad_stuff: if junk in url: url = url.split(junk)[0] break #pkg_spec==dev (svn) if url.endswith("-dev"): url = url.split("#egg=")[0] if pkg_type == "all": return url elif pkg_type == "source": valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"] for extension in valid_source_types: if url.lower().endswith(extension): return url elif pkg_type == "egg": if url.lower().endswith(".egg"): return url
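A quick standalone check of the source-URL logic above, with made-up URLs; str.endswith() accepts a tuple, which shortens the extension test.

def is_source_url(url):
    for junk in ("?modtime", "#md5="):
        if junk in url:
            url = url.split(junk)[0]
            break
    return url.lower().endswith((".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"))

print(is_source_url("https://example.org/pkg-1.0.tar.gz#md5=abc123"))  # True
print(is_source_url("https://example.org/pkg-1.0-py3.egg"))            # False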
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None): """ :param frame: current frame :param birthframe: frame where this animation starts returning something other than None :param startframe: frame where animation starts to evolve :param stopframe: frame where animation is completed :param deathframe: frame where animation starts to return None :return: """ return self.anim.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe)
:param frame: current frame :param birthframe: frame where this animation starts returning something other than None :param startframe: frame where animation starts to evolve :param stopframe: frame where animation is completed :param deathframe: frame where animation starts to return None :return:
Below is the the instruction that describes the task: ### Input: :param frame: current frame :param birthframe: frame where this animation starts returning something other than None :param startframe: frame where animation starts to evolve :param stopframe: frame where animation is completed :param deathframe: frame where animation starts to return None :return: ### Response: def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None): """ :param frame: current frame :param birthframe: frame where this animation starts returning something other than None :param startframe: frame where animation starts to evolve :param stopframe: frame where animation is completed :param deathframe: frame where animation starts to return None :return: """ return self.anim.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe)
def update(self, identifier, new_instance): """ Update an encryptable field, make sure that: * We won't change the encryption context key * The new value is going to be encrypted * The return instance.plaintext is the updated one Note: Will expunge the returned instance """ old_instance = self.retrieve(identifier) old_encrypted_identifier = old_instance.encrypted_identifier if ( new_instance.encryption_context_key and old_instance.encryption_context_key != new_instance.encryption_context_key ): raise ValueError("Cannot change encryption context key") # If updating a non encrypted field - skip if new_instance.plaintext is None and new_instance.encrypted_relationship is None: result = super().update(identifier, new_instance) self.expunge(result) return result # Verify that the new instance is encrypted if it should be # If it's not - encrypt it with the old key # If it is - save the expected new plaintext if new_instance.plaintext is not None: expected_new_plaintext = new_instance.plaintext new_instance = self.reencrypt_instance(new_instance, old_instance.encryption_context_key) else: decrypt, expected_new_plaintext = decrypt_instance(new_instance) result = super().update(identifier, new_instance) # Delete the old encrypted value (instead of using sqlalchemy cascade) if old_encrypted_identifier != new_instance.encrypted_identifier: self.encrypted_store.delete(old_encrypted_identifier) # Update the return result, super().update() won't do it. self.expunge(result) result.plaintext = expected_new_plaintext return result
Update an encryptable field, make sure that: * We won't change the encryption context key * The new value is going to be encrypted * The return instance.plaintext is the updated one Note: Will expunge the returned instance
Below is the the instruction that describes the task: ### Input: Update an encryptable field, make sure that: * We won't change the encryption context key * The new value is going to be encrypted * The return instance.plaintext is the updated one Note: Will expunge the returned instance ### Response: def update(self, identifier, new_instance): """ Update an encryptable field, make sure that: * We won't change the encryption context key * The new value is going to be encrypted * The return instance.plaintext is the updated one Note: Will expunge the returned instance """ old_instance = self.retrieve(identifier) old_encrypted_identifier = old_instance.encrypted_identifier if ( new_instance.encryption_context_key and old_instance.encryption_context_key != new_instance.encryption_context_key ): raise ValueError("Cannot change encryption context key") # If updating a non encrypted field - skip if new_instance.plaintext is None and new_instance.encrypted_relationship is None: result = super().update(identifier, new_instance) self.expunge(result) return result # Verify that the new instance is encrypted if it should be # If it's not - encrypt it with the old key # If it is - save the expected new plaintext if new_instance.plaintext is not None: expected_new_plaintext = new_instance.plaintext new_instance = self.reencrypt_instance(new_instance, old_instance.encryption_context_key) else: decrypt, expected_new_plaintext = decrypt_instance(new_instance) result = super().update(identifier, new_instance) # Delete the old encrypted value (instead of using sqlalchemy cascade) if old_encrypted_identifier != new_instance.encrypted_identifier: self.encrypted_store.delete(old_encrypted_identifier) # Update the return result, super().update() won't do it. self.expunge(result) result.plaintext = expected_new_plaintext return result
def merge(self): """Merges the rendered blueprint into the application.""" temp_dir = self.temp_dir app_dir = self.application.directory for root, dirs, files in os.walk(temp_dir): for directory in dirs: directory = os.path.join(root, directory) directory = directory.replace(temp_dir, app_dir, 1) try: os.mkdir(directory) except OSError: pass for file in files: source = os.path.join(root, file) target = source.replace(temp_dir, app_dir, 1) relative_target = target.replace(app_dir, '.') action = 'r' if ( os.path.exists(target) and not filecmp.cmp(source, target, shallow=False) and os.stat(target).st_size > 0 ): # target exists, is not empty, and does not # match source if target.endswith('__init__.py'): # default merge __init__.py files # if non-empty, these should only # contain imports from submoduiles action = 'm' elif target.endswith('base.py'): # default skip base.py files # these should be extended by the developer action = 's' else: default = 'm' action = click.prompt( style.prompt( '%s already exists, ' '[r]eplace, [s]kip, or [m]erge?' % ( relative_target ), ), default=style.default(default) ) if self.interactive else default action = click.unstyle(action).lower() if action not in {'r', 'm', 's'}: action = default if action == 's': self.stdout.write( '? %s' % style.white(relative_target), fg='yellow' ) continue if action == 'r': with open(source, 'r') as source_file: with open(target, 'w') as target_file: target_file.write(source_file.read()) self.stdout.write( style.green( '+ %s' % style.white(relative_target) ) ) if action == 'm': with open(target, 'r') as target_file: with open(source, 'r') as source_file: merged = merge( target_file.read(), source_file.read() ) with open(target, 'w') as target_file: target_file.write(merged) self.stdout.write( style.yellow('> %s' % style.white(relative_target)) )
Merges the rendered blueprint into the application.
Below is the the instruction that describes the task: ### Input: Merges the rendered blueprint into the application. ### Response: def merge(self): """Merges the rendered blueprint into the application.""" temp_dir = self.temp_dir app_dir = self.application.directory for root, dirs, files in os.walk(temp_dir): for directory in dirs: directory = os.path.join(root, directory) directory = directory.replace(temp_dir, app_dir, 1) try: os.mkdir(directory) except OSError: pass for file in files: source = os.path.join(root, file) target = source.replace(temp_dir, app_dir, 1) relative_target = target.replace(app_dir, '.') action = 'r' if ( os.path.exists(target) and not filecmp.cmp(source, target, shallow=False) and os.stat(target).st_size > 0 ): # target exists, is not empty, and does not # match source if target.endswith('__init__.py'): # default merge __init__.py files # if non-empty, these should only # contain imports from submoduiles action = 'm' elif target.endswith('base.py'): # default skip base.py files # these should be extended by the developer action = 's' else: default = 'm' action = click.prompt( style.prompt( '%s already exists, ' '[r]eplace, [s]kip, or [m]erge?' % ( relative_target ), ), default=style.default(default) ) if self.interactive else default action = click.unstyle(action).lower() if action not in {'r', 'm', 's'}: action = default if action == 's': self.stdout.write( '? %s' % style.white(relative_target), fg='yellow' ) continue if action == 'r': with open(source, 'r') as source_file: with open(target, 'w') as target_file: target_file.write(source_file.read()) self.stdout.write( style.green( '+ %s' % style.white(relative_target) ) ) if action == 'm': with open(target, 'r') as target_file: with open(source, 'r') as source_file: merged = merge( target_file.read(), source_file.read() ) with open(target, 'w') as target_file: target_file.write(merged) self.stdout.write( style.yellow('> %s' % style.white(relative_target)) )
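filecmp.cmp(..., shallow=False) as used in the merge step, shown on two throwaway files so the content comparison (rather than the stat-only default) is visible.

import filecmp
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    a, b = os.path.join(d, "a.py"), os.path.join(d, "b.py")
    for path in (a, b):
        with open(path, "w") as fh:
            fh.write("print('x')\n")
    print(filecmp.cmp(a, b, shallow=False))   # True: byte-identical contents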
def sum(self, property): """Getting the sum according to the given property :@param property :@type property: string :@return int/float """ self.__prepare() total = 0 for i in self._json_data: total += i.get(property) return total
Getting the sum according to the given property :@param property :@type property: string :@return int/float
Below is the the instruction that describes the task: ### Input: Getting the sum according to the given property :@param property :@type property: string :@return int/float ### Response: def sum(self, property): """Getting the sum according to the given property :@param property :@type property: string :@return int/float """ self.__prepare() total = 0 for i in self._json_data: total += i.get(property) return total
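The same aggregation written as a generator expression over plain dicts; the records are invented.

records = [{"price": 10.5}, {"price": 4.0}, {"price": 2.5}]
total = sum(item.get("price", 0) for item in records)
print(total)   # 17.0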
def get_url_name(self, action_url_name="list"): """ Get full namespaced url name to use for reverse() """ url_name = "{}-{}".format(self.basename, action_url_name) namespace = self.request.resolver_match.namespace if namespace: url_name = "{}:{}".format(namespace, url_name) return url_name
Get full namespaced url name to use for reverse()
Below is the the instruction that describes the task: ### Input: Get full namespaced url name to use for reverse() ### Response: def get_url_name(self, action_url_name="list"): """ Get full namespaced url name to use for reverse() """ url_name = "{}-{}".format(self.basename, action_url_name) namespace = self.request.resolver_match.namespace if namespace: url_name = "{}:{}".format(namespace, url_name) return url_name
def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs): ''' Add a new stream capturer to the manager. Add a new stream capturer to the manager with the provided configuration details. If an existing capturer is monitoring the same address the new handler will be added to it. Args: name: A string defining the new capturer's name. address: A tuple containing address data for the capturer. Check the :class:`SocketStreamCapturer` documentation for what is required. conn_type: A string defining the connection type. Check the :class:`SocketStreamCapturer` documentation for a list of valid options. log_dir_path: An optional path defining the directory where the capturer should write its files. If this isn't provided the root log directory from the manager configuration is used. ''' capture_handler_conf = kwargs if not log_dir_path: log_dir_path = self._mngr_conf['root_log_directory'] log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path)) capture_handler_conf['log_dir'] = log_dir_path capture_handler_conf['name'] = name if 'rotate_log' not in capture_handler_conf: capture_handler_conf['rotate_log'] = True transforms = [] if 'pre_write_transforms' in capture_handler_conf: for transform in capture_handler_conf['pre_write_transforms']: if isinstance(transform, str): if globals().has_key(transform): transforms.append(globals().get(transform)) else: msg = ( 'Unable to load data transformation ' '"{}" for handler "{}"' ).format( transform, capture_handler_conf['name'] ) log.warn(msg) elif hasattr(transform, '__call__'): transforms.append(transform) else: msg = ( 'Unable to determine how to load data transform "{}"' ).format(transform) log.warn(msg) capture_handler_conf['pre_write_transforms'] = transforms address_key = str(address) if address_key in self._stream_capturers: capturer = self._stream_capturers[address_key][0] capturer.add_handler(capture_handler_conf) return socket_logger = SocketStreamCapturer(capture_handler_conf, address, conn_type) greenlet = gevent.spawn(socket_logger.socket_monitor_loop) self._stream_capturers[address_key] = ( socket_logger, greenlet ) self._pool.add(greenlet)
Add a new stream capturer to the manager. Add a new stream capturer to the manager with the provided configuration details. If an existing capturer is monitoring the same address the new handler will be added to it. Args: name: A string defining the new capturer's name. address: A tuple containing address data for the capturer. Check the :class:`SocketStreamCapturer` documentation for what is required. conn_type: A string defining the connection type. Check the :class:`SocketStreamCapturer` documentation for a list of valid options. log_dir_path: An optional path defining the directory where the capturer should write its files. If this isn't provided the root log directory from the manager configuration is used.
Below is the the instruction that describes the task: ### Input: Add a new stream capturer to the manager. Add a new stream capturer to the manager with the provided configuration details. If an existing capturer is monitoring the same address the new handler will be added to it. Args: name: A string defining the new capturer's name. address: A tuple containing address data for the capturer. Check the :class:`SocketStreamCapturer` documentation for what is required. conn_type: A string defining the connection type. Check the :class:`SocketStreamCapturer` documentation for a list of valid options. log_dir_path: An optional path defining the directory where the capturer should write its files. If this isn't provided the root log directory from the manager configuration is used. ### Response: def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs): ''' Add a new stream capturer to the manager. Add a new stream capturer to the manager with the provided configuration details. If an existing capturer is monitoring the same address the new handler will be added to it. Args: name: A string defining the new capturer's name. address: A tuple containing address data for the capturer. Check the :class:`SocketStreamCapturer` documentation for what is required. conn_type: A string defining the connection type. Check the :class:`SocketStreamCapturer` documentation for a list of valid options. log_dir_path: An optional path defining the directory where the capturer should write its files. If this isn't provided the root log directory from the manager configuration is used. ''' capture_handler_conf = kwargs if not log_dir_path: log_dir_path = self._mngr_conf['root_log_directory'] log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path)) capture_handler_conf['log_dir'] = log_dir_path capture_handler_conf['name'] = name if 'rotate_log' not in capture_handler_conf: capture_handler_conf['rotate_log'] = True transforms = [] if 'pre_write_transforms' in capture_handler_conf: for transform in capture_handler_conf['pre_write_transforms']: if isinstance(transform, str): if globals().has_key(transform): transforms.append(globals().get(transform)) else: msg = ( 'Unable to load data transformation ' '"{}" for handler "{}"' ).format( transform, capture_handler_conf['name'] ) log.warn(msg) elif hasattr(transform, '__call__'): transforms.append(transform) else: msg = ( 'Unable to determine how to load data transform "{}"' ).format(transform) log.warn(msg) capture_handler_conf['pre_write_transforms'] = transforms address_key = str(address) if address_key in self._stream_capturers: capturer = self._stream_capturers[address_key][0] capturer.add_handler(capture_handler_conf) return socket_logger = SocketStreamCapturer(capture_handler_conf, address, conn_type) greenlet = gevent.spawn(socket_logger.socket_monitor_loop) self._stream_capturers[address_key] = ( socket_logger, greenlet ) self._pool.add(greenlet)
def enter(self, container_alias): ''' a method to open up a terminal inside a running container :param container_alias: string with name or id of container :return: None ''' title = '%s.enter' % self.__class__.__name__ # validate inputs input_fields = { 'container_alias': container_alias } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # compose system command from os import system sys_cmd = 'docker exec -it %s sh' % container_alias if self.localhost.os.sysname in ('Windows'): sys_cmd = 'winpty %s' % sys_cmd # open up terminal system(sys_cmd)
a method to open up a terminal inside a running container :param container_alias: string with name or id of container :return: None
Below is the the instruction that describes the task: ### Input: a method to open up a terminal inside a running container :param container_alias: string with name or id of container :return: None ### Response: def enter(self, container_alias): ''' a method to open up a terminal inside a running container :param container_alias: string with name or id of container :return: None ''' title = '%s.enter' % self.__class__.__name__ # validate inputs input_fields = { 'container_alias': container_alias } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # compose system command from os import system sys_cmd = 'docker exec -it %s sh' % container_alias if self.localhost.os.sysname in ('Windows'): sys_cmd = 'winpty %s' % sys_cmd # open up terminal system(sys_cmd)
def format_pixels( top, bottom, reset=True, repeat=1 ): """Return the ANSI escape sequence to render two vertically-stacked pixels as a single monospace character. top Top colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour bottom Bottom colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour reset Reset the formatting at the end (default: True) repeat Number of horizontal pixels to render (default: 1) """ top_src = None if isinstance( top, int ): top_src = top else: top_rgba = colour.normalise_rgba( top ) if top_rgba[3] != 0: top_src = top_rgba bottom_src = None if isinstance( bottom, int ): bottom_src = bottom else: bottom_rgba = colour.normalise_rgba( bottom ) if bottom_rgba[3] != 0: bottom_src = bottom_rgba # short circuit for empty pixel if (top_src is None) and (bottom_src is None): return ' '*repeat string = '▀'*repeat; colour_format = [] if top_src == bottom_src: string = '█'*repeat elif (top_src is None) and (bottom_src is not None): string = '▄'*repeat if (top_src is None) and (bottom_src is not None): if isinstance( bottom_src, int ): colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( bottom_src ) ) else: colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *bottom_src[:3] ) ) else: if isinstance( top_src, int ): colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( top_src ) ) else: colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *top_src[:3] ) ) if top_src is not None and bottom_src is not None and top_src != bottom_src: if isinstance( top_src, int ): colour_format.append( ANSI_FORMAT_BACKGROUND_XTERM_CMD.format( bottom_src ) ) else: colour_format.append( ANSI_FORMAT_BACKGROUND_CMD.format( *bottom_src[:3] ) ) colour_format = ANSI_FORMAT_BASE.format( ';'.join( colour_format ) ) reset_format = '' if not reset else ANSI_FORMAT_RESET return '{}{}{}'.format( colour_format, string, reset_format )
Return the ANSI escape sequence to render two vertically-stacked pixels as a single monospace character. top Top colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour bottom Bottom colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour reset Reset the formatting at the end (default: True) repeat Number of horizontal pixels to render (default: 1)
Below is the the instruction that describes the task: ### Input: Return the ANSI escape sequence to render two vertically-stacked pixels as a single monospace character. top Top colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour bottom Bottom colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour reset Reset the formatting at the end (default: True) repeat Number of horizontal pixels to render (default: 1) ### Response: def format_pixels( top, bottom, reset=True, repeat=1 ): """Return the ANSI escape sequence to render two vertically-stacked pixels as a single monospace character. top Top colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour bottom Bottom colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour reset Reset the formatting at the end (default: True) repeat Number of horizontal pixels to render (default: 1) """ top_src = None if isinstance( top, int ): top_src = top else: top_rgba = colour.normalise_rgba( top ) if top_rgba[3] != 0: top_src = top_rgba bottom_src = None if isinstance( bottom, int ): bottom_src = bottom else: bottom_rgba = colour.normalise_rgba( bottom ) if bottom_rgba[3] != 0: bottom_src = bottom_rgba # short circuit for empty pixel if (top_src is None) and (bottom_src is None): return ' '*repeat string = '▀'*repeat; colour_format = [] if top_src == bottom_src: string = '█'*repeat elif (top_src is None) and (bottom_src is not None): string = '▄'*repeat if (top_src is None) and (bottom_src is not None): if isinstance( bottom_src, int ): colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( bottom_src ) ) else: colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *bottom_src[:3] ) ) else: if isinstance( top_src, int ): colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( top_src ) ) else: colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *top_src[:3] ) ) if top_src is not None and bottom_src is not None and top_src != bottom_src: if isinstance( top_src, int ): colour_format.append( ANSI_FORMAT_BACKGROUND_XTERM_CMD.format( bottom_src ) ) else: colour_format.append( ANSI_FORMAT_BACKGROUND_CMD.format( *bottom_src[:3] ) ) colour_format = ANSI_FORMAT_BASE.format( ';'.join( colour_format ) ) reset_format = '' if not reset else ANSI_FORMAT_RESET return '{}{}{}'.format( colour_format, string, reset_format )
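A hand-rolled version of the half-block trick for one colour pair, using the standard 24-bit ANSI foreground/background codes rather than the module's format constants.

# Red over blue, purely illustrative: foreground colours the upper half block,
# background shows through as the lower half.
top, bottom = (255, 0, 0), (0, 0, 255)
cell = "\x1b[38;2;{};{};{}m\x1b[48;2;{};{};{}m\u2580\x1b[0m".format(*top, *bottom)
print(cell * 8)    # eight two-pixel columns, reset after each cell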
def callback(self): '''Run the callback''' self._callback(*self._args, **self._kwargs) self._last_checked = time.time()
Run the callback
Below is the instruction that describes the task:
### Input:
Run the callback
### Response:
def callback(self):
    '''Run the callback'''
    self._callback(*self._args, **self._kwargs)
    self._last_checked = time.time()
def propose_unif(self): """Propose a new live point by sampling *uniformly* within the union of ellipsoids.""" while True: # Sample a point from the union of ellipsoids. # Returns the point `u`, ellipsoid index `idx`, and number of # overlapping ellipsoids `q` at position `u`. u, idx, q = self.mell.sample(rstate=self.rstate, return_q=True) # Check if the point is within the unit cube. if unitcheck(u, self.nonperiodic): # Accept the point with probability 1/q to account for # overlapping ellipsoids. if q == 1 or self.rstate.rand() < 1.0 / q: break # if successful, we're done! return u, self.mell.ells[idx].axes
Propose a new live point by sampling *uniformly* within the union of ellipsoids.
Below is the the instruction that describes the task: ### Input: Propose a new live point by sampling *uniformly* within the union of ellipsoids. ### Response: def propose_unif(self): """Propose a new live point by sampling *uniformly* within the union of ellipsoids.""" while True: # Sample a point from the union of ellipsoids. # Returns the point `u`, ellipsoid index `idx`, and number of # overlapping ellipsoids `q` at position `u`. u, idx, q = self.mell.sample(rstate=self.rstate, return_q=True) # Check if the point is within the unit cube. if unitcheck(u, self.nonperiodic): # Accept the point with probability 1/q to account for # overlapping ellipsoids. if q == 1 or self.rstate.rand() < 1.0 / q: break # if successful, we're done! return u, self.mell.ells[idx].axes
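The 1/q acceptance rule in isolation: a point drawn from a region covered by q overlapping ellipsoids is kept with probability 1/q, which keeps the overall sampling uniform over the union.

import numpy as np

rng = np.random.default_rng(0)
q = 3
accepted = sum(rng.random() < 1.0 / q for _ in range(30000))
print(accepted / 30000)   # close to 1/3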
def set_windows_permissions(filename): ''' At least on windows 7 if a file is created on an Admin account, Other users will not be given execute or full control. However if a user creates the file himself it will work... So just always change permissions after creating a file on windows Change the permissions for Allusers of the application The Everyone Group Full access http://timgolden.me.uk/python/win32_how_do_i/add-security-to-a-file.html ''' #Todo rename this to allow_all, also make international not just for english.. if os.name == 'nt': try: everyone, domain, type = win32security.LookupAccountName( "", "Everyone") except Exception: # Todo fails on non english langauge systesm ... FU WINDOWS # Just allow permission for the current user then... everyone, domain, type = win32security.LookupAccountName ("", win32api.GetUserName()) # ~ user, domain, type = win32security.LookupAccountName ("", win32api.GetUserName()) #~ userx, domain, type = win32security.LookupAccountName ("", "User") #~ usery, domain, type = win32security.LookupAccountName ("", "User Y") sd = win32security.GetFileSecurity( filename, win32security.DACL_SECURITY_INFORMATION) # instead of dacl = win32security.ACL() dacl = sd.GetSecurityDescriptorDacl() #~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_GENERIC_READ | con.FILE_GENERIC_WRITE, everyone) #~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_ALL_ACCESS, user) dacl.AddAccessAllowedAce( win32security.ACL_REVISION, con.FILE_ALL_ACCESS, everyone) sd.SetSecurityDescriptorDacl(1, dacl, 0) # may not be necessary win32security.SetFileSecurity( filename, win32security.DACL_SECURITY_INFORMATION, sd)
At least on windows 7 if a file is created on an Admin account, Other users will not be given execute or full control. However if a user creates the file himself it will work... So just always change permissions after creating a file on windows Change the permissions for Allusers of the application The Everyone Group Full access http://timgolden.me.uk/python/win32_how_do_i/add-security-to-a-file.html
Below is the the instruction that describes the task: ### Input: At least on windows 7 if a file is created on an Admin account, Other users will not be given execute or full control. However if a user creates the file himself it will work... So just always change permissions after creating a file on windows Change the permissions for Allusers of the application The Everyone Group Full access http://timgolden.me.uk/python/win32_how_do_i/add-security-to-a-file.html ### Response: def set_windows_permissions(filename): ''' At least on windows 7 if a file is created on an Admin account, Other users will not be given execute or full control. However if a user creates the file himself it will work... So just always change permissions after creating a file on windows Change the permissions for Allusers of the application The Everyone Group Full access http://timgolden.me.uk/python/win32_how_do_i/add-security-to-a-file.html ''' #Todo rename this to allow_all, also make international not just for english.. if os.name == 'nt': try: everyone, domain, type = win32security.LookupAccountName( "", "Everyone") except Exception: # Todo fails on non english langauge systesm ... FU WINDOWS # Just allow permission for the current user then... everyone, domain, type = win32security.LookupAccountName ("", win32api.GetUserName()) # ~ user, domain, type = win32security.LookupAccountName ("", win32api.GetUserName()) #~ userx, domain, type = win32security.LookupAccountName ("", "User") #~ usery, domain, type = win32security.LookupAccountName ("", "User Y") sd = win32security.GetFileSecurity( filename, win32security.DACL_SECURITY_INFORMATION) # instead of dacl = win32security.ACL() dacl = sd.GetSecurityDescriptorDacl() #~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_GENERIC_READ | con.FILE_GENERIC_WRITE, everyone) #~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_ALL_ACCESS, user) dacl.AddAccessAllowedAce( win32security.ACL_REVISION, con.FILE_ALL_ACCESS, everyone) sd.SetSecurityDescriptorDacl(1, dacl, 0) # may not be necessary win32security.SetFileSecurity( filename, win32security.DACL_SECURITY_INFORMATION, sd)
def get_longitude_variables(nc): ''' Returns a list of all variables matching definitions for longitude :param netcdf4.dataset nc: an open netcdf dataset object ''' longitude_variables = [] # standard_name takes precedence for variable in nc.get_variables_by_attributes(standard_name="longitude"): longitude_variables.append(variable.name) # Then axis for variable in nc.get_variables_by_attributes(axis='X'): if variable.name not in longitude_variables: longitude_variables.append(variable.name) check_fn = partial(attr_membership, value_set=VALID_LON_UNITS, modifier_fn=lambda s: s.lower()) for variable in nc.get_variables_by_attributes(units=check_fn): if variable.name not in longitude_variables: longitude_variables.append(variable.name) return longitude_variables
Returns a list of all variables matching definitions for longitude :param netcdf4.dataset nc: an open netcdf dataset object
Below is the the instruction that describes the task: ### Input: Returns a list of all variables matching definitions for longitude :param netcdf4.dataset nc: an open netcdf dataset object ### Response: def get_longitude_variables(nc): ''' Returns a list of all variables matching definitions for longitude :param netcdf4.dataset nc: an open netcdf dataset object ''' longitude_variables = [] # standard_name takes precedence for variable in nc.get_variables_by_attributes(standard_name="longitude"): longitude_variables.append(variable.name) # Then axis for variable in nc.get_variables_by_attributes(axis='X'): if variable.name not in longitude_variables: longitude_variables.append(variable.name) check_fn = partial(attr_membership, value_set=VALID_LON_UNITS, modifier_fn=lambda s: s.lower()) for variable in nc.get_variables_by_attributes(units=check_fn): if variable.name not in longitude_variables: longitude_variables.append(variable.name) return longitude_variables
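functools.partial as used for the units check, with a stand-in attr_membership and a small unit set; the real helper and VALID_LON_UNITS live elsewhere in that module.

from functools import partial

def attr_membership(value, value_set, modifier_fn):
    return modifier_fn(value) in value_set

check_fn = partial(attr_membership,
                   value_set={"degrees_east", "degree_east"},
                   modifier_fn=str.lower)
print(check_fn("Degrees_East"), check_fn("meters"))   # True False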
def ping(self): """ Ping the LDBD Server and return any message received back as a string. @return: message received (may be empty) from LDBD Server as a string """ msg = "PING\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error pinging server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply
Ping the LDBD Server and return any message received back as a string. @return: message received (may be empty) from LDBD Server as a string
Below is the the instruction that describes the task: ### Input: Ping the LDBD Server and return any message received back as a string. @return: message received (may be empty) from LDBD Server as a string ### Response: def ping(self): """ Ping the LDBD Server and return any message received back as a string. @return: message received (may be empty) from LDBD Server as a string """ msg = "PING\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error pinging server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply
def default_reset_type(self): """! @brief One of the Target.ResetType enums. @todo Support multiple cores. """ try: resetSequence = self._info.debugs[0].attrib['defaultResetSequence'] if resetSequence == 'ResetHardware': return Target.ResetType.HW elif resetSequence == 'ResetSystem': return Target.ResetType.SW_SYSRESETREQ elif resetSequence == 'ResetProcessor': return Target.ResetType.SW_VECTRESET else: return Target.ResetType.SW except (KeyError, IndexError): return Target.ResetType.SW
! @brief One of the Target.ResetType enums. @todo Support multiple cores.
Below is the the instruction that describes the task: ### Input: ! @brief One of the Target.ResetType enums. @todo Support multiple cores. ### Response: def default_reset_type(self): """! @brief One of the Target.ResetType enums. @todo Support multiple cores. """ try: resetSequence = self._info.debugs[0].attrib['defaultResetSequence'] if resetSequence == 'ResetHardware': return Target.ResetType.HW elif resetSequence == 'ResetSystem': return Target.ResetType.SW_SYSRESETREQ elif resetSequence == 'ResetProcessor': return Target.ResetType.SW_VECTRESET else: return Target.ResetType.SW except (KeyError, IndexError): return Target.ResetType.SW
def close(self): """Cleanly shut down the SSL protocol and close the transport.""" if self._closing or self._handle.closed: return self._closing = True self._write_backlog.append([b'', False]) self._process_write_backlog()
Cleanly shut down the SSL protocol and close the transport.
Below is the the instruction that describes the task: ### Input: Cleanly shut down the SSL protocol and close the transport. ### Response: def close(self): """Cleanly shut down the SSL protocol and close the transport.""" if self._closing or self._handle.closed: return self._closing = True self._write_backlog.append([b'', False]) self._process_write_backlog()
def write_all(self): """ Write out all registered config files. """ [self.write(k) for k in six.iterkeys(self.templates)]
Write out all registered config files.
Below is the the instruction that describes the task: ### Input: Write out all registered config files. ### Response: def write_all(self): """ Write out all registered config files. """ [self.write(k) for k in six.iterkeys(self.templates)]
def set_lan_port(self, port_id, mac=None): """Set LAN port information to configuration. :param port_id: Physical port ID. :param mac: virtual MAC address if virtualization is necessary. """ port_handler = _parse_physical_port_id(port_id) port = self._find_port(port_handler) if port: port_handler.set_lan_port(port, mac) else: self._add_port(port_handler, port_handler.create_lan_port(mac))
Set LAN port information to configuration. :param port_id: Physical port ID. :param mac: virtual MAC address if virtualization is necessary.
Below is the the instruction that describes the task: ### Input: Set LAN port information to configuration. :param port_id: Physical port ID. :param mac: virtual MAC address if virtualization is necessary. ### Response: def set_lan_port(self, port_id, mac=None): """Set LAN port information to configuration. :param port_id: Physical port ID. :param mac: virtual MAC address if virtualization is necessary. """ port_handler = _parse_physical_port_id(port_id) port = self._find_port(port_handler) if port: port_handler.set_lan_port(port, mac) else: self._add_port(port_handler, port_handler.create_lan_port(mac))
def keys(name, basepath='/etc/pki', **kwargs): ''' Manage libvirt keys. name The name variable used to track the execution basepath Defaults to ``/etc/pki``, this is the root location used for libvirt keys on the hypervisor The following parameters are optional: country The country that the certificate should use. Defaults to US. .. versionadded:: 2018.3.0 state The state that the certificate should use. Defaults to Utah. .. versionadded:: 2018.3.0 locality The locality that the certificate should use. Defaults to Salt Lake City. .. versionadded:: 2018.3.0 organization The organization that the certificate should use. Defaults to Salted. .. versionadded:: 2018.3.0 expiration_days The number of days that the certificate should be valid for. Defaults to 365 days (1 year) .. versionadded:: 2018.3.0 ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # Grab all kwargs to make them available as pillar values # rename them to something hopefully unique to avoid # overriding anything existing pillar_kwargs = {} for key, value in six.iteritems(kwargs): pillar_kwargs['ext_pillar_virt.{0}'.format(key)] = value pillar = __salt__['pillar.ext']({'libvirt': '_'}, pillar_kwargs) paths = { 'serverkey': os.path.join(basepath, 'libvirt', 'private', 'serverkey.pem'), 'servercert': os.path.join(basepath, 'libvirt', 'servercert.pem'), 'clientkey': os.path.join(basepath, 'libvirt', 'private', 'clientkey.pem'), 'clientcert': os.path.join(basepath, 'libvirt', 'clientcert.pem'), 'cacert': os.path.join(basepath, 'CA', 'cacert.pem') } for key in paths: p_key = 'libvirt.{0}.pem'.format(key) if p_key not in pillar: continue if not os.path.exists(os.path.dirname(paths[key])): os.makedirs(os.path.dirname(paths[key])) if os.path.isfile(paths[key]): with salt.utils.files.fopen(paths[key], 'r') as fp_: if salt.utils.stringutils.to_unicode(fp_.read()) != pillar[p_key]: ret['changes'][key] = 'update' else: ret['changes'][key] = 'new' if not ret['changes']: ret['comment'] = 'All keys are correct' elif __opts__['test']: ret['result'] = None ret['comment'] = 'Libvirt keys are set to be updated' ret['changes'] = {} else: for key in ret['changes']: with salt.utils.files.fopen(paths[key], 'w+') as fp_: fp_.write( salt.utils.stringutils.to_str( pillar['libvirt.{0}.pem'.format(key)] ) ) ret['comment'] = 'Updated libvirt certs and keys' return ret
Manage libvirt keys. name The name variable used to track the execution basepath Defaults to ``/etc/pki``, this is the root location used for libvirt keys on the hypervisor The following parameters are optional: country The country that the certificate should use. Defaults to US. .. versionadded:: 2018.3.0 state The state that the certificate should use. Defaults to Utah. .. versionadded:: 2018.3.0 locality The locality that the certificate should use. Defaults to Salt Lake City. .. versionadded:: 2018.3.0 organization The organization that the certificate should use. Defaults to Salted. .. versionadded:: 2018.3.0 expiration_days The number of days that the certificate should be valid for. Defaults to 365 days (1 year) .. versionadded:: 2018.3.0
Below is the the instruction that describes the task: ### Input: Manage libvirt keys. name The name variable used to track the execution basepath Defaults to ``/etc/pki``, this is the root location used for libvirt keys on the hypervisor The following parameters are optional: country The country that the certificate should use. Defaults to US. .. versionadded:: 2018.3.0 state The state that the certificate should use. Defaults to Utah. .. versionadded:: 2018.3.0 locality The locality that the certificate should use. Defaults to Salt Lake City. .. versionadded:: 2018.3.0 organization The organization that the certificate should use. Defaults to Salted. .. versionadded:: 2018.3.0 expiration_days The number of days that the certificate should be valid for. Defaults to 365 days (1 year) .. versionadded:: 2018.3.0 ### Response: def keys(name, basepath='/etc/pki', **kwargs): ''' Manage libvirt keys. name The name variable used to track the execution basepath Defaults to ``/etc/pki``, this is the root location used for libvirt keys on the hypervisor The following parameters are optional: country The country that the certificate should use. Defaults to US. .. versionadded:: 2018.3.0 state The state that the certificate should use. Defaults to Utah. .. versionadded:: 2018.3.0 locality The locality that the certificate should use. Defaults to Salt Lake City. .. versionadded:: 2018.3.0 organization The organization that the certificate should use. Defaults to Salted. .. versionadded:: 2018.3.0 expiration_days The number of days that the certificate should be valid for. Defaults to 365 days (1 year) .. versionadded:: 2018.3.0 ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # Grab all kwargs to make them available as pillar values # rename them to something hopefully unique to avoid # overriding anything existing pillar_kwargs = {} for key, value in six.iteritems(kwargs): pillar_kwargs['ext_pillar_virt.{0}'.format(key)] = value pillar = __salt__['pillar.ext']({'libvirt': '_'}, pillar_kwargs) paths = { 'serverkey': os.path.join(basepath, 'libvirt', 'private', 'serverkey.pem'), 'servercert': os.path.join(basepath, 'libvirt', 'servercert.pem'), 'clientkey': os.path.join(basepath, 'libvirt', 'private', 'clientkey.pem'), 'clientcert': os.path.join(basepath, 'libvirt', 'clientcert.pem'), 'cacert': os.path.join(basepath, 'CA', 'cacert.pem') } for key in paths: p_key = 'libvirt.{0}.pem'.format(key) if p_key not in pillar: continue if not os.path.exists(os.path.dirname(paths[key])): os.makedirs(os.path.dirname(paths[key])) if os.path.isfile(paths[key]): with salt.utils.files.fopen(paths[key], 'r') as fp_: if salt.utils.stringutils.to_unicode(fp_.read()) != pillar[p_key]: ret['changes'][key] = 'update' else: ret['changes'][key] = 'new' if not ret['changes']: ret['comment'] = 'All keys are correct' elif __opts__['test']: ret['result'] = None ret['comment'] = 'Libvirt keys are set to be updated' ret['changes'] = {} else: for key in ret['changes']: with salt.utils.files.fopen(paths[key], 'w+') as fp_: fp_.write( salt.utils.stringutils.to_str( pillar['libvirt.{0}.pem'.format(key)] ) ) ret['comment'] = 'Updated libvirt certs and keys' return ret
def get_episode_title(episode: Episode) -> str:
    """Get the episode title.

    Japanese title is prioritized.
    """
    for title in episode.titles:
        if title.lang == 'ja':
            return title.title
    else:
        return episode.titles[0].title
Get the episode title. Japanese title is prioritized.
Below is the the instruction that describes the task:
### Input:
Get the episode title. Japanese title is prioritized.
### Response:
def get_episode_title(episode: Episode) -> str:
    """Get the episode title.

    Japanese title is prioritized.
    """
    for title in episode.titles:
        if title.lang == 'ja':
            return title.title
    else:
        return episode.titles[0].title
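The function above relies on Python's for/else: the else block runs only when the loop finishes without returning, which is what gives the "fall back to the first title" behaviour. A self-contained sketch of the idiom, with a namedtuple standing in for the real Title objects:

from collections import namedtuple

# Hypothetical stand-in for the real Title objects; only .lang and .title matter here.
Title = namedtuple("Title", ["lang", "title"])

def pick_title(titles, preferred_lang="ja"):
    # for/else: the else branch runs only if the loop completed without returning.
    for t in titles:
        if t.lang == preferred_lang:
            return t.title
    else:
        return titles[0].title

titles = [Title("en", "Attack on Titan"), Title("ja", "Shingeki no Kyojin")]
print(pick_title(titles))        # Shingeki no Kyojin
print(pick_title(titles, "fr"))  # Attack on Titan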
def schema(self, sources=None, tables=None, clean=False, force=False, use_pipeline=False): """ Generate destination schemas. :param sources: If specified, build only destination tables for these sources :param tables: If specified, build only these tables :param clean: Delete tables and partitions first :param force: Population tables even if the table isn't empty :param use_pipeline: If True, use the build pipeline to determine columns. If False, :return: True on success. """ from itertools import groupby from operator import attrgetter from ambry.etl import Collect, Head from ambry.orm.exc import NotFoundError self.dstate = self.STATES.BUILDING self.commit() # Workaround for https://github.com/CivicKnowledge/ambry/issues/171 self.log('---- Schema ----') resolved_sources = self._resolve_sources(sources, tables, predicate=lambda s: s.is_processable) if clean: self.dataset.delete_tables_partitions() self.commit() # Group the sources by the destination table name keyfunc = attrgetter('dest_table') for t, table_sources in groupby(sorted(resolved_sources, key=keyfunc), keyfunc): if use_pipeline: for source in table_sources: pl = self.pipeline(source) pl.cast = [ambry.etl.CastSourceColumns] pl.select_partition = [] pl.write = [Head, Collect] pl.final = [] self.log_pipeline(pl) pl.run() pl.phase = 'build_schema' self.log_pipeline(pl) for h, c in zip(pl.write[Collect].headers, pl.write[Collect].rows[1]): c = t.add_column(name=h, datatype=type(c).__name__ if c is not None else 'str', update_existing=True) self.log("Populated destination table '{}' from pipeline '{}'" .format(t.name, pl.name)) else: # Get all of the header names, for each source, associating the header position in the table # with the header, then sort on the postition. This will produce a stream of header names # that may have duplicates, but which is generally in the order the headers appear in the # sources. The duplicates are properly handled when we add the columns in add_column() self.commit() def source_cols(source): if source.is_partition and not source.source_table_exists: return enumerate(source.partition.table.columns) else: return enumerate(source.source_table.columns) columns = sorted(set([(i, col.dest_header, col.datatype, col.description, col.has_codes) for source in table_sources for i, col in source_cols(source)])) initial_count = len(t.columns) for pos, name, datatype, desc, has_codes in columns: kwds = dict( name=name, datatype=datatype, description=desc, update_existing=True ) try: extant = t.column(name) except NotFoundError: extant = None if extant is None or not extant.description: kwds['description'] = desc c = t.add_column(**kwds) final_count = len(t.columns) if final_count > initial_count: diff = final_count - initial_count self.log("Populated destination table '{}' from source table '{}' with {} columns" .format(t.name, source.source_table.name, diff)) self.commit() return True
Generate destination schemas. :param sources: If specified, build only destination tables for these sources :param tables: If specified, build only these tables :param clean: Delete tables and partitions first :param force: Population tables even if the table isn't empty :param use_pipeline: If True, use the build pipeline to determine columns. If False, :return: True on success.
Below is the the instruction that describes the task: ### Input: Generate destination schemas. :param sources: If specified, build only destination tables for these sources :param tables: If specified, build only these tables :param clean: Delete tables and partitions first :param force: Population tables even if the table isn't empty :param use_pipeline: If True, use the build pipeline to determine columns. If False, :return: True on success. ### Response: def schema(self, sources=None, tables=None, clean=False, force=False, use_pipeline=False): """ Generate destination schemas. :param sources: If specified, build only destination tables for these sources :param tables: If specified, build only these tables :param clean: Delete tables and partitions first :param force: Population tables even if the table isn't empty :param use_pipeline: If True, use the build pipeline to determine columns. If False, :return: True on success. """ from itertools import groupby from operator import attrgetter from ambry.etl import Collect, Head from ambry.orm.exc import NotFoundError self.dstate = self.STATES.BUILDING self.commit() # Workaround for https://github.com/CivicKnowledge/ambry/issues/171 self.log('---- Schema ----') resolved_sources = self._resolve_sources(sources, tables, predicate=lambda s: s.is_processable) if clean: self.dataset.delete_tables_partitions() self.commit() # Group the sources by the destination table name keyfunc = attrgetter('dest_table') for t, table_sources in groupby(sorted(resolved_sources, key=keyfunc), keyfunc): if use_pipeline: for source in table_sources: pl = self.pipeline(source) pl.cast = [ambry.etl.CastSourceColumns] pl.select_partition = [] pl.write = [Head, Collect] pl.final = [] self.log_pipeline(pl) pl.run() pl.phase = 'build_schema' self.log_pipeline(pl) for h, c in zip(pl.write[Collect].headers, pl.write[Collect].rows[1]): c = t.add_column(name=h, datatype=type(c).__name__ if c is not None else 'str', update_existing=True) self.log("Populated destination table '{}' from pipeline '{}'" .format(t.name, pl.name)) else: # Get all of the header names, for each source, associating the header position in the table # with the header, then sort on the postition. This will produce a stream of header names # that may have duplicates, but which is generally in the order the headers appear in the # sources. The duplicates are properly handled when we add the columns in add_column() self.commit() def source_cols(source): if source.is_partition and not source.source_table_exists: return enumerate(source.partition.table.columns) else: return enumerate(source.source_table.columns) columns = sorted(set([(i, col.dest_header, col.datatype, col.description, col.has_codes) for source in table_sources for i, col in source_cols(source)])) initial_count = len(t.columns) for pos, name, datatype, desc, has_codes in columns: kwds = dict( name=name, datatype=datatype, description=desc, update_existing=True ) try: extant = t.column(name) except NotFoundError: extant = None if extant is None or not extant.description: kwds['description'] = desc c = t.add_column(**kwds) final_count = len(t.columns) if final_count > initial_count: diff = final_count - initial_count self.log("Populated destination table '{}' from source table '{}' with {} columns" .format(t.name, source.source_table.name, diff)) self.commit() return True
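The grouping step above depends on a detail of itertools.groupby: it only merges adjacent items with equal keys, which is why the sources are sorted with the same attrgetter key first. A standard-library sketch of that pattern; the Source records are invented stand-ins for the real source objects.

from itertools import groupby
from operator import attrgetter
from collections import namedtuple

Source = namedtuple("Source", ["name", "dest_table"])  # hypothetical stand-in records

sources = [
    Source("a", "population"),
    Source("b", "geography"),
    Source("c", "population"),
]

keyfunc = attrgetter("dest_table")
# groupby only merges adjacent equal keys, so sort by the same key first.
for table, group in groupby(sorted(sources, key=keyfunc), keyfunc):
    print(table, [s.name for s in group])
# geography ['b']
# population ['a', 'c']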
def update(self, attributes=None): """ Updates the entry with attributes. """ if attributes is None: attributes = {} attributes['content_type_id'] = self.sys['content_type'].id return super(Entry, self).update(attributes)
Updates the entry with attributes.
Below is the the instruction that describes the task: ### Input: Updates the entry with attributes. ### Response: def update(self, attributes=None): """ Updates the entry with attributes. """ if attributes is None: attributes = {} attributes['content_type_id'] = self.sys['content_type'].id return super(Entry, self).update(attributes)
def time(value): """ Returns a time literal if value is likely coercible to a time Parameters ---------- value : time value as string Returns -------- result : TimeScalar """ if isinstance(value, str): value = to_time(value) return literal(value, type=dt.time)
Returns a time literal if value is likely coercible to a time Parameters ---------- value : time value as string Returns -------- result : TimeScalar
Below is the the instruction that describes the task: ### Input: Returns a time literal if value is likely coercible to a time Parameters ---------- value : time value as string Returns -------- result : TimeScalar ### Response: def time(value): """ Returns a time literal if value is likely coercible to a time Parameters ---------- value : time value as string Returns -------- result : TimeScalar """ if isinstance(value, str): value = to_time(value) return literal(value, type=dt.time)
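The coercion pattern above (accept either a string or an already-typed value, convert only when needed) can be sketched with the standard library alone. to_time and literal belong to the surrounding library, so plain datetime.time stands in for the time literal here.

from datetime import time as dtime

def coerce_time(value):
    # Accept 'HH:MM[:SS]' strings or datetime.time objects; illustration only.
    if isinstance(value, str):
        return dtime.fromisoformat(value)
    return value

print(coerce_time("13:45:30"))   # 13:45:30
print(coerce_time(dtime(8, 0)))  # 08:00:00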
def _fmt_structured(d): """Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2' Output is lexically sorted, *except* the time and pid always come first, to assist with human scanning of the data. """ timeEntry = datetime.datetime.utcnow().strftime( "time=%Y-%m-%dT%H:%M:%S.%f-00") pidEntry = "pid=" + str(os.getpid()) rest = sorted('='.join([str(k), str(v)]) for (k, v) in list(d.items())) return ' '.join([timeEntry, pidEntry] + rest)
Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2' Output is lexically sorted, *except* the time and pid always come first, to assist with human scanning of the data.
Below is the the instruction that describes the task: ### Input: Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2' Output is lexically sorted, *except* the time and pid always come first, to assist with human scanning of the data. ### Response: def _fmt_structured(d): """Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2' Output is lexically sorted, *except* the time and pid always come first, to assist with human scanning of the data. """ timeEntry = datetime.datetime.utcnow().strftime( "time=%Y-%m-%dT%H:%M:%S.%f-00") pidEntry = "pid=" + str(os.getpid()) rest = sorted('='.join([str(k), str(v)]) for (k, v) in list(d.items())) return ' '.join([timeEntry, pidEntry] + rest)
def execute(self, conn, primary_ds_name="", primary_ds_type="", transaction=False): """ Lists all primary datasets if pattern is not provided. """ sql = self.sql binds = {} #import pdb #pdb.set_trace() if primary_ds_name and primary_ds_type in ('', None, '%'): op = ("=", "like")["%" in primary_ds_name] sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name" % op binds.update(primary_ds_name=primary_ds_name) elif primary_ds_type and primary_ds_name in ('', None, '%'): op = ("=", "like")["%" in primary_ds_type] sql += "WHERE PT.PRIMARY_DS_TYPE %s :primary_ds_type" % op binds.update(primary_ds_type=primary_ds_type) elif primary_ds_name and primary_ds_type: op = ("=", "like")["%" in primary_ds_name] op1 = ("=", "like")["%" in primary_ds_type] sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name and PT.PRIMARY_DS_TYPE %s :primary_ds_type"\ %(op, op1) binds.update(primary_ds_name=primary_ds_name) binds.update(primary_ds_type=primary_ds_type) else: pass cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
Lists all primary datasets if pattern is not provided.
Below is the the instruction that describes the task: ### Input: Lists all primary datasets if pattern is not provided. ### Response: def execute(self, conn, primary_ds_name="", primary_ds_type="", transaction=False): """ Lists all primary datasets if pattern is not provided. """ sql = self.sql binds = {} #import pdb #pdb.set_trace() if primary_ds_name and primary_ds_type in ('', None, '%'): op = ("=", "like")["%" in primary_ds_name] sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name" % op binds.update(primary_ds_name=primary_ds_name) elif primary_ds_type and primary_ds_name in ('', None, '%'): op = ("=", "like")["%" in primary_ds_type] sql += "WHERE PT.PRIMARY_DS_TYPE %s :primary_ds_type" % op binds.update(primary_ds_type=primary_ds_type) elif primary_ds_name and primary_ds_type: op = ("=", "like")["%" in primary_ds_name] op1 = ("=", "like")["%" in primary_ds_type] sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name and PT.PRIMARY_DS_TYPE %s :primary_ds_type"\ %(op, op1) binds.update(primary_ds_name=primary_ds_name) binds.update(primary_ds_type=primary_ds_type) else: pass cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
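The expression ("=", "like")["%" in primary_ds_name] above picks the SQL operator by indexing a tuple with a boolean (False is 0, True is 1), while the pattern itself is still passed as a bind variable. A runnable sqlite3 miniature of the same idea; the table name and rows are made up.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE primary_ds (name TEXT)")
conn.executemany("INSERT INTO primary_ds VALUES (?)", [("minbias",), ("zerobias",)])

def find(pattern):
    # bool indexes the tuple: False -> '=', True -> 'LIKE'
    op = ("=", "LIKE")["%" in pattern]
    sql = "SELECT name FROM primary_ds WHERE name %s ?" % op
    return [row[0] for row in conn.execute(sql, (pattern,))]

print(find("minbias"))  # ['minbias']
print(find("%bias"))    # ['minbias', 'zerobias']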
def batch_parameters(self, saa, sza, p, x, y, g, s, z): """Takes lists for parameters and saves them as class properties :param saa: <list> Sun Azimuth Angle (deg) :param sza: <list> Sun Zenith Angle (deg) :param p: <list> Phytoplankton linear scalling factor :param x: <list> Scattering scaling factor :param y: <list> Scattering slope factor :param g: <list> CDOM absorption scaling factor :param s: <list> CDOM absorption slope factor :param z: <list> depth (m)""" self.saa_list = saa self.sza_list = sza self.p_list = p self.x_list = x self.y_list = y self.g_list = g self.s_list = s self.z_list = z
Takes lists for parameters and saves them as class properties :param saa: <list> Sun Azimuth Angle (deg) :param sza: <list> Sun Zenith Angle (deg) :param p: <list> Phytoplankton linear scalling factor :param x: <list> Scattering scaling factor :param y: <list> Scattering slope factor :param g: <list> CDOM absorption scaling factor :param s: <list> CDOM absorption slope factor :param z: <list> depth (m)
Below is the the instruction that describes the task: ### Input: Takes lists for parameters and saves them as class properties :param saa: <list> Sun Azimuth Angle (deg) :param sza: <list> Sun Zenith Angle (deg) :param p: <list> Phytoplankton linear scalling factor :param x: <list> Scattering scaling factor :param y: <list> Scattering slope factor :param g: <list> CDOM absorption scaling factor :param s: <list> CDOM absorption slope factor :param z: <list> depth (m) ### Response: def batch_parameters(self, saa, sza, p, x, y, g, s, z): """Takes lists for parameters and saves them as class properties :param saa: <list> Sun Azimuth Angle (deg) :param sza: <list> Sun Zenith Angle (deg) :param p: <list> Phytoplankton linear scalling factor :param x: <list> Scattering scaling factor :param y: <list> Scattering slope factor :param g: <list> CDOM absorption scaling factor :param s: <list> CDOM absorption slope factor :param z: <list> depth (m)""" self.saa_list = saa self.sza_list = sza self.p_list = p self.x_list = x self.y_list = y self.g_list = g self.s_list = s self.z_list = z
def set_random_starting_grid(lfe):
    """ generate a random grid for game of life using
        a set of patterns (just to make it interesting)
    """
    cls_patterns = mod_grid.GameOfLifePatterns(25)
    patterns = cls_patterns.get_patterns()
    for pattern in patterns:
        lfe.set_tile(pattern[0], pattern[1], 1)
generate a random grid for game of life using a set of patterns (just to make it interesting)
Below is the the instruction that describes the task:
### Input:
generate a random grid for game of life using a set of patterns (just to make it interesting)
### Response:
def set_random_starting_grid(lfe):
    """ generate a random grid for game of life using
        a set of patterns (just to make it interesting)
    """
    cls_patterns = mod_grid.GameOfLifePatterns(25)
    patterns = cls_patterns.get_patterns()
    for pattern in patterns:
        lfe.set_tile(pattern[0], pattern[1], 1)
def hist(self, dimension=None, num_bins=20, bin_range=None, adjoin=True, **kwargs): """Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram """ from ..operation import histogram if not isinstance(dimension, list): dimension = [dimension] hists = [] for d in dimension[::-1]: hist = histogram(self, num_bins=num_bins, bin_range=bin_range, dimension=d, **kwargs) hists.append(hist) if adjoin: layout = self for didx in range(len(dimension)): layout = layout << hists[didx] elif len(dimension) > 1: layout = Layout(hists) else: layout = hists[0] return layout
Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram
Below is the the instruction that describes the task: ### Input: Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram ### Response: def hist(self, dimension=None, num_bins=20, bin_range=None, adjoin=True, **kwargs): """Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram """ from ..operation import histogram if not isinstance(dimension, list): dimension = [dimension] hists = [] for d in dimension[::-1]: hist = histogram(self, num_bins=num_bins, bin_range=bin_range, dimension=d, **kwargs) hists.append(hist) if adjoin: layout = self for didx in range(len(dimension)): layout = layout << hists[didx] elif len(dimension) > 1: layout = Layout(hists) else: layout = hists[0] return layout
def CollectData(self): """Return some current samples. Call StartDataCollection() first. """ while 1: # loop until we get data or a timeout _bytes = self._ReadPacket() if not _bytes: return None if len(_bytes) < 4 + 8 + 1 or _bytes[0] < 0x20 or _bytes[0] > 0x2F: logging.warning("Wanted data, dropped type=0x%02x, len=%d", _bytes[0], len(_bytes)) continue seq, _type, x, y = struct.unpack("BBBB", _bytes[:4]) data = [ struct.unpack(">hhhh", _bytes[x:x + 8]) for x in range(4, len(_bytes) - 8, 8) ] if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF: logging.warning("Data sequence skipped, lost packet?") self._last_seq = seq if _type == 0: if not self._coarse_scale or not self._fine_scale: logging.warning( "Waiting for calibration, dropped data packet.") continue out = [] for main, usb, aux, voltage in data: if main & 1: coarse = ((main & ~1) - self._coarse_zero) out.append(coarse * self._coarse_scale) else: out.append((main - self._fine_zero) * self._fine_scale) return out elif _type == 1: self._fine_zero = data[0][0] self._coarse_zero = data[1][0] elif _type == 2: self._fine_ref = data[0][0] self._coarse_ref = data[1][0] else: logging.warning("Discarding data packet type=0x%02x", _type) continue # See http://wiki/Main/MonsoonProtocol for details on these values. if self._coarse_ref != self._coarse_zero: self._coarse_scale = 2.88 / ( self._coarse_ref - self._coarse_zero) if self._fine_ref != self._fine_zero: self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)
Return some current samples. Call StartDataCollection() first.
Below is the the instruction that describes the task: ### Input: Return some current samples. Call StartDataCollection() first. ### Response: def CollectData(self): """Return some current samples. Call StartDataCollection() first. """ while 1: # loop until we get data or a timeout _bytes = self._ReadPacket() if not _bytes: return None if len(_bytes) < 4 + 8 + 1 or _bytes[0] < 0x20 or _bytes[0] > 0x2F: logging.warning("Wanted data, dropped type=0x%02x, len=%d", _bytes[0], len(_bytes)) continue seq, _type, x, y = struct.unpack("BBBB", _bytes[:4]) data = [ struct.unpack(">hhhh", _bytes[x:x + 8]) for x in range(4, len(_bytes) - 8, 8) ] if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF: logging.warning("Data sequence skipped, lost packet?") self._last_seq = seq if _type == 0: if not self._coarse_scale or not self._fine_scale: logging.warning( "Waiting for calibration, dropped data packet.") continue out = [] for main, usb, aux, voltage in data: if main & 1: coarse = ((main & ~1) - self._coarse_zero) out.append(coarse * self._coarse_scale) else: out.append((main - self._fine_zero) * self._fine_scale) return out elif _type == 1: self._fine_zero = data[0][0] self._coarse_zero = data[1][0] elif _type == 2: self._fine_ref = data[0][0] self._coarse_ref = data[1][0] else: logging.warning("Discarding data packet type=0x%02x", _type) continue # See http://wiki/Main/MonsoonProtocol for details on these values. if self._coarse_ref != self._coarse_zero: self._coarse_scale = 2.88 / ( self._coarse_ref - self._coarse_zero) if self._fine_ref != self._fine_zero: self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)
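The parsing above is built on struct.unpack with "BBBB" for the header bytes and big-endian signed shorts (">hhhh") for each sample. A small standard-library sketch that packs one fake packet and unpacks it the same way; the packet contents are invented and do not follow the real Monsoon protocol.

import struct

# Build a fake packet: 4 header bytes followed by one sample of four int16 values.
header = struct.pack("BBBB", 0x21, 0, 0, 0)
sample = struct.pack(">hhhh", 1200, -5, 30, 4150)
packet = header + sample

seq, ptype, x, y = struct.unpack("BBBB", packet[:4])
main, usb, aux, voltage = struct.unpack(">hhhh", packet[4:12])
print(seq, ptype, (main, usb, aux, voltage))
# 33 0 (1200, -5, 30, 4150)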
def __retrieve(self, key): ''' Retrieve file location from cache DB ''' with self.get_conn() as conn: try: c = conn.cursor() if key is None: c.execute("SELECT value FROM cache_entries WHERE key IS NULL") else: c.execute("SELECT value FROM cache_entries WHERE key = ?", (key,)) result = c.fetchone() if result is None or len(result) != 1: getLogger().info("There's no entry with key={key}".format(key=key)) return None else: return result[0] except: getLogger().exception("Cannot retrieve") return None
Retrieve file location from cache DB
Below is the the instruction that describes the task: ### Input: Retrieve file location from cache DB ### Response: def __retrieve(self, key): ''' Retrieve file location from cache DB ''' with self.get_conn() as conn: try: c = conn.cursor() if key is None: c.execute("SELECT value FROM cache_entries WHERE key IS NULL") else: c.execute("SELECT value FROM cache_entries WHERE key = ?", (key,)) result = c.fetchone() if result is None or len(result) != 1: getLogger().info("There's no entry with key={key}".format(key=key)) return None else: return result[0] except: getLogger().exception("Cannot retrieve") return None
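A runnable miniature of the lookup above against an in-memory sqlite3 database, keeping the separate IS NULL branch for a None key; the cache_entries schema is assumed from the query text.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE cache_entries (key TEXT, value TEXT)")
conn.execute("INSERT INTO cache_entries VALUES (?, ?)", ("wn31", "/tmp/wn31.db"))

def retrieve(conn, key):
    c = conn.cursor()
    if key is None:
        c.execute("SELECT value FROM cache_entries WHERE key IS NULL")
    else:
        c.execute("SELECT value FROM cache_entries WHERE key = ?", (key,))
    row = c.fetchone()
    return row[0] if row else None

print(retrieve(conn, "wn31"))     # /tmp/wn31.db
print(retrieve(conn, "missing"))  # None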
def setup_logging(): """Called when __name__ == '__main__' below. Sets up logging library. All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages. """ fmt = 'DBG<0>%(pathname)s:%(lineno)d %(funcName)s: %(message)s' handler_stderr = logging.StreamHandler(sys.stderr) handler_stderr.setFormatter(logging.Formatter(fmt)) root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) root_logger.addHandler(handler_stderr)
Called when __name__ == '__main__' below. Sets up logging library. All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages.
Below is the the instruction that describes the task: ### Input: Called when __name__ == '__main__' below. Sets up logging library. All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages. ### Response: def setup_logging(): """Called when __name__ == '__main__' below. Sets up logging library. All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages. """ fmt = 'DBG<0>%(pathname)s:%(lineno)d %(funcName)s: %(message)s' handler_stderr = logging.StreamHandler(sys.stderr) handler_stderr.setFormatter(logging.Formatter(fmt)) root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) root_logger.addHandler(handler_stderr)
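A short usage-style example of the same stderr-handler setup; the format string is taken from the function above, the log call and message are illustrative.

import logging
import sys

fmt = 'DBG<0>%(pathname)s:%(lineno)d %(funcName)s: %(message)s'
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter(fmt))

root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(handler)

logging.debug("debug output goes to stderr")
print("regular messages still use print()")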
def run(self, world: int) -> IO: """Run IO Action""" filename, func = self._get_value() f = self.open_func(filename) action = func(f.read()) return action(world=world + 1)
Run IO Action
Below is the the instruction that describes the task: ### Input: Run IO Action ### Response: def run(self, world: int) -> IO: """Run IO Action""" filename, func = self._get_value() f = self.open_func(filename) action = func(f.read()) return action(world=world + 1)
def generic_visit(self, node: ast.AST) -> None: """Raise an exception that this node has not been handled.""" raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node))
Raise an exception that this node has not been handled.
Below is the the instruction that describes the task: ### Input: Raise an exception that this node has not been handled. ### Response: def generic_visit(self, node: ast.AST) -> None: """Raise an exception that this node has not been handled.""" raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node))
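Overriding generic_visit to raise is a common way to make an ast.NodeVisitor fail loudly on node types it was never taught to handle, instead of silently skipping them. A minimal standard-library sketch; the tiny expression evaluator is invented for illustration.

import ast

class StrictEvaluator(ast.NodeVisitor):
    """Evaluates +/* expressions over constants and nothing else."""

    def visit_Expression(self, node):
        return self.visit(node.body)

    def visit_BinOp(self, node):
        left, right = self.visit(node.left), self.visit(node.right)
        if isinstance(node.op, ast.Add):
            return left + right
        if isinstance(node.op, ast.Mult):
            return left * right
        raise NotImplementedError("Unhandled operator: {}".format(type(node.op)))

    def visit_Constant(self, node):
        return node.value

    def generic_visit(self, node):
        raise NotImplementedError(
            "Unhandled recomputation of the node: {} {}".format(type(node), node))

print(StrictEvaluator().visit(ast.parse("2 + 3 * 4", mode="eval")))  # 14
# StrictEvaluator().visit(ast.parse("x + 1", mode="eval"))  # raises NotImplementedError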
def profile_create(name, config=None, devices=None, description=None, remote_addr=None, cert=None, key=None, verify_cert=True): ''' Creates a profile. name : The name of the profile to get. config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). description : A description string or None (None = unset). remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}" $ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}" See the `lxd-docs`_ for the details about the config and devices dicts. .. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10 ''' client = pylxd_client_get(remote_addr, cert, key, verify_cert) config, devices = normalize_input_values( config, devices ) try: profile = client.profiles.create(name, config, devices) except pylxd.exceptions.LXDAPIException as e: raise CommandExecutionError(six.text_type(e)) if description is not None: profile.description = description pylxd_save_object(profile) return _pylxd_model_to_dict(profile)
Creates a profile. name : The name of the profile to get. config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). description : A description string or None (None = unset). remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}" $ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}" See the `lxd-docs`_ for the details about the config and devices dicts. .. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10
Below is the the instruction that describes the task: ### Input: Creates a profile. name : The name of the profile to get. config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). description : A description string or None (None = unset). remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}" $ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}" See the `lxd-docs`_ for the details about the config and devices dicts. .. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10 ### Response: def profile_create(name, config=None, devices=None, description=None, remote_addr=None, cert=None, key=None, verify_cert=True): ''' Creates a profile. name : The name of the profile to get. config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). description : A description string or None (None = unset). remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}" $ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}" See the `lxd-docs`_ for the details about the config and devices dicts. .. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10 ''' client = pylxd_client_get(remote_addr, cert, key, verify_cert) config, devices = normalize_input_values( config, devices ) try: profile = client.profiles.create(name, config, devices) except pylxd.exceptions.LXDAPIException as e: raise CommandExecutionError(six.text_type(e)) if description is not None: profile.description = description pylxd_save_object(profile) return _pylxd_model_to_dict(profile)
def centroids(self, window_size=5): """ Detects peaks in raw data. :param mzs: sorted array of m/z values :param intensities: array of corresponding intensities :param window_size: size of m/z averaging window :returns: isotope pattern containing the centroids :rtype: CentroidedSpectrum """ self.sortByMass() mzs = _cffi_buffer(self.masses, 'd') intensities = _cffi_buffer(self.intensities, 'f') n = self.size p = ims.spectrum_new_from_raw(n, mzs.ptr, intensities.ptr, int(window_size)) return _new_spectrum(CentroidedSpectrum, p)
Detects peaks in raw data. :param mzs: sorted array of m/z values :param intensities: array of corresponding intensities :param window_size: size of m/z averaging window :returns: isotope pattern containing the centroids :rtype: CentroidedSpectrum
Below is the the instruction that describes the task: ### Input: Detects peaks in raw data. :param mzs: sorted array of m/z values :param intensities: array of corresponding intensities :param window_size: size of m/z averaging window :returns: isotope pattern containing the centroids :rtype: CentroidedSpectrum ### Response: def centroids(self, window_size=5): """ Detects peaks in raw data. :param mzs: sorted array of m/z values :param intensities: array of corresponding intensities :param window_size: size of m/z averaging window :returns: isotope pattern containing the centroids :rtype: CentroidedSpectrum """ self.sortByMass() mzs = _cffi_buffer(self.masses, 'd') intensities = _cffi_buffer(self.intensities, 'f') n = self.size p = ims.spectrum_new_from_raw(n, mzs.ptr, intensities.ptr, int(window_size)) return _new_spectrum(CentroidedSpectrum, p)
def write(self, data): """Write data into the device input buffer. :param data: single element byte :type data: bytes """ logger.debug('Writing into device input buffer: %r' % data) if not isinstance(data, bytes): raise TypeError('data must be an instance of bytes') if len(data) != 1: msg = 'data must have a length of 1, not %d' raise ValueError(msg % len(data)) self._input_buffer.extend(data) l = len(self._query_eom) if not self._input_buffer.endswith(self._query_eom): return try: message = bytes(self._input_buffer[:-l]) queries = (message.split(self.delimiter) if self.delimiter else [message]) for query in queries: response = self._match(query) eom = self._response_eom if response is None: response = self.error_response('command_error') if response is not NoResponse: self._output_buffer.extend(response) self._output_buffer.extend(eom) finally: self._input_buffer = bytearray()
Write data into the device input buffer. :param data: single element byte :type data: bytes
Below is the the instruction that describes the task: ### Input: Write data into the device input buffer. :param data: single element byte :type data: bytes ### Response: def write(self, data): """Write data into the device input buffer. :param data: single element byte :type data: bytes """ logger.debug('Writing into device input buffer: %r' % data) if not isinstance(data, bytes): raise TypeError('data must be an instance of bytes') if len(data) != 1: msg = 'data must have a length of 1, not %d' raise ValueError(msg % len(data)) self._input_buffer.extend(data) l = len(self._query_eom) if not self._input_buffer.endswith(self._query_eom): return try: message = bytes(self._input_buffer[:-l]) queries = (message.split(self.delimiter) if self.delimiter else [message]) for query in queries: response = self._match(query) eom = self._response_eom if response is None: response = self.error_response('command_error') if response is not NoResponse: self._output_buffer.extend(response) self._output_buffer.extend(eom) finally: self._input_buffer = bytearray()
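The method above accumulates one byte at a time until the end-of-message sequence arrives, then splits the buffered message on a delimiter and queues one response per query. A stripped-down, dependency-free sketch of that loop; the echo responses and query strings are invented.

class TinyDevice:
    def __init__(self, eom=b"\r\n", delimiter=b";"):
        self._eom = eom
        self._delimiter = delimiter
        self._input = bytearray()
        self.output = bytearray()

    def write(self, data):
        # Feed exactly one byte at a time, like the original device model.
        assert isinstance(data, bytes) and len(data) == 1
        self._input.extend(data)
        if not self._input.endswith(self._eom):
            return
        message = bytes(self._input[:-len(self._eom)])
        self._input = bytearray()
        for query in message.split(self._delimiter):
            self.output.extend(b"echo:" + query + self._eom)

dev = TinyDevice()
for b in b"*IDN?;FREQ?\r\n":
    dev.write(bytes([b]))
print(dev.output)  # bytearray(b'echo:*IDN?\r\necho:FREQ?\r\n')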
def is_time_included(self, time): """Check if time is included in analysis period. Return True if time is inside this analysis period, otherwise return False Args: time: A DateTime to be tested Returns: A boolean. True if time is included in analysis period """ if self._timestamps_data is None: self._calculate_timestamps() # time filtering in Ladybug Tools is slightly different than "normal" # filtering since start hour and end hour will be applied for every day. # For instance 2/20 9am to 2/22 5pm means hour between 9-17 # during 20, 21 and 22 of Feb. return time.moy in self._timestamps_data
Check if time is included in analysis period. Return True if time is inside this analysis period, otherwise return False Args: time: A DateTime to be tested Returns: A boolean. True if time is included in analysis period
Below is the the instruction that describes the task: ### Input: Check if time is included in analysis period. Return True if time is inside this analysis period, otherwise return False Args: time: A DateTime to be tested Returns: A boolean. True if time is included in analysis period ### Response: def is_time_included(self, time): """Check if time is included in analysis period. Return True if time is inside this analysis period, otherwise return False Args: time: A DateTime to be tested Returns: A boolean. True if time is included in analysis period """ if self._timestamps_data is None: self._calculate_timestamps() # time filtering in Ladybug Tools is slightly different than "normal" # filtering since start hour and end hour will be applied for every day. # For instance 2/20 9am to 2/22 5pm means hour between 9-17 # during 20, 21 and 22 of Feb. return time.moy in self._timestamps_data
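The membership test above boils down to precomputing the minute-of-year timestamps covered by the period and checking time.moy against them. A standard-library sketch of the same idea for a "Feb 20-22, 9:00-17:59 every day" period; the moy encoding below is a simplification of the real one.

from datetime import datetime

def minute_of_year(dt):
    return (dt.timetuple().tm_yday - 1) * 24 * 60 + dt.hour * 60 + dt.minute

# The hour filter applies to every day in the range, as described above.
included = {
    minute_of_year(datetime(2024, 2, day, hour, minute))
    for day in (20, 21, 22)
    for hour in range(9, 18)
    for minute in range(60)
}

def is_time_included(dt):
    return minute_of_year(dt) in included

print(is_time_included(datetime(2024, 2, 21, 10, 30)))  # True
print(is_time_included(datetime(2024, 2, 21, 20, 0)))   # False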
def get_all_locations(self, timeout: int=None): """Get a list of all locations Parameters ---------- timeout: Optional[int] = None Custom timeout that overwrites Client.timeout """ url = self.api.LOCATIONS return self._get_model(url, timeout=timeout)
Get a list of all locations Parameters ---------- timeout: Optional[int] = None Custom timeout that overwrites Client.timeout
Below is the the instruction that describes the task: ### Input: Get a list of all locations Parameters ---------- timeout: Optional[int] = None Custom timeout that overwrites Client.timeout ### Response: def get_all_locations(self, timeout: int=None): """Get a list of all locations Parameters ---------- timeout: Optional[int] = None Custom timeout that overwrites Client.timeout """ url = self.api.LOCATIONS return self._get_model(url, timeout=timeout)
def read_csv(text, sep="\t"):
    """Create a DataFrame from CSV text"""
    import pandas as pd  # no top level load to make a faster import of db
    return pd.read_csv(StringIO(text), sep=sep)
Create a DataFrame from CSV text
Below is the the instruction that describes the task:
### Input:
Create a DataFrame from CSV text
### Response:
def read_csv(text, sep="\t"):
    """Create a DataFrame from CSV text"""
    import pandas as pd  # no top level load to make a faster import of db
    return pd.read_csv(StringIO(text), sep=sep)
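A usage example for the helper above, assuming pandas is installed; the tab-separated sample text and column names are made up.

from io import StringIO
import pandas as pd

text = "gene\tscore\nTP53\t0.9\nBRCA1\t0.4\n"
df = pd.read_csv(StringIO(text), sep="\t")
print(df.shape)          # (2, 2)
print(list(df.columns))  # ['gene', 'score']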
def run(self, files, working_area): """ Run checks concurrently. Returns a list of CheckResults ordered by declaration order of the checks in the imported module """ # Ensure that dictionary is ordered by check declaration order (via self.check_names) # NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict. results = {name: None for name in self.check_names} checks_root = working_area.parent with futures.ProcessPoolExecutor() as executor: # Start all checks that have no dependencies not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root)) for name, _ in self.child_map[None]) not_passed = [] while not_done: done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED) for future in done: # Get result from completed check result, state = future.result() results[result.name] = result if result.passed: # Dispatch dependent checks for child_name, _ in self.child_map[result.name]: not_done.add(executor.submit( run_check(child_name, self.checks_spec, checks_root, state))) else: not_passed.append(result.name) for name in not_passed: self._skip_children(name, results) return results.values()
Run checks concurrently. Returns a list of CheckResults ordered by declaration order of the checks in the imported module
Below is the the instruction that describes the task: ### Input: Run checks concurrently. Returns a list of CheckResults ordered by declaration order of the checks in the imported module ### Response: def run(self, files, working_area): """ Run checks concurrently. Returns a list of CheckResults ordered by declaration order of the checks in the imported module """ # Ensure that dictionary is ordered by check declaration order (via self.check_names) # NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict. results = {name: None for name in self.check_names} checks_root = working_area.parent with futures.ProcessPoolExecutor() as executor: # Start all checks that have no dependencies not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root)) for name, _ in self.child_map[None]) not_passed = [] while not_done: done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED) for future in done: # Get result from completed check result, state = future.result() results[result.name] = result if result.passed: # Dispatch dependent checks for child_name, _ in self.child_map[result.name]: not_done.add(executor.submit( run_check(child_name, self.checks_spec, checks_root, state))) else: not_passed.append(result.name) for name in not_passed: self._skip_children(name, results) return results.values()
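The scheduling loop above (seed the executor with dependency-free checks, wait with FIRST_COMPLETED, then submit whatever each completed check unblocks) can be shown without the checks framework. The sketch below uses a thread pool instead of processes so it runs anywhere without pickling concerns; the child_map and run_check stub are invented.

from concurrent import futures

child_map = {None: ["exists", "compiles"], "exists": [], "compiles": ["runs"], "runs": []}

def run_check(name):
    return name, True  # pretend every check passes

results = {}
with futures.ThreadPoolExecutor() as executor:
    not_done = {executor.submit(run_check, n) for n in child_map[None]}
    while not_done:
        done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED)
        for fut in done:
            name, passed = fut.result()
            results[name] = passed
            if passed:
                # Dispatch checks that depended on the one that just passed.
                not_done |= {executor.submit(run_check, c) for c in child_map[name]}

print(results)  # {'exists': True, 'compiles': True, 'runs': True} (order may vary)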
def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator='\n', encoding='utf-8', skiprows=0): """fileobj can be a StringIO in Py3, but should be a BytesIO in Py2.""" # Python 3 version if sys.version_info[0] >= 3: # Next, get the csv reader, with unicode delimiter and quotechar csv_reader = csv.reader(fileobj, delimiter=delimiter, quotechar=quotechar, quoting=quoting, lineterminator=lineterminator) # Now, return the (already decoded) unicode csv_reader generator # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield row # Python 2 version else: # Next, get the csv reader, passing delimiter and quotechar as # bytestrings rather than unicode csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding), quotechar=quotechar.encode(encoding), quoting=quoting, lineterminator=lineterminator) # Iterate over the file and decode each string into unicode # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield [cell.decode(encoding) for cell in row]
fileobj can be a StringIO in Py3, but should be a BytesIO in Py2.
Below is the the instruction that describes the task: ### Input: fileobj can be a StringIO in Py3, but should be a BytesIO in Py2. ### Response: def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator='\n', encoding='utf-8', skiprows=0): """fileobj can be a StringIO in Py3, but should be a BytesIO in Py2.""" # Python 3 version if sys.version_info[0] >= 3: # Next, get the csv reader, with unicode delimiter and quotechar csv_reader = csv.reader(fileobj, delimiter=delimiter, quotechar=quotechar, quoting=quoting, lineterminator=lineterminator) # Now, return the (already decoded) unicode csv_reader generator # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield row # Python 2 version else: # Next, get the csv reader, passing delimiter and quotechar as # bytestrings rather than unicode csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding), quotechar=quotechar.encode(encoding), quoting=quoting, lineterminator=lineterminator) # Iterate over the file and decode each string into unicode # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield [cell.decode(encoding) for cell in row]
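On Python 3 the function above reduces to the standard csv module reading an already-decoded text stream, plus the skiprows handling. A minimal sketch with invented sample rows:

import csv
from io import StringIO

def read_csv_rows(fileobj, delimiter=",", skiprows=0):
    reader = csv.reader(fileobj, delimiter=delimiter, lineterminator="\n")
    for _ in range(skiprows):
        next(reader)
    for row in reader:
        yield row

data = StringIO("# comment line\nname,city\nAda,London\n")
print(list(read_csv_rows(data, skiprows=1)))
# [['name', 'city'], ['Ada', 'London']]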
def _pnorm_diagweight(x, p, w): """Diagonally weighted p-norm implementation.""" # Ravel both in the same order (w is a numpy array) order = 'F' if all(a.flags.f_contiguous for a in (x.data, w)) else 'C' # This is faster than first applying the weights and then summing with # BLAS dot or nrm2 xp = np.abs(x.data.ravel(order)) if p == float('inf'): xp *= w.ravel(order) return np.max(xp) else: xp = np.power(xp, p, out=xp) xp *= w.ravel(order) return np.sum(xp) ** (1 / p)
Diagonally weighted p-norm implementation.
Below is the the instruction that describes the task: ### Input: Diagonally weighted p-norm implementation. ### Response: def _pnorm_diagweight(x, p, w): """Diagonally weighted p-norm implementation.""" # Ravel both in the same order (w is a numpy array) order = 'F' if all(a.flags.f_contiguous for a in (x.data, w)) else 'C' # This is faster than first applying the weights and then summing with # BLAS dot or nrm2 xp = np.abs(x.data.ravel(order)) if p == float('inf'): xp *= w.ravel(order) return np.max(xp) else: xp = np.power(xp, p, out=xp) xp *= w.ravel(order) return np.sum(xp) ** (1 / p)
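The weighted norm above computes (sum_i w_i |x_i|**p)**(1/p), with the p = inf branch taking the max of the weighted absolute values instead. A short numpy check of both branches on plain arrays, which stand in for the space element and its weight array:

import numpy as np

def pnorm_diagweight(x, p, w):
    xp = np.abs(x)
    if p == float("inf"):
        return np.max(xp * w)
    return np.sum(w * xp ** p) ** (1 / p)

x = np.array([3.0, -4.0])
w = np.array([1.0, 2.0])
print(pnorm_diagweight(x, 2, w))             # sqrt(9 + 2*16) = sqrt(41) ~ 6.403
print(pnorm_diagweight(x, float("inf"), w))  # max(3*1, 4*2) = 8.0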
def upload_image(self, subreddit, image_path, name=None, header=False, upload_as=None): """Upload an image to the subreddit. :param image_path: A path to the jpg or png image you want to upload. :param name: The name to provide the image. When None the name will be filename less any extension. :param header: When True, upload the image as the subreddit header. :param upload_as: Must be `'jpg'`, `'png'` or `None`. When None, this will match the format of the image itself. In all cases where both this value and the image format is not png, reddit will also convert the image mode to RGBA. reddit optimizes the image according to this value. :returns: A link to the uploaded image. Raises an exception otherwise. """ if name and header: raise TypeError('Both name and header cannot be set.') if upload_as not in (None, 'png', 'jpg'): raise TypeError("upload_as must be 'jpg', 'png', or None.") with open(image_path, 'rb') as image: image_type = upload_as or _image_type(image) data = {'r': six.text_type(subreddit), 'img_type': image_type} if header: data['header'] = 1 else: if not name: name = os.path.splitext(os.path.basename(image.name))[0] data['name'] = name response = json.loads(self._request( self.config['upload_image'], data=data, files={'file': image}, method=to_native_string('POST'), retry_on_error=False)) if response['errors']: raise errors.APIException(response['errors'], None) return response['img_src']
Upload an image to the subreddit. :param image_path: A path to the jpg or png image you want to upload. :param name: The name to provide the image. When None the name will be filename less any extension. :param header: When True, upload the image as the subreddit header. :param upload_as: Must be `'jpg'`, `'png'` or `None`. When None, this will match the format of the image itself. In all cases where both this value and the image format is not png, reddit will also convert the image mode to RGBA. reddit optimizes the image according to this value. :returns: A link to the uploaded image. Raises an exception otherwise.
Below is the the instruction that describes the task: ### Input: Upload an image to the subreddit. :param image_path: A path to the jpg or png image you want to upload. :param name: The name to provide the image. When None the name will be filename less any extension. :param header: When True, upload the image as the subreddit header. :param upload_as: Must be `'jpg'`, `'png'` or `None`. When None, this will match the format of the image itself. In all cases where both this value and the image format is not png, reddit will also convert the image mode to RGBA. reddit optimizes the image according to this value. :returns: A link to the uploaded image. Raises an exception otherwise. ### Response: def upload_image(self, subreddit, image_path, name=None, header=False, upload_as=None): """Upload an image to the subreddit. :param image_path: A path to the jpg or png image you want to upload. :param name: The name to provide the image. When None the name will be filename less any extension. :param header: When True, upload the image as the subreddit header. :param upload_as: Must be `'jpg'`, `'png'` or `None`. When None, this will match the format of the image itself. In all cases where both this value and the image format is not png, reddit will also convert the image mode to RGBA. reddit optimizes the image according to this value. :returns: A link to the uploaded image. Raises an exception otherwise. """ if name and header: raise TypeError('Both name and header cannot be set.') if upload_as not in (None, 'png', 'jpg'): raise TypeError("upload_as must be 'jpg', 'png', or None.") with open(image_path, 'rb') as image: image_type = upload_as or _image_type(image) data = {'r': six.text_type(subreddit), 'img_type': image_type} if header: data['header'] = 1 else: if not name: name = os.path.splitext(os.path.basename(image.name))[0] data['name'] = name response = json.loads(self._request( self.config['upload_image'], data=data, files={'file': image}, method=to_native_string('POST'), retry_on_error=False)) if response['errors']: raise errors.APIException(response['errors'], None) return response['img_src']
def get_departures(self, stop_id, route, destination, api_key): """Get the latest data from Transport NSW.""" self.stop_id = stop_id self.route = route self.destination = destination self.api_key = api_key # Build the URL including the STOP_ID and the API key url = \ 'https://api.transport.nsw.gov.au/v1/tp/departure_mon?' \ 'outputFormat=rapidJSON&coordOutputFormat=EPSG%3A4326&' \ 'mode=direct&type_dm=stop&name_dm=' \ + self.stop_id \ + '&departureMonitorMacro=true&TfNSWDM=true&version=10.2.1.42' auth = 'apikey ' + self.api_key header = {'Accept': 'application/json', 'Authorization': auth} # Send query or return error try: response = requests.get(url, headers=header, timeout=10) except: logger.warning("Network or Timeout error") return self.info # If there is no valid request if response.status_code != 200: logger.warning("Error with the request sent; check api key") return self.info # Parse the result as a JSON object result = response.json() # If there is no stop events for the query try: result['stopEvents'] except KeyError: logger.warning("No stop events for this query") return self.info # Set variables maxresults = 1 monitor = [] if self.destination != '': for i in range(len(result['stopEvents'])): destination = result['stopEvents'][i]['transportation']['destination']['name'] if destination == self.destination: event = self.parseEvent(result, i) if event != None: monitor.append(event) if len(monitor) >= maxresults: # We found enough results, lets stop break elif self.route != '': # Find the next stop events for a specific route for i in range(len(result['stopEvents'])): number = result['stopEvents'][i]['transportation']['number'] if number == self.route: event = self.parseEvent(result, i) if event != None: monitor.append(event) if len(monitor) >= maxresults: # We found enough results, lets stop break else: # No route defined, find any route leaving next for i in range(0, maxresults): event = self.parseEvent(result, i) if event != None: monitor.append(event) if monitor: self.info = { ATTR_STOP_ID: self.stop_id, ATTR_ROUTE: monitor[0][0], ATTR_DUE_IN: monitor[0][1], ATTR_DELAY: monitor[0][2], ATTR_REALTIME: monitor[0][5], ATTR_DESTINATION: monitor[0][6], ATTR_MODE: monitor[0][7] } return self.info
Get the latest data from Transport NSW.
Below is the the instruction that describes the task: ### Input: Get the latest data from Transport NSW. ### Response: def get_departures(self, stop_id, route, destination, api_key): """Get the latest data from Transport NSW.""" self.stop_id = stop_id self.route = route self.destination = destination self.api_key = api_key # Build the URL including the STOP_ID and the API key url = \ 'https://api.transport.nsw.gov.au/v1/tp/departure_mon?' \ 'outputFormat=rapidJSON&coordOutputFormat=EPSG%3A4326&' \ 'mode=direct&type_dm=stop&name_dm=' \ + self.stop_id \ + '&departureMonitorMacro=true&TfNSWDM=true&version=10.2.1.42' auth = 'apikey ' + self.api_key header = {'Accept': 'application/json', 'Authorization': auth} # Send query or return error try: response = requests.get(url, headers=header, timeout=10) except: logger.warning("Network or Timeout error") return self.info # If there is no valid request if response.status_code != 200: logger.warning("Error with the request sent; check api key") return self.info # Parse the result as a JSON object result = response.json() # If there is no stop events for the query try: result['stopEvents'] except KeyError: logger.warning("No stop events for this query") return self.info # Set variables maxresults = 1 monitor = [] if self.destination != '': for i in range(len(result['stopEvents'])): destination = result['stopEvents'][i]['transportation']['destination']['name'] if destination == self.destination: event = self.parseEvent(result, i) if event != None: monitor.append(event) if len(monitor) >= maxresults: # We found enough results, lets stop break elif self.route != '': # Find the next stop events for a specific route for i in range(len(result['stopEvents'])): number = result['stopEvents'][i]['transportation']['number'] if number == self.route: event = self.parseEvent(result, i) if event != None: monitor.append(event) if len(monitor) >= maxresults: # We found enough results, lets stop break else: # No route defined, find any route leaving next for i in range(0, maxresults): event = self.parseEvent(result, i) if event != None: monitor.append(event) if monitor: self.info = { ATTR_STOP_ID: self.stop_id, ATTR_ROUTE: monitor[0][0], ATTR_DUE_IN: monitor[0][1], ATTR_DELAY: monitor[0][2], ATTR_REALTIME: monitor[0][5], ATTR_DESTINATION: monitor[0][6], ATTR_MODE: monitor[0][7] } return self.info
def linkify(self, modules): """Link modules and Satellite links :param modules: Module object list :type modules: alignak.objects.module.Modules :return: None """ logger.debug("Linkify %s with %s", self, modules) self.linkify_s_by_module(modules)
Link modules and Satellite links :param modules: Module object list :type modules: alignak.objects.module.Modules :return: None
Below is the the instruction that describes the task: ### Input: Link modules and Satellite links :param modules: Module object list :type modules: alignak.objects.module.Modules :return: None ### Response: def linkify(self, modules): """Link modules and Satellite links :param modules: Module object list :type modules: alignak.objects.module.Modules :return: None """ logger.debug("Linkify %s with %s", self, modules) self.linkify_s_by_module(modules)
def get_recent_state(self, current_observation): """Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations """ # This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state = [current_observation] idx = len(self.recent_observations) - 1 for offset in range(0, self.window_length - 1): current_idx = idx - offset current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state.insert(0, self.recent_observations[current_idx]) while len(state) < self.window_length: state.insert(0, zeroed_observation(state[0])) return state
Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations
Below is the the instruction that describes the task: ### Input: Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations ### Response: def get_recent_state(self, current_observation): """Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations """ # This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state = [current_observation] idx = len(self.recent_observations) - 1 for offset in range(0, self.window_length - 1): current_idx = idx - offset current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state.insert(0, self.recent_observations[current_idx]) while len(state) < self.window_length: state.insert(0, zeroed_observation(state[0])) return state
def _count(dicts): """ Merge a list of dicts, summing their values. """ counts = defaultdict(int) for d in dicts: for k, v in d.items(): counts[k] += v return counts
Merge a list of dicts, summing their values.
Below is the the instruction that describes the task: ### Input: Merge a list of dicts, summing their values. ### Response: def _count(dicts): """ Merge a list of dicts, summing their values. """ counts = defaultdict(int) for d in dicts: for k, v in d.items(): counts[k] += v return counts
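A quick usage sketch for the _count helper in the row above (plain Python; the example dicts are invented for illustration):

    from collections import defaultdict

    def _count(dicts):
        # Merge a sequence of dicts, summing values key by key.
        counts = defaultdict(int)
        for d in dicts:
            for k, v in d.items():
                counts[k] += v
        return counts

    print(dict(_count([{'a': 1, 'b': 2}, {'a': 3}])))  # {'a': 4, 'b': 2}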
def iplot_hys(fignum, B, M, s): """ function to plot hysteresis data This function has been adapted from pmagplotlib.iplot_hys for specific use within a Jupyter notebook. Parameters ----------- fignum : reference number for matplotlib figure being created B : list of B (flux density) values of hysteresis experiment M : list of M (magnetization) values of hysteresis experiment s : specimen name """ if fignum != 0: plt.figure(num=fignum) plt.clf() hpars = {} # close up loop Npts = len(M) B70 = 0.7 * B[0] # 70 percent of maximum field for b in B: if b < B70: break Nint = B.index(b) - 1 if Nint > 30: Nint = 30 if Nint < 10: Nint = 10 Bzero, Mzero, Mfix, Mnorm, Madj, MadjN = "", "", [], [], [], [] Mazero = "" m_init = 0.5 * (M[0] + M[1]) m_fin = 0.5 * (M[-1] + M[-2]) diff = m_fin - m_init Bmin = 0. for k in range(Npts): frac = old_div(float(k), float(Npts - 1)) Mfix.append((M[k] - diff * frac)) if Bzero == "" and B[k] < 0: Bzero = k if B[k] < Bmin: Bmin = B[k] kmin = k # adjust slope with first 30 data points (throwing out first 3) Bslop = B[2:Nint + 2] Mslop = Mfix[2:Nint + 2] polyU = polyfit(Bslop, Mslop, 1) # best fit line to high field points # adjust slope with first 30 points of ascending branch Bslop = B[kmin:kmin + (Nint + 1)] Mslop = Mfix[kmin:kmin + (Nint + 1)] polyL = polyfit(Bslop, Mslop, 1) # best fit line to high field points xhf = 0.5 * (polyU[0] + polyL[0]) # mean of two slopes # convert B to A/m, high field slope in m^3 hpars['hysteresis_xhf'] = '%8.2e' % (xhf * 4 * np.pi * 1e-7) meanint = 0.5 * (polyU[1] + polyL[1]) # mean of two intercepts Msat = 0.5 * (polyU[1] - polyL[1]) # mean of saturation remanence Moff = [] for k in range(Npts): # take out linear slope and offset (makes symmetric about origin) Moff.append((Mfix[k] - xhf * B[k] - meanint)) if Mzero == "" and Moff[k] < 0: Mzero = k if Mzero != "" and Mazero == "" and Moff[k] > 0: Mazero = k hpars['hysteresis_ms_moment'] = '%8.3e' % (Msat) # Ms in Am^2 # # split into upper and lower loops for splining Mupper, Bupper, Mlower, Blower = [], [], [], [] deltaM, Bdm = [], [] # diff between upper and lower curves at Bdm for k in range(kmin - 2, 0, -2): Mupper.append(old_div(Moff[k], Msat)) Bupper.append(B[k]) for k in range(kmin + 2, len(B)-1): Mlower.append(Moff[k] / Msat) Blower.append(B[k]) Iupper = spline.Spline(Bupper, Mupper) # get splines for upper up and down Ilower = spline.Spline(Blower, Mlower) # get splines for lower for b in np.arange(B[0]): # get range of field values Mpos = ((Iupper(b) - Ilower(b))) # evaluate on both sides of B Mneg = ((Iupper(-b) - Ilower(-b))) Bdm.append(b) deltaM.append(0.5 * (Mpos + Mneg)) # take average delta M print('whew') for k in range(Npts): MadjN.append(old_div(Moff[k], Msat)) Mnorm.append(old_div(M[k], Msat)) # find Mr : average of two spline fits evaluted at B=0 (times Msat) Mr = Msat * 0.5 * (Iupper(0.) - Ilower(0.)) hpars['hysteresis_mr_moment'] = '%8.3e' % (Mr) # find Bc (x intercept), interpolate between two bounding points Bz = B[Mzero - 1:Mzero + 1] Mz = Moff[Mzero - 1:Mzero + 1] Baz = B[Mazero - 1:Mazero + 1] Maz = Moff[Mazero - 1:Mazero + 1] try: poly = polyfit(Bz, Mz, 1) # best fit line through two bounding points Bc = old_div(-poly[1], poly[0]) # x intercept # best fit line through two bounding points poly = polyfit(Baz, Maz, 1) Bac = old_div(-poly[1], poly[0]) # x intercept hpars['hysteresis_bc'] = '%8.3e' % (0.5 * (abs(Bc) + abs(Bac))) except: hpars['hysteresis_bc'] = '0' return hpars, deltaM, Bdm, B, Mnorm, MadjN
function to plot hysteresis data This function has been adapted from pmagplotlib.iplot_hys for specific use within a Jupyter notebook. Parameters ----------- fignum : reference number for matplotlib figure being created B : list of B (flux density) values of hysteresis experiment M : list of M (magnetization) values of hysteresis experiment s : specimen name
Below is the the instruction that describes the task: ### Input: function to plot hysteresis data This function has been adapted from pmagplotlib.iplot_hys for specific use within a Jupyter notebook. Parameters ----------- fignum : reference number for matplotlib figure being created B : list of B (flux density) values of hysteresis experiment M : list of M (magnetization) values of hysteresis experiment s : specimen name ### Response: def iplot_hys(fignum, B, M, s): """ function to plot hysteresis data This function has been adapted from pmagplotlib.iplot_hys for specific use within a Jupyter notebook. Parameters ----------- fignum : reference number for matplotlib figure being created B : list of B (flux density) values of hysteresis experiment M : list of M (magnetization) values of hysteresis experiment s : specimen name """ if fignum != 0: plt.figure(num=fignum) plt.clf() hpars = {} # close up loop Npts = len(M) B70 = 0.7 * B[0] # 70 percent of maximum field for b in B: if b < B70: break Nint = B.index(b) - 1 if Nint > 30: Nint = 30 if Nint < 10: Nint = 10 Bzero, Mzero, Mfix, Mnorm, Madj, MadjN = "", "", [], [], [], [] Mazero = "" m_init = 0.5 * (M[0] + M[1]) m_fin = 0.5 * (M[-1] + M[-2]) diff = m_fin - m_init Bmin = 0. for k in range(Npts): frac = old_div(float(k), float(Npts - 1)) Mfix.append((M[k] - diff * frac)) if Bzero == "" and B[k] < 0: Bzero = k if B[k] < Bmin: Bmin = B[k] kmin = k # adjust slope with first 30 data points (throwing out first 3) Bslop = B[2:Nint + 2] Mslop = Mfix[2:Nint + 2] polyU = polyfit(Bslop, Mslop, 1) # best fit line to high field points # adjust slope with first 30 points of ascending branch Bslop = B[kmin:kmin + (Nint + 1)] Mslop = Mfix[kmin:kmin + (Nint + 1)] polyL = polyfit(Bslop, Mslop, 1) # best fit line to high field points xhf = 0.5 * (polyU[0] + polyL[0]) # mean of two slopes # convert B to A/m, high field slope in m^3 hpars['hysteresis_xhf'] = '%8.2e' % (xhf * 4 * np.pi * 1e-7) meanint = 0.5 * (polyU[1] + polyL[1]) # mean of two intercepts Msat = 0.5 * (polyU[1] - polyL[1]) # mean of saturation remanence Moff = [] for k in range(Npts): # take out linear slope and offset (makes symmetric about origin) Moff.append((Mfix[k] - xhf * B[k] - meanint)) if Mzero == "" and Moff[k] < 0: Mzero = k if Mzero != "" and Mazero == "" and Moff[k] > 0: Mazero = k hpars['hysteresis_ms_moment'] = '%8.3e' % (Msat) # Ms in Am^2 # # split into upper and lower loops for splining Mupper, Bupper, Mlower, Blower = [], [], [], [] deltaM, Bdm = [], [] # diff between upper and lower curves at Bdm for k in range(kmin - 2, 0, -2): Mupper.append(old_div(Moff[k], Msat)) Bupper.append(B[k]) for k in range(kmin + 2, len(B)-1): Mlower.append(Moff[k] / Msat) Blower.append(B[k]) Iupper = spline.Spline(Bupper, Mupper) # get splines for upper up and down Ilower = spline.Spline(Blower, Mlower) # get splines for lower for b in np.arange(B[0]): # get range of field values Mpos = ((Iupper(b) - Ilower(b))) # evaluate on both sides of B Mneg = ((Iupper(-b) - Ilower(-b))) Bdm.append(b) deltaM.append(0.5 * (Mpos + Mneg)) # take average delta M print('whew') for k in range(Npts): MadjN.append(old_div(Moff[k], Msat)) Mnorm.append(old_div(M[k], Msat)) # find Mr : average of two spline fits evaluted at B=0 (times Msat) Mr = Msat * 0.5 * (Iupper(0.) - Ilower(0.)) hpars['hysteresis_mr_moment'] = '%8.3e' % (Mr) # find Bc (x intercept), interpolate between two bounding points Bz = B[Mzero - 1:Mzero + 1] Mz = Moff[Mzero - 1:Mzero + 1] Baz = B[Mazero - 1:Mazero + 1] Maz = Moff[Mazero - 1:Mazero + 1] try: poly = polyfit(Bz, Mz, 1) # best fit line through two bounding points Bc = old_div(-poly[1], poly[0]) # x intercept # best fit line through two bounding points poly = polyfit(Baz, Maz, 1) Bac = old_div(-poly[1], poly[0]) # x intercept hpars['hysteresis_bc'] = '%8.3e' % (0.5 * (abs(Bc) + abs(Bac))) except: hpars['hysteresis_bc'] = '0' return hpars, deltaM, Bdm, B, Mnorm, MadjN
def setText(self, label, default='', description='Set Text', format='text'): """ Set text in a notebook pipeline (via interaction or with nbconvert) """ obj = self.load(label) if obj == None: obj=default self.save(obj, label) # initialize with default textw = Text(value=obj, description=description) hndl = interact(self.save, obj=textw, label=fixed(label), format=fixed(format))
Set text in a notebook pipeline (via interaction or with nbconvert)
Below is the the instruction that describes the task: ### Input: Set text in a notebook pipeline (via interaction or with nbconvert) ### Response: def setText(self, label, default='', description='Set Text', format='text'): """ Set text in a notebook pipeline (via interaction or with nbconvert) """ obj = self.load(label) if obj == None: obj=default self.save(obj, label) # initialize with default textw = Text(value=obj, description=description) hndl = interact(self.save, obj=textw, label=fixed(label), format=fixed(format))
def historical_identifier(self): """Scopus IDs of previous profiles now compromising this profile.""" hist = chained_get(self._json, ["coredata", 'historical-identifier'], []) return [d['$'].split(":")[-1] for d in hist] or None
Scopus IDs of previous profiles now compromising this profile.
Below is the the instruction that describes the task: ### Input: Scopus IDs of previous profiles now compromising this profile. ### Response: def historical_identifier(self): """Scopus IDs of previous profiles now compromising this profile.""" hist = chained_get(self._json, ["coredata", 'historical-identifier'], []) return [d['$'].split(":")[-1] for d in hist] or None
def port_knock_tcp(self, host="localhost", port=22, timeout=15): """Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed """ # Resolve host name if possible try: connect_host = socket.gethostbyname(host) host_human = "{} ({})".format(connect_host, host) except socket.error as e: self.log.warn('Unable to resolve address: ' '{} ({}) Trying anyway!'.format(host, e)) connect_host = host host_human = connect_host # Attempt socket connection try: knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) knock.settimeout(timeout) knock.connect((connect_host, port)) knock.close() self.log.debug('Socket connect OK for host ' '{} on port {}.'.format(host_human, port)) return True except socket.error as e: self.log.debug('Socket connect FAIL for' ' {} port {} ({})'.format(host_human, port, e)) return False
Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed
Below is the the instruction that describes the task: ### Input: Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed ### Response: def port_knock_tcp(self, host="localhost", port=22, timeout=15): """Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed """ # Resolve host name if possible try: connect_host = socket.gethostbyname(host) host_human = "{} ({})".format(connect_host, host) except socket.error as e: self.log.warn('Unable to resolve address: ' '{} ({}) Trying anyway!'.format(host, e)) connect_host = host host_human = connect_host # Attempt socket connection try: knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) knock.settimeout(timeout) knock.connect((connect_host, port)) knock.close() self.log.debug('Socket connect OK for host ' '{} on port {}.'.format(host_human, port)) return True except socket.error as e: self.log.debug('Socket connect FAIL for' ' {} port {} ({})'.format(host_human, port, e)) return False
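A standalone sketch of the same TCP "knock" idea outside the helper class (hypothetical function name; uses only the standard socket module):

    import socket

    def port_open(host, port, timeout=15):
        # Return True if something accepts a TCP connection on (host, port).
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:
            return False

    print(port_open('localhost', 22))  # True if an SSH daemon is listening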
def make_raw_content_url(repo_slug, git_ref, file_path): """Make a raw content (raw.githubusercontent.com) URL to a file. Parameters ---------- repo_slug : `str` or `RepoSlug` The repository slug, formatted as either a `str` (``'owner/name'``) or a `RepoSlug` object (created by `parse_repo_slug_from_url`). git_ref : `str` The git ref: a branch name, commit hash, or tag name. file_path : `str` The POSIX path of the file in the repository tree. """ if isinstance(repo_slug, RepoSlug): slug_str = repo_slug.full else: slug_str = repo_slug if file_path.startswith('/'): file_path = file_path.lstrip('/') template = 'https://raw.githubusercontent.com/{slug}/{git_ref}/{path}' return template.format( slug=slug_str, git_ref=git_ref, path=file_path)
Make a raw content (raw.githubusercontent.com) URL to a file. Parameters ---------- repo_slug : `str` or `RepoSlug` The repository slug, formatted as either a `str` (``'owner/name'``) or a `RepoSlug` object (created by `parse_repo_slug_from_url`). git_ref : `str` The git ref: a branch name, commit hash, or tag name. file_path : `str` The POSIX path of the file in the repository tree.
Below is the the instruction that describes the task: ### Input: Make a raw content (raw.githubusercontent.com) URL to a file. Parameters ---------- repo_slug : `str` or `RepoSlug` The repository slug, formatted as either a `str` (``'owner/name'``) or a `RepoSlug` object (created by `parse_repo_slug_from_url`). git_ref : `str` The git ref: a branch name, commit hash, or tag name. file_path : `str` The POSIX path of the file in the repository tree. ### Response: def make_raw_content_url(repo_slug, git_ref, file_path): """Make a raw content (raw.githubusercontent.com) URL to a file. Parameters ---------- repo_slug : `str` or `RepoSlug` The repository slug, formatted as either a `str` (``'owner/name'``) or a `RepoSlug` object (created by `parse_repo_slug_from_url`). git_ref : `str` The git ref: a branch name, commit hash, or tag name. file_path : `str` The POSIX path of the file in the repository tree. """ if isinstance(repo_slug, RepoSlug): slug_str = repo_slug.full else: slug_str = repo_slug if file_path.startswith('/'): file_path = file_path.lstrip('/') template = 'https://raw.githubusercontent.com/{slug}/{git_ref}/{path}' return template.format( slug=slug_str, git_ref=git_ref, path=file_path)
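A minimal, string-slug-only sketch of the URL builder above (it drops the RepoSlug branch; the slug and path shown are made up):

    def raw_content_url(repo_slug, git_ref, file_path):
        # Build a raw.githubusercontent.com URL from an 'owner/name' slug.
        file_path = file_path.lstrip('/')
        return 'https://raw.githubusercontent.com/{}/{}/{}'.format(
            repo_slug, git_ref, file_path)

    print(raw_content_url('owner/name', 'master', '/docs/index.rst'))
    # https://raw.githubusercontent.com/owner/name/master/docs/index.rst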
async def create_server( self, host: Optional[str] = None, port: Optional[int] = None, debug: bool = False, ssl: Union[dict, SSLContext, None] = None, sock: Optional[socket] = None, protocol: Type[Protocol] = None, backlog: int = 100, stop_event: Any = None, access_log: Optional[bool] = None, return_asyncio_server=False, asyncio_server_kwargs=None, ) -> None: """ Asynchronous version of :func:`run`. This method will take care of the operations necessary to invoke the *before_start* events via :func:`trigger_events` method invocation before starting the *sanic* app in Async mode. .. note:: This does not support multiprocessing and is not the preferred way to run a :class:`Sanic` application. :param host: Address to host on :type host: str :param port: Port to host on :type port: int :param debug: Enables debug output (slows server) :type debug: bool :param ssl: SSLContext, or location of certificate and key for SSL encryption of worker(s) :type ssl:SSLContext or dict :param sock: Socket for the server to accept connections from :type sock: socket :param protocol: Subclass of asyncio Protocol class :type protocol: type[Protocol] :param backlog: a number of unaccepted connections that the system will allow before refusing new connections :type backlog: int :param stop_event: event to be triggered before stopping the app - deprecated :type stop_event: None :param access_log: Enables writing access logs (slows server) :type access_log: bool :param return_asyncio_server: flag that defines whether there's a need to return asyncio.Server or start it serving right away :type return_asyncio_server: bool :param asyncio_server_kwargs: key-value arguments for asyncio/uvloop create_server method :type asyncio_server_kwargs: dict :return: Nothing """ if sock is None: host, port = host or "127.0.0.1", port or 8000 if protocol is None: protocol = ( WebSocketProtocol if self.websocket_enabled else HttpProtocol ) if stop_event is not None: if debug: warnings.simplefilter("default") warnings.warn( "stop_event will be removed from future versions.", DeprecationWarning, ) # if access_log is passed explicitly change config.ACCESS_LOG if access_log is not None: self.config.ACCESS_LOG = access_log server_settings = self._helper( host=host, port=port, debug=debug, ssl=ssl, sock=sock, loop=get_event_loop(), protocol=protocol, backlog=backlog, run_async=return_asyncio_server, ) # Trigger before_start events await self.trigger_events( server_settings.get("before_start", []), server_settings.get("loop"), ) return await serve( asyncio_server_kwargs=asyncio_server_kwargs, **server_settings )
Asynchronous version of :func:`run`. This method will take care of the operations necessary to invoke the *before_start* events via :func:`trigger_events` method invocation before starting the *sanic* app in Async mode. .. note:: This does not support multiprocessing and is not the preferred way to run a :class:`Sanic` application. :param host: Address to host on :type host: str :param port: Port to host on :type port: int :param debug: Enables debug output (slows server) :type debug: bool :param ssl: SSLContext, or location of certificate and key for SSL encryption of worker(s) :type ssl:SSLContext or dict :param sock: Socket for the server to accept connections from :type sock: socket :param protocol: Subclass of asyncio Protocol class :type protocol: type[Protocol] :param backlog: a number of unaccepted connections that the system will allow before refusing new connections :type backlog: int :param stop_event: event to be triggered before stopping the app - deprecated :type stop_event: None :param access_log: Enables writing access logs (slows server) :type access_log: bool :param return_asyncio_server: flag that defines whether there's a need to return asyncio.Server or start it serving right away :type return_asyncio_server: bool :param asyncio_server_kwargs: key-value arguments for asyncio/uvloop create_server method :type asyncio_server_kwargs: dict :return: Nothing
Below is the the instruction that describes the task: ### Input: Asynchronous version of :func:`run`. This method will take care of the operations necessary to invoke the *before_start* events via :func:`trigger_events` method invocation before starting the *sanic* app in Async mode. .. note:: This does not support multiprocessing and is not the preferred way to run a :class:`Sanic` application. :param host: Address to host on :type host: str :param port: Port to host on :type port: int :param debug: Enables debug output (slows server) :type debug: bool :param ssl: SSLContext, or location of certificate and key for SSL encryption of worker(s) :type ssl:SSLContext or dict :param sock: Socket for the server to accept connections from :type sock: socket :param protocol: Subclass of asyncio Protocol class :type protocol: type[Protocol] :param backlog: a number of unaccepted connections that the system will allow before refusing new connections :type backlog: int :param stop_event: event to be triggered before stopping the app - deprecated :type stop_event: None :param access_log: Enables writing access logs (slows server) :type access_log: bool :param return_asyncio_server: flag that defines whether there's a need to return asyncio.Server or start it serving right away :type return_asyncio_server: bool :param asyncio_server_kwargs: key-value arguments for asyncio/uvloop create_server method :type asyncio_server_kwargs: dict :return: Nothing ### Response: async def create_server( self, host: Optional[str] = None, port: Optional[int] = None, debug: bool = False, ssl: Union[dict, SSLContext, None] = None, sock: Optional[socket] = None, protocol: Type[Protocol] = None, backlog: int = 100, stop_event: Any = None, access_log: Optional[bool] = None, return_asyncio_server=False, asyncio_server_kwargs=None, ) -> None: """ Asynchronous version of :func:`run`. This method will take care of the operations necessary to invoke the *before_start* events via :func:`trigger_events` method invocation before starting the *sanic* app in Async mode. .. note:: This does not support multiprocessing and is not the preferred way to run a :class:`Sanic` application. :param host: Address to host on :type host: str :param port: Port to host on :type port: int :param debug: Enables debug output (slows server) :type debug: bool :param ssl: SSLContext, or location of certificate and key for SSL encryption of worker(s) :type ssl:SSLContext or dict :param sock: Socket for the server to accept connections from :type sock: socket :param protocol: Subclass of asyncio Protocol class :type protocol: type[Protocol] :param backlog: a number of unaccepted connections that the system will allow before refusing new connections :type backlog: int :param stop_event: event to be triggered before stopping the app - deprecated :type stop_event: None :param access_log: Enables writing access logs (slows server) :type access_log: bool :param return_asyncio_server: flag that defines whether there's a need to return asyncio.Server or start it serving right away :type return_asyncio_server: bool :param asyncio_server_kwargs: key-value arguments for asyncio/uvloop create_server method :type asyncio_server_kwargs: dict :return: Nothing """ if sock is None: host, port = host or "127.0.0.1", port or 8000 if protocol is None: protocol = ( WebSocketProtocol if self.websocket_enabled else HttpProtocol ) if stop_event is not None: if debug: warnings.simplefilter("default") warnings.warn( "stop_event will be removed from future versions.", DeprecationWarning, ) # if access_log is passed explicitly change config.ACCESS_LOG if access_log is not None: self.config.ACCESS_LOG = access_log server_settings = self._helper( host=host, port=port, debug=debug, ssl=ssl, sock=sock, loop=get_event_loop(), protocol=protocol, backlog=backlog, run_async=return_asyncio_server, ) # Trigger before_start events await self.trigger_events( server_settings.get("before_start", []), server_settings.get("loop"), ) return await serve( asyncio_server_kwargs=asyncio_server_kwargs, **server_settings )
def del_hparam(self, name): """Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. """ if hasattr(self, name): delattr(self, name) del self._hparam_types[name]
Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter.
Below is the the instruction that describes the task: ### Input: Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. ### Response: def del_hparam(self, name): """Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. """ if hasattr(self, name): delattr(self, name) del self._hparam_types[name]
def _get_encodings(): """ Just a simple function to return the system encoding (defaults to utf-8) """ stdout_encoding = sys.stdout.encoding if sys.stdout.encoding else 'utf-8' stderr_encoding = sys.stderr.encoding if sys.stderr.encoding else 'utf-8' return stdout_encoding, stderr_encoding
Just a simple function to return the system encoding (defaults to utf-8)
Below is the the instruction that describes the task: ### Input: Just a simple function to return the system encoding (defaults to utf-8) ### Response: def _get_encodings(): """ Just a simple function to return the system encoding (defaults to utf-8) """ stdout_encoding = sys.stdout.encoding if sys.stdout.encoding else 'utf-8' stderr_encoding = sys.stderr.encoding if sys.stderr.encoding else 'utf-8' return stdout_encoding, stderr_encoding
def init_from_files( vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1e6, reserved_tokens=None): """Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string tokens that are guaranteed to be at the beginning of the subtoken vocabulary list. Returns: Subtokenizer object """ if reserved_tokens is None: reserved_tokens = RESERVED_TOKENS if tf.gfile.Exists(vocab_file): tf.logging.info("Vocab file already exists (%s)" % vocab_file) else: tf.logging.info("Begin steps to create subtoken vocabulary...") token_counts = _count_tokens(files, file_byte_limit) alphabet = _generate_alphabet_dict(token_counts) subtoken_list = _generate_subtokens_with_target_vocab_size( token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens) tf.logging.info("Generated vocabulary with %d subtokens." % len(subtoken_list)) mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list)) _save_vocab_file(vocab_file, subtoken_list) return Subtokenizer(vocab_file)
Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string tokens that are guaranteed to be at the beginning of the subtoken vocabulary list. Returns: Subtokenizer object
Below is the the instruction that describes the task: ### Input: Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string tokens that are guaranteed to be at the beginning of the subtoken vocabulary list. Returns: Subtokenizer object ### Response: def init_from_files( vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1e6, reserved_tokens=None): """Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string tokens that are guaranteed to be at the beginning of the subtoken vocabulary list. Returns: Subtokenizer object """ if reserved_tokens is None: reserved_tokens = RESERVED_TOKENS if tf.gfile.Exists(vocab_file): tf.logging.info("Vocab file already exists (%s)" % vocab_file) else: tf.logging.info("Begin steps to create subtoken vocabulary...") token_counts = _count_tokens(files, file_byte_limit) alphabet = _generate_alphabet_dict(token_counts) subtoken_list = _generate_subtokens_with_target_vocab_size( token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens) tf.logging.info("Generated vocabulary with %d subtokens." % len(subtoken_list)) mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list)) _save_vocab_file(vocab_file, subtoken_list) return Subtokenizer(vocab_file)
def _merge_lib_dict(d1, d2): """ Merges lib_dict `d2` into lib_dict `d1` """ for required, requirings in d2.items(): if required in d1: d1[required].update(requirings) else: d1[required] = requirings return None
Merges lib_dict `d2` into lib_dict `d1`
Below is the the instruction that describes the task: ### Input: Merges lib_dict `d2` into lib_dict `d1` ### Response: def _merge_lib_dict(d1, d2): """ Merges lib_dict `d2` into lib_dict `d1` """ for required, requirings in d2.items(): if required in d1: d1[required].update(requirings) else: d1[required] = requirings return None
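A short usage sketch for _merge_lib_dict (values are assumed to be sets of requiring libraries, as the update() call implies; the library names are invented):

    def _merge_lib_dict(d1, d2):
        # Merge d2 into d1 in place, unioning the sets of requirers.
        for required, requirings in d2.items():
            if required in d1:
                d1[required].update(requirings)
            else:
                d1[required] = requirings

    deps = {'libssl.so': {'curl'}}
    _merge_lib_dict(deps, {'libssl.so': {'wget'}, 'libz.so': {'curl'}})
    print(deps)  # {'libssl.so': {'curl', 'wget'}, 'libz.so': {'curl'}}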
def _get_param_iterator(self): """Return ParameterSampler instance for the given distributions""" return model_selection.ParameterSampler( self.param_distributions, self.n_iter, random_state=self.random_state )
Return ParameterSampler instance for the given distributions
Below is the the instruction that describes the task: ### Input: Return ParameterSampler instance for the given distributions ### Response: def _get_param_iterator(self): """Return ParameterSampler instance for the given distributions""" return model_selection.ParameterSampler( self.param_distributions, self.n_iter, random_state=self.random_state )
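For context, a tiny example of what the returned ParameterSampler yields (assumes scikit-learn is installed; the parameter grid is made up):

    from sklearn.model_selection import ParameterSampler

    sampler = ParameterSampler({'alpha': [0.1, 1.0, 10.0]}, n_iter=2, random_state=0)
    for params in sampler:
        print(params)  # e.g. {'alpha': 1.0} then {'alpha': 10.0}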
def reset_terminal(): ''' Reset the terminal/console screen. (Also aliased to cls.) Greater than a fullscreen terminal clear, also clears the scrollback buffer. May expose bugs in dumb terminals. ''' if os.name == 'nt': from .windows import cls cls() else: text = sc.reset _write(text) return text
Reset the terminal/console screen. (Also aliased to cls.) Greater than a fullscreen terminal clear, also clears the scrollback buffer. May expose bugs in dumb terminals.
Below is the the instruction that describes the task: ### Input: Reset the terminal/console screen. (Also aliased to cls.) Greater than a fullscreen terminal clear, also clears the scrollback buffer. May expose bugs in dumb terminals. ### Response: def reset_terminal(): ''' Reset the terminal/console screen. (Also aliased to cls.) Greater than a fullscreen terminal clear, also clears the scrollback buffer. May expose bugs in dumb terminals. ''' if os.name == 'nt': from .windows import cls cls() else: text = sc.reset _write(text) return text
def create_shortcuts(self): """Create local shortcuts""" # --- Configurable shortcuts inspect = config_shortcut(self.inspect_current_object, context='Editor', name='Inspect current object', parent=self) set_breakpoint = config_shortcut(self.set_or_clear_breakpoint, context='Editor', name='Breakpoint', parent=self) set_cond_breakpoint = config_shortcut( self.set_or_edit_conditional_breakpoint, context='Editor', name='Conditional breakpoint', parent=self) gotoline = config_shortcut(self.go_to_line, context='Editor', name='Go to line', parent=self) tab = config_shortcut(lambda: self.tab_navigation_mru(forward=False), context='Editor', name='Go to previous file', parent=self) tabshift = config_shortcut(self.tab_navigation_mru, context='Editor', name='Go to next file', parent=self) prevtab = config_shortcut(lambda: self.tabs.tab_navigate(-1), context='Editor', name='Cycle to previous file', parent=self) nexttab = config_shortcut(lambda: self.tabs.tab_navigate(1), context='Editor', name='Cycle to next file', parent=self) run_selection = config_shortcut(self.run_selection, context='Editor', name='Run selection', parent=self) new_file = config_shortcut(lambda : self.sig_new_file[()].emit(), context='Editor', name='New file', parent=self) open_file = config_shortcut(lambda : self.plugin_load[()].emit(), context='Editor', name='Open file', parent=self) save_file = config_shortcut(self.save, context='Editor', name='Save file', parent=self) save_all = config_shortcut(self.save_all, context='Editor', name='Save all', parent=self) save_as = config_shortcut(lambda : self.sig_save_as.emit(), context='Editor', name='Save As', parent=self) close_all = config_shortcut(self.close_all_files, context='Editor', name='Close all', parent=self) prev_edit_pos = config_shortcut(lambda : self.sig_prev_edit_pos.emit(), context="Editor", name="Last edit location", parent=self) prev_cursor = config_shortcut(lambda : self.sig_prev_cursor.emit(), context="Editor", name="Previous cursor position", parent=self) next_cursor = config_shortcut(lambda : self.sig_next_cursor.emit(), context="Editor", name="Next cursor position", parent=self) zoom_in_1 = config_shortcut(lambda : self.zoom_in.emit(), context="Editor", name="zoom in 1", parent=self) zoom_in_2 = config_shortcut(lambda : self.zoom_in.emit(), context="Editor", name="zoom in 2", parent=self) zoom_out = config_shortcut(lambda : self.zoom_out.emit(), context="Editor", name="zoom out", parent=self) zoom_reset = config_shortcut(lambda: self.zoom_reset.emit(), context="Editor", name="zoom reset", parent=self) close_file_1 = config_shortcut(self.close_file, context="Editor", name="close file 1", parent=self) close_file_2 = config_shortcut(self.close_file, context="Editor", name="close file 2", parent=self) run_cell = config_shortcut(self.run_cell, context="Editor", name="run cell", parent=self) run_cell_and_advance = config_shortcut(self.run_cell_and_advance, context="Editor", name="run cell and advance", parent=self) go_to_next_cell = config_shortcut(self.advance_cell, context="Editor", name="go to next cell", parent=self) go_to_previous_cell = config_shortcut(lambda: self.advance_cell(reverse=True), context="Editor", name="go to previous cell", parent=self) re_run_last_cell = config_shortcut(self.re_run_last_cell, context="Editor", name="re-run last cell", parent=self) prev_warning = config_shortcut(lambda: self.sig_prev_warning.emit(), context="Editor", name="Previous warning", parent=self) next_warning = config_shortcut(lambda: self.sig_next_warning.emit(), context="Editor", name="Next warning", parent=self) split_vertically = config_shortcut(lambda: self.sig_split_vertically.emit(), context="Editor", name="split vertically", parent=self) split_horizontally = config_shortcut(lambda: self.sig_split_horizontally.emit(), context="Editor", name="split horizontally", parent=self) close_split = config_shortcut(self.close_split, context="Editor", name="close split panel", parent=self) # Return configurable ones return [inspect, set_breakpoint, set_cond_breakpoint, gotoline, tab, tabshift, run_selection, new_file, open_file, save_file, save_all, save_as, close_all, prev_edit_pos, prev_cursor, next_cursor, zoom_in_1, zoom_in_2, zoom_out, zoom_reset, close_file_1, close_file_2, run_cell, run_cell_and_advance, go_to_next_cell, go_to_previous_cell, re_run_last_cell, prev_warning, next_warning, split_vertically, split_horizontally, close_split, prevtab, nexttab]
Create local shortcuts
Below is the the instruction that describes the task: ### Input: Create local shortcuts ### Response: def create_shortcuts(self): """Create local shortcuts""" # --- Configurable shortcuts inspect = config_shortcut(self.inspect_current_object, context='Editor', name='Inspect current object', parent=self) set_breakpoint = config_shortcut(self.set_or_clear_breakpoint, context='Editor', name='Breakpoint', parent=self) set_cond_breakpoint = config_shortcut( self.set_or_edit_conditional_breakpoint, context='Editor', name='Conditional breakpoint', parent=self) gotoline = config_shortcut(self.go_to_line, context='Editor', name='Go to line', parent=self) tab = config_shortcut(lambda: self.tab_navigation_mru(forward=False), context='Editor', name='Go to previous file', parent=self) tabshift = config_shortcut(self.tab_navigation_mru, context='Editor', name='Go to next file', parent=self) prevtab = config_shortcut(lambda: self.tabs.tab_navigate(-1), context='Editor', name='Cycle to previous file', parent=self) nexttab = config_shortcut(lambda: self.tabs.tab_navigate(1), context='Editor', name='Cycle to next file', parent=self) run_selection = config_shortcut(self.run_selection, context='Editor', name='Run selection', parent=self) new_file = config_shortcut(lambda : self.sig_new_file[()].emit(), context='Editor', name='New file', parent=self) open_file = config_shortcut(lambda : self.plugin_load[()].emit(), context='Editor', name='Open file', parent=self) save_file = config_shortcut(self.save, context='Editor', name='Save file', parent=self) save_all = config_shortcut(self.save_all, context='Editor', name='Save all', parent=self) save_as = config_shortcut(lambda : self.sig_save_as.emit(), context='Editor', name='Save As', parent=self) close_all = config_shortcut(self.close_all_files, context='Editor', name='Close all', parent=self) prev_edit_pos = config_shortcut(lambda : self.sig_prev_edit_pos.emit(), context="Editor", name="Last edit location", parent=self) prev_cursor = config_shortcut(lambda : self.sig_prev_cursor.emit(), context="Editor", name="Previous cursor position", parent=self) next_cursor = config_shortcut(lambda : self.sig_next_cursor.emit(), context="Editor", name="Next cursor position", parent=self) zoom_in_1 = config_shortcut(lambda : self.zoom_in.emit(), context="Editor", name="zoom in 1", parent=self) zoom_in_2 = config_shortcut(lambda : self.zoom_in.emit(), context="Editor", name="zoom in 2", parent=self) zoom_out = config_shortcut(lambda : self.zoom_out.emit(), context="Editor", name="zoom out", parent=self) zoom_reset = config_shortcut(lambda: self.zoom_reset.emit(), context="Editor", name="zoom reset", parent=self) close_file_1 = config_shortcut(self.close_file, context="Editor", name="close file 1", parent=self) close_file_2 = config_shortcut(self.close_file, context="Editor", name="close file 2", parent=self) run_cell = config_shortcut(self.run_cell, context="Editor", name="run cell", parent=self) run_cell_and_advance = config_shortcut(self.run_cell_and_advance, context="Editor", name="run cell and advance", parent=self) go_to_next_cell = config_shortcut(self.advance_cell, context="Editor", name="go to next cell", parent=self) go_to_previous_cell = config_shortcut(lambda: self.advance_cell(reverse=True), context="Editor", name="go to previous cell", parent=self) re_run_last_cell = config_shortcut(self.re_run_last_cell, context="Editor", name="re-run last cell", parent=self) prev_warning = config_shortcut(lambda: self.sig_prev_warning.emit(), context="Editor", name="Previous warning", parent=self) next_warning = config_shortcut(lambda: self.sig_next_warning.emit(), context="Editor", name="Next warning", parent=self) split_vertically = config_shortcut(lambda: self.sig_split_vertically.emit(), context="Editor", name="split vertically", parent=self) split_horizontally = config_shortcut(lambda: self.sig_split_horizontally.emit(), context="Editor", name="split horizontally", parent=self) close_split = config_shortcut(self.close_split, context="Editor", name="close split panel", parent=self) # Return configurable ones return [inspect, set_breakpoint, set_cond_breakpoint, gotoline, tab, tabshift, run_selection, new_file, open_file, save_file, save_all, save_as, close_all, prev_edit_pos, prev_cursor, next_cursor, zoom_in_1, zoom_in_2, zoom_out, zoom_reset, close_file_1, close_file_2, run_cell, run_cell_and_advance, go_to_next_cell, go_to_previous_cell, re_run_last_cell, prev_warning, next_warning, split_vertically, split_horizontally, close_split, prevtab, nexttab]