def is_running(self): """ Checks if the QEMU process is running :returns: True or False """ if self._process: if self._process.returncode is None: return True else: self._process = None return False
Checks if the QEMU process is running :returns: True or False
def GetEventTypeString(self, event_type): """Retrieves a string representation of the event type. Args: event_type (int): event type. Returns: str: description of the event type. """ if 0 <= event_type < len(self._EVENT_TYPES): return self._EVENT_TYPES[event_type] return 'Unknown {0:d}'.format(event_type)
Retrieves a string representation of the event type. Args: event_type (int): event type. Returns: str: description of the event type.
def to_funset(self, lname="clamping", cname="clamped"): """ Converts the list of clampings to a set of `gringo.Fun`_ instances Parameters ---------- lname : str Predicate name for the clamping id cname : str Predicate name for the clamped variable Returns ------- set Representation of all clampings as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = set() for i, clamping in enumerate(self): fs.add(gringo.Fun(lname, [i])) fs = fs.union(clamping.to_funset(i, cname)) return fs
Converts the list of clampings to a set of `gringo.Fun`_ instances Parameters ---------- lname : str Predicate name for the clamping id cname : str Predicate name for the clamped variable Returns ------- set Representation of all clampings as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
def singular(plural): """ Take a plural English word and turn it into singular Obviously, this doesn't work in general. It knows just enough words to generate XML tag names for list items. For example, if we have an element called 'tracks' in the response, it will be serialized as a list without named items in JSON, but we need names for items in XML, so those will be called 'track'. """ if plural.endswith('ies'): return plural[:-3] + 'y' if plural.endswith('s'): return plural[:-1] raise ValueError('unknown plural form %r' % (plural,))
Take a plural English word and turn it into singular Obviously, this doesn't work in general. It knows just enough words to generate XML tag names for list items. For example, if we have an element called 'tracks' in the response, it will be serialized as a list without named items in JSON, but we need names for items in XML, so those will be called 'track'.
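A minimal usage sketch (not part of the original source; it simply exercises the two suffix rules above, assuming `singular` is imported from its defining module):
>>> singular('tracks')
'track'
>>> singular('categories')
'category'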
def collect(self): """ Collect libvirt data """ if libvirt is None: self.log.error('Unable to import either libvirt') return {} # Open a restricted (non-root) connection to the hypervisor conn = libvirt.openReadOnly(None) # Get hardware info conninfo = conn.getInfo() # Initialize variables memallocated = 0 coresallocated = 0 totalcores = 0 results = {} domIds = conn.listDomainsID() if 0 in domIds: # Total cores domU = conn.lookupByID(0) totalcores = domU.info()[3] # Free Space s = os.statvfs('/') freeSpace = (s.f_bavail * s.f_frsize) / 1024 # Calculate allocated memory and cores for i in domIds: # Ignore 0 if i == 0: continue domU = conn.lookupByID(i) dominfo = domU.info() memallocated += dominfo[2] if i > 0: coresallocated += dominfo[3] results = { 'InstalledMem': conninfo[1], 'MemAllocated': memallocated / 1024, 'MemFree': conninfo[1] - (memallocated / 1024), 'AllocatedCores': coresallocated, 'DiskFree': freeSpace, 'TotalCores': totalcores, 'FreeCores': (totalcores - coresallocated) } for k in results.keys(): self.publish(k, results[k], 0)
Collect libvirt data
def get_time(self): """ :return: the machine's time """ command = const.CMD_GET_TIME response_size = 1032 cmd_response = self.__send_command(command, b'', response_size) if cmd_response.get('status'): return self.__decode_time(self.__data[:4]) else: raise ZKErrorResponse("can't get time")
:return: the machine's time
def delete_shifts(self, shifts): """ Delete existing shifts. http://dev.wheniwork.com/#delete-shift """ url = "/2/shifts/?%s" % urlencode( {'ids': ",".join(str(s) for s in shifts)}) data = self._delete_resource(url) return data
Delete existing shifts. http://dev.wheniwork.com/#delete-shift
def add_how(voevent, descriptions=None, references=None): """Add descriptions or references to the How section. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. descriptions(str): Description string, or list of description strings. references(:py:class:`voeventparse.misc.Reference`): A reference element (or list thereof). """ if not voevent.xpath('How'): etree.SubElement(voevent, 'How') if descriptions is not None: for desc in _listify(descriptions): # d = etree.SubElement(voevent.How, 'Description') # voevent.How.Description[voevent.How.index(d)] = desc ##Simpler: etree.SubElement(voevent.How, 'Description') voevent.How.Description[-1] = desc if references is not None: voevent.How.extend(_listify(references))
Add descriptions or references to the How section. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. descriptions(str): Description string, or list of description strings. references(:py:class:`voeventparse.misc.Reference`): A reference element (or list thereof).
def add(self, key, column_parent, column, consistency_level): """ Increment or decrement a counter. Parameters: - key - column_parent - column - consistency_level """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_add(key, column_parent, column, consistency_level) return d
Increment or decrement a counter. Parameters: - key - column_parent - column - consistency_level
def export(request, page_id, export_unpublished=False): """ API endpoint of this source site to export a part of the page tree rooted at page_id Requests are made by a destination site's import_from_api view. """ try: if export_unpublished: root_page = Page.objects.get(id=page_id) else: root_page = Page.objects.get(id=page_id, live=True) except Page.DoesNotExist: return JsonResponse({'error': _('page not found')}) payload = export_pages(root_page, export_unpublished=export_unpublished) return JsonResponse(payload)
API endpoint of this source site to export a part of the page tree rooted at page_id Requests are made by a destination site's import_from_api view.
def summarycanvas(args): """ %prog summarycanvas output.vcf.gz Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output. """ p = OptionParser(summarycanvas.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) for vcffile in args: counter = get_gain_loss_summary(vcffile) pf = op.basename(vcffile).split(".")[0] print(pf + " " + " ".join("{}:{}".format(k, v) for k, v in sorted(counter.items())))
%prog summarycanvas output.vcf.gz Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output.
def byte_adaptor(fbuffer): """ provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer """ if six.PY3: strings = fbuffer.read().decode('latin-1') fbuffer = six.StringIO(strings) return fbuffer else: return fbuffer
provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer
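A small usage sketch for `byte_adaptor` (not from the original source; assumes `six` is installed and Python 3, where the byte stream is re-wrapped as a text stream):
>>> import io
>>> fbuffer = io.BytesIO(b'a,b,c\n1,2,3\n')
>>> byte_adaptor(fbuffer).read()
'a,b,c\n1,2,3\n'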
def paintNormal( self, painter ): """ Paints this item as the normal look. :param painter | <QPainter> """ # generate the rect rect = self.rect() x = 0 y = self.padding() w = rect.width() h = rect.height() - (2 * self.padding()) - 1 radius = self.borderRadius() # grab the color options color = self.color() alt_color = self.alternateColor() if ( self.isSelected() ): color = self.highlightColor() alt_color = self.alternateHighlightColor() # create the background brush gradient = QLinearGradient() gradient.setStart(0, 0) gradient.setFinalStop(0, h) gradient.setColorAt(0, color) gradient.setColorAt(0.8, alt_color) gradient.setColorAt(1, color) painter.setPen(self.borderColor()) if ( radius ): painter.setRenderHint(painter.Antialiasing) pen = painter.pen() pen.setWidthF(0.5) painter.setPen(pen) painter.setBrush(QBrush(gradient)) painter.drawRoundedRect(x, y, w, h, radius, radius) # create the progress brush if ( self.showProgress() ): gradient = QLinearGradient() gradient.setStart(0, 0) gradient.setFinalStop(0, h) gradient.setColorAt(0, self.progressColor()) gradient.setColorAt(0.8, self.alternateProgressColor()) gradient.setColorAt(1, self.progressColor()) prog_w = (w - 4) * (self._percentComplete/100.0) radius -= 2 painter.setPen(Qt.NoPen) painter.setBrush(QBrush(gradient)) painter.drawRoundedRect(x + 2, y + 2, prog_w, h - 4, radius, radius) # draw the text on this item if ( self.text() ): painter.setPen(self.textColor()) painter.drawText(x, y, w, h, Qt.AlignCenter, self.text())
Paints this item as the normal look. :param painter | <QPainter>
def toString(self): """ Returns a string representation of Layer instance. """ string = "Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)\n" % ( self.name, self.kind, self.size, self.active, self.frozen) if (self.type == 'Output'): string += toStringArray('Target ', self.target, self.displayWidth) string += toStringArray('Activation', self.activation, self.displayWidth) if (self.type != 'Input' and self._verbosity > 1): string += toStringArray('Error ', self.error, self.displayWidth) if (self._verbosity > 4 and self.type != 'Input'): string += toStringArray('weight ', self.weight, self.displayWidth) string += toStringArray('dweight ', self.dweight, self.displayWidth) string += toStringArray('delta ', self.delta, self.displayWidth) string += toStringArray('netinput ', self.netinput, self.displayWidth) string += toStringArray('wed ', self.wed, self.displayWidth) return string
Returns a string representation of Layer instance.
def _as_versioned_jar(self, internal_target): """Fetches the jar representation of the given target, and applies the latest pushdb version.""" jar, _ = internal_target.get_artifact_info() pushdb_entry = self._get_db(internal_target).get_entry(internal_target) return jar.copy(rev=pushdb_entry.version().version())
Fetches the jar representation of the given target, and applies the latest pushdb version.
def _draw(self, prev_angle = None, prev_length = None): """ Draws a new length- and angle-difference pair and calculates length and angle absolutes matching the last saccade drawn. Parameters: prev_angle : float, optional The last angle that was drawn in the current trajectory prev_length : float, optional The last length that was drawn in the current trajectory Note: Either both prev_angle and prev_length have to be given or none; if only one parameter is given, it will be neglected. """ if (prev_angle is None) or (prev_length is None): (length, angle)= np.unravel_index(self.drawFrom('self.firstLenAng_cumsum', self.getrand('self.firstLenAng_cumsum')), self.firstLenAng_shape) angle = angle-((self.firstLenAng_shape[1]-1)/2) angle += 0.5 length += 0.5 length *= self.fm.pixels_per_degree else: ind = int(floor(prev_length/self.fm.pixels_per_degree)) while ind >= len(self.probability_cumsum): ind -= 1 while not(self.probability_cumsum[ind]).any(): ind -= 1 J, I = np.unravel_index(self.drawFrom('self.probability_cumsum '+repr(ind),self.getrand('self.probability_cumsum '+repr(ind))), self.full_H1[ind].shape) angle = reshift((I-self.full_H1[ind].shape[1]/2) + prev_angle) angle += 0.5 length = J+0.5 length *= self.fm.pixels_per_degree return angle, length
Draws a new length- and angle-difference pair and calculates length and angle absolutes matching the last saccade drawn. Parameters: prev_angle : float, optional The last angle that was drawn in the current trajectory prev_length : float, optional The last length that was drawn in the current trajectory Note: Either both prev_angle and prev_length have to be given or none; if only one parameter is given, it will be neglected.
def name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset): """ This function is used to name an insertion mutation based on the HGVS recommendation. """ start_codon_no = codon_no - 1 if len(sbjct_nucs) == 3: start_codon_no = codon_no start_codon = get_codon(sbjct_seq, start_codon_no, start_offset) end_codon = get_codon(sbjct_seq, codon_no, start_offset) pos_name = "p.%s%d_%s%dins%s"%(aa(start_codon), start_codon_no, aa(end_codon), codon_no, aa_alt) return pos_name
This function is used to name an insertion mutation based on the HGVS recommendation.
def convertFsDirWavToWav(dirName, Fs, nC): ''' This function converts the WAV files stored in a folder to WAV using a different sampling freq and number of channels. ARGUMENTS: - dirName: the path of the folder where the WAVs are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channels of the generated WAV files ''' types = (dirName+os.sep+'*.wav',) # the tuple of file types filesToProcess = [] for files in types: filesToProcess.extend(glob.glob(files)) newDir = dirName + os.sep + "Fs" + str(Fs) + "_" + "NC"+str(nC) if os.path.exists(newDir) and newDir!=".": shutil.rmtree(newDir) os.makedirs(newDir) for f in filesToProcess: _, wavFileName = ntpath.split(f) command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + newDir + os.sep + wavFileName + "\"" print(command) os.system(command)
This function converts the WAV files stored in a folder to WAV using a different sampling freq and number of channels. ARGUMENTS: - dirName: the path of the folder where the WAVs are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channels of the generated WAV files
def month_name_to_number(month, to_int=False): """ Convert a month name (MMM) to its number (01-12). Args: month (str): 3-letters string describing month. to_int (bool): cast number to int or not. Returns: str/int: the month's number (between 01 and 12). """ number = { 'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12', }.get(month) return int(number) if to_int else number
Convert a month name (MMM) to its number (01-12). Args: month (str): 3-letters string describing month. to_int (bool): cast number to int or not. Returns: str/int: the month's number (between 01 and 12).
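A short usage sketch (not from the original source; assumes `month_name_to_number` is importable) showing both return types:
>>> month_name_to_number('Sep')
'09'
>>> month_name_to_number('Sep', to_int=True)
9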
def figsize(x=8, y=7., aspect=1.): """ manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar """ # update rcparams with adjusted figsize params mpl.rcParams.update({'figure.figsize': (x*aspect, y)})
manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar
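A quick illustration of how `aspect` scales only the width (not from the original docs; assumes matplotlib is imported as `mpl` in that module):
>>> figsize(x=10, y=6, aspect=1.5)   # figure.figsize becomes (15.0, 6)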
def read_ndk_version(ndk_dir): """Read the NDK version from the NDK dir, if possible""" try: with open(join(ndk_dir, 'source.properties')) as fileh: ndk_data = fileh.read() except IOError: info('Could not determine NDK version, no source.properties ' 'in the NDK dir') return for line in ndk_data.split('\n'): if line.startswith('Pkg.Revision'): break else: info('Could not parse $NDK_DIR/source.properties, not checking ' 'NDK version') return # Line should have the form "Pkg.Revision = ..." ndk_version = LooseVersion(line.split('=')[-1].strip()) return ndk_version
Read the NDK version from the NDK dir, if possible
def get_as_nullable_datetime(self, key): """ Converts map element into a Date or returns None if conversion is not possible. :param key: an index of element to get. :return: Date value of the element or None if conversion is not supported. """ value = self.get(key) return DateTimeConverter.to_nullable_datetime(value)
Converts map element into a Date or returns None if conversion is not possible. :param key: an index of element to get. :return: Date value of the element or None if conversion is not supported.
def _do_highlight(content, query, tag='em'): """ Highlight `query` terms in `content` with html `tag`. This method assumes that the input text (`content`) does not contain any special formatting. That is, it does not contain any html tags or similar markup that could be screwed up by the highlighting. Required arguments: `content` -- Content to search for instances of terms in `query` `query` -- The query terms to be highlighted """ for term in query: term = term.decode('utf-8') for match in re.findall('[^A-Z]+', term): # Ignore field identifiers match_re = re.compile(match, re.I) content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content) return content
Highlight `query` terms in `content` with html `tag`. This method assumes that the input text (`content`) does not contain any special formatting. That is, it does not contain any html tags or similar markup that could be screwed up by the highlighting. Required arguments: `content` -- Content to search for instances of terms in `query` `query` -- The query terms to be highlighted
def get_json_results(self, response): ''' Parses the request result and returns the JSON object. Handles all errors. ''' try: # return the proper JSON object, or error code if request didn't go through. self.most_recent_json = response.json() json_results = response.json() if response.status_code in [401, 403]: #401 is invalid key, 403 is out of monthly quota. raise PyMsCognitiveWebSearchException("CODE {code}: {message}".format(code=response.status_code,message=json_results["message"]) ) elif response.status_code in [429]: #429 means try again in x seconds. message = json_results['message'] try: # extract time out seconds from response timeout = int(re.search('in (.+?) seconds', message).group(1)) + 1 print ("CODE 429, sleeping for {timeout} seconds".format(timeout=str(timeout))) time.sleep(timeout) except (AttributeError, ValueError) as e: if not self.silent_fail: raise PyMsCognitiveWebSearchException("CODE 429. Failed to auto-sleep: {message}".format(code=response.status_code,message=json_results["message"]) ) else: print ("CODE 429. Failed to auto-sleep: {message}. Trying again in 5 seconds.".format(code=response.status_code,message=json_results["message"])) time.sleep(5) except ValueError as vE: if not self.silent_fail: raise PyMsCognitiveWebSearchException("Request returned with code %s, error msg: %s" % (response.status_code, response.text)) else: print ("[ERROR] Request returned with code %s, error msg: %s. \nContinuing in 5 seconds." % (response.status_code, response.text)) time.sleep(5) return json_results
Parses the request result and returns the JSON object. Handles all errors.
def _index_idiom(el_name, index, alt=None): """ Generate string where `el_name` is indexed by `index` if there are enough items or `alt` is returned. Args: el_name (str): Name of the `container` which is indexed. index (int): Index of the item you want to obtain from container. alt (whatever, default None): Alternative value. Returns: str: Python code. Live example:: >>> import generator as g >>> print g._index_idiom("xex", 0) # pick element from list xex = xex[0] if xex else None >>> print g._index_idiom("xex", 1, "something") # pick element from list xex = xex[1] if len(xex) - 1 >= 1 else 'something' """ el_index = "%s[%d]" % (el_name, index) if index == 0: cond = "%s" % el_name else: cond = "len(%s) - 1 >= %d" % (el_name, index) output = IND + "# pick element from list\n" return output + IND + "%s = %s if %s else %s\n\n" % ( el_name, el_index, cond, repr(alt) )
Generate string where `el_name` is indexed by `index` if there are enough items or `alt` is returned. Args: el_name (str): Name of the `container` which is indexed. index (int): Index of the item you want to obtain from container. alt (whatever, default None): Alternative value. Returns: str: Python code. Live example:: >>> import generator as g >>> print g._index_idiom("xex", 0) # pick element from list xex = xex[0] if xex else None >>> print g._index_idiom("xex", 1, "something") # pick element from list xex = xex[1] if len(xex) - 1 >= 1 else 'something'
def register_date_conversion_handler(date_specifier_patterns): """Decorator for registering handlers that convert text dates to dates. Args: date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered """ def _decorator(func): global DATE_SPECIFIERS_CONVERSION_HANDLERS DATE_SPECIFIERS_CONVERSION_HANDLERS[DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func return func return _decorator
Decorator for registering handlers that convert text dates to dates. Args: date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered
def http_method(self, method): """ Execute the given HTTP method and returns if it's success or not and the response as a string if not success and as python object after unjson if it's success. """ self.build_url() try: response = self.get_http_method(method) is_success = response.ok try: response_message = response.json() except ValueError: response_message = response.text except requests.exceptions.RequestException as exc: is_success = False response_message = exc.args return is_success, response_message
Execute the given HTTP method and returns if it's success or not and the response as a string if not success and as python object after unjson if it's success.
def _get_cache_key(self, args, kwargs): """ Returns key to be used in cache """ hash_input = json.dumps({'name': self.name, 'args': args, 'kwargs': kwargs}, sort_keys=True) # md5 is used for internal caching, not need to care about security return hashlib.md5(hash_input).hexdigest()
Returns key to be used in cache
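The key is simply an MD5 of the canonical JSON of the call signature. A standalone sketch of the same idea (not from the original source; note that on Python 3 the JSON string must be encoded to bytes before hashing, and the task name `my_task` is hypothetical):
import hashlib, json
payload = json.dumps({'name': 'my_task', 'args': [1, 2], 'kwargs': {'x': 3}}, sort_keys=True)
key = hashlib.md5(payload.encode('utf-8')).hexdigest()  # 32-character hex string used as the cache key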
def get_evidence(self, relation): """Return the Evidence object for the INDRA Statment.""" provenance = relation.get('provenance') # First try looking up the full sentence through provenance text = None context = None if provenance: sentence_tag = provenance[0].get('sentence') if sentence_tag and '@id' in sentence_tag: sentence_id = sentence_tag['@id'] sentence = self.doc.sentences.get(sentence_id) if sentence is not None: text = _sanitize(sentence['text']) # Get temporal constraints if available timexes = sentence.get('timexes', []) if timexes: # We currently handle just one timex per statement timex = timexes[0] tc = time_context_from_timex(timex) context = WorldContext(time=tc) # Get geolocation if available geolocs = sentence.get('geolocs', []) if geolocs: geoloc = geolocs[0] rc = ref_context_from_geoloc(geoloc) if context: context.geo_location = rc else: context = WorldContext(geo_location=rc) # Here we try to get the title of the document and set it # in the provenance doc_id = provenance[0].get('document', {}).get('@id') if doc_id: title = self.doc.documents.get(doc_id, {}).get('title') if title: provenance[0]['document']['title'] = title annotations = {'found_by': relation.get('rule'), 'provenance': provenance} if self.doc.dct is not None: annotations['document_creation_time'] = self.doc.dct.to_json() epistemics = {} negations = self.get_negation(relation) hedgings = self.get_hedging(relation) if hedgings: epistemics['hedgings'] = hedgings if negations: # This is the INDRA standard to show negation epistemics['negated'] = True # But we can also save the texts associated with the negation # under annotations, just in case it's needed annotations['negated_texts'] = negations # If that fails, we can still get the text of the relation if text is None: text = _sanitize(event.get('text')) ev = Evidence(source_api='eidos', text=text, annotations=annotations, context=context, epistemics=epistemics) return ev
Return the Evidence object for the INDRA Statement.
def get_macs(vm_): ''' Return a list of MAC addresses from the named vm CLI Example: .. code-block:: bash salt '*' virt.get_macs <vm name> ''' macs = [] nics = get_nics(vm_) if nics is None: return None for nic in nics: macs.append(nic) return macs
Return a list of MAC addresses from the named vm CLI Example: .. code-block:: bash salt '*' virt.get_macs <vm name>
def launch_job(job_spec): """Launch job on ML Engine.""" project_id = "projects/{}".format( text_encoder.native_to_unicode(default_project())) credentials = GoogleCredentials.get_application_default() cloudml = discovery.build("ml", "v1", credentials=credentials, cache_discovery=False) request = cloudml.projects().jobs().create(body=job_spec, parent=project_id) request.execute()
Launch job on ML Engine.
def tokenize_paragraphs(cls, text): """Convert an input string into a list of paragraphs.""" paragraphs = [] paragraphs_first_pass = text.split('\n') for p in paragraphs_first_pass: paragraphs_second_pass = re.split('\s{4,}', p) paragraphs += paragraphs_second_pass # Remove empty strings from list paragraphs = [p for p in paragraphs if p] return paragraphs
Convert an input string into a list of paragraphs.
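A small usage sketch (not from the original source; `Tokenizer` is a hypothetical name for the class providing this classmethod) showing both the newline split and the wide-whitespace split:
>>> Tokenizer.tokenize_paragraphs('First paragraph.\nSecond one.     Third, split by wide spacing.')
['First paragraph.', 'Second one.', 'Third, split by wide spacing.']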
def validate_url(url): """validate a url for zeromq""" if not isinstance(url, basestring): raise TypeError("url must be a string, not %r"%type(url)) url = url.lower() proto_addr = url.split('://') assert len(proto_addr) == 2, 'Invalid url: %r'%url proto, addr = proto_addr assert proto in ['tcp','pgm','epgm','ipc','inproc'], "Invalid protocol: %r"%proto # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391 # author: Remi Sabourin pat = re.compile(r'^([\w\d]([\w\d\-]{0,61}[\w\d])?\.)*[\w\d]([\w\d\-]{0,61}[\w\d])?$') if proto == 'tcp': lis = addr.split(':') assert len(lis) == 2, 'Invalid url: %r'%url addr,s_port = lis try: port = int(s_port) except ValueError: raise AssertionError("Invalid port %r in url: %r"%(s_port, url)) assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url else: # only validate tcp urls currently pass return True
validate a url for zeromq
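A brief usage sketch (not from the original source; the function targets Python 2, since it relies on `basestring`, and only tcp addresses are fully checked):
>>> validate_url('tcp://127.0.0.1:5555')
True
>>> validate_url('ipc:///tmp/mysocket')
True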
def _etextno_to_uri_subdirectory(etextno): """Returns the subdirectory that an etextno will be found in a gutenberg mirror. Generally, one finds the subdirectory by separating out each digit of the etext number, and uses it for a directory. The exception here is for etext numbers less than 10, which are prepended with a 0 for the directory traversal. >>> _etextno_to_uri_subdirectory(1) '0/1' >>> _etextno_to_uri_subdirectory(19) '1/19' >>> _etextno_to_uri_subdirectory(15453) '1/5/4/5/15453' """ str_etextno = str(etextno).zfill(2) all_but_last_digit = list(str_etextno[:-1]) subdir_part = "/".join(all_but_last_digit) subdir = "{}/{}".format(subdir_part, etextno) # etextno not zfilled return subdir
Returns the subdirectory that an etextno will be found in a gutenberg mirror. Generally, one finds the subdirectory by separating out each digit of the etext number, and uses it for a directory. The exception here is for etext numbers less than 10, which are prepended with a 0 for the directory traversal. >>> _etextno_to_uri_subdirectory(1) '0/1' >>> _etextno_to_uri_subdirectory(19) '1/19' >>> _etextno_to_uri_subdirectory(15453) '1/5/4/5/15453'
def _dfromtimestamp(timestamp): """Custom date timestamp constructor. ditto """ try: return datetime.date.fromtimestamp(timestamp) except OSError: timestamp -= time.timezone d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) if _isdst(d): timestamp += 3600 d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) return d
Custom date timestamp constructor. ditto
def tag_wordnet(self, **kwargs): """Create wordnet attribute in ``words`` layer. See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text` method for applicable keyword arguments. """ global wordnet_tagger if wordnet_tagger is None: # cached wn tagger wordnet_tagger = WordnetTagger() self.__wordnet_tagger = wordnet_tagger if len(kwargs) > 0: return self.__wordnet_tagger.tag_text(self, **kwargs) return self.__wordnet_tagger.tag_text(self, **self.__kwargs)
Create wordnet attribute in ``words`` layer. See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text` method for applicable keyword arguments.
def get_lldp_neighbor_detail_input_request_type_get_request_interface_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail input = ET.SubElement(get_lldp_neighbor_detail, "input") request_type = ET.SubElement(input, "request-type") get_request = ET.SubElement(request_type, "get-request") interface_type = ET.SubElement(get_request, "interface-type") interface_type.text = kwargs.pop('interface_type') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def get(self, name): """Retrieves the cluster with the given name. :param str name: name of the cluster (identifier) :return: :py:class:`elasticluster.cluster.Cluster` """ path = self._get_cluster_storage_path(name) try: with open(path, 'r') as storage: cluster = self.load(storage) # Compatibility with previous version of Node for node in sum(cluster.nodes.values(), []): if not hasattr(node, 'ips'): log.debug("Monkey patching old version of `Node` class: %s", node.name) node.ips = [node.ip_public, node.ip_private] node.preferred_ip = None cluster.storage_file = path return cluster except IOError as ex: raise ClusterNotFound("Error accessing storage file %s: %s" % (path, ex))
Retrieves the cluster with the given name. :param str name: name of the cluster (identifier) :return: :py:class:`elasticluster.cluster.Cluster`
def send(remote_host=None): """ Send my facts to a remote host if remote_host is provided, data will be sent to that host. Otherwise it will be sent to master. """ my_facts = get() if not remote_host: remote_host = nago.extensions.settings.get('server') remote_node = nago.core.get_node(remote_host) if not remote_node: raise Exception("Remote host with token='%s' not found" % remote_host) response = remote_node.send_command('facts', 'post', host_token=remote_node.token, **my_facts) result = {} result['server_response'] = response result['message'] = "sent %s facts to remote node '%s'" % (len(my_facts), remote_node.get('host_name')) return result
Send my facts to a remote host if remote_host is provided, data will be sent to that host. Otherwise it will be sent to master.
def register_entrypoints(self): """Look through the `setup_tools` `entry_points` and load all of the formats. """ for spec in iter_entry_points(self.entry_point_group): format_properties = {"name": spec.name} try: format_properties.update(spec.load()) except (DistributionNotFound, ImportError) as err: self.log.info( "ipymd format {} could not be loaded: {}".format( spec.name, err)) continue self.register(**format_properties) return self
Look through the `setup_tools` `entry_points` and load all of the formats.
def add_info(self, data): """add info to a build""" for key in data: # verboten if key in ('status','state','name','id','application','services','release'): raise ValueError("Sorry, cannot set build info with key of {}".format(key)) self.obj[key] = data[key] self.changes.append("Adding build info") return self
add info to a build
def remove_entity_tags(self): ''' Returns ------- A new TermDocumentMatrix consisting of only terms in the current TermDocumentMatrix that aren't spaCy entity tags. Note: Used if entity types are censored using FeatsFromSpacyDoc(tag_types_to_censor=...). ''' terms_to_remove = [term for term in self._term_idx_store._i2val if any([word in SPACY_ENTITY_TAGS for word in term.split()])] return self.remove_terms(terms_to_remove)
Returns ------- A new TermDocumentMatrix consisting of only terms in the current TermDocumentMatrix that aren't spaCy entity tags. Note: Used if entity types are censored using FeatsFromSpacyDoc(tag_types_to_censor=...).
def _binary_enable_zero_disable_one_conversion(cls, val, **kwargs): ''' converts a binary 0/1 to Disabled/Enabled ''' try: if val is not None: if ord(val) == 0: return 'Disabled' elif ord(val) == 1: return 'Enabled' else: return 'Invalid Value: {0!r}'.format(val) # pylint: disable=repr-flag-used-in-string else: return 'Not Defined' except TypeError: return 'Invalid Value'
converts a binary 0/1 to Disabled/Enabled
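An illustrative call (not from the original source; `PolicyInfo` is a hypothetical name for the class that defines this helper):
>>> PolicyInfo._binary_enable_zero_disable_one_conversion(b'\x00')
'Disabled'
>>> PolicyInfo._binary_enable_zero_disable_one_conversion(None)
'Not Defined'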
def transform_cur_commands_interactive(_, **kwargs): """ Transform any aliases in current commands in interactive into their respective commands. """ event_payload = kwargs.get('event_payload', {}) # text_split = current commands typed in the interactive shell without any unfinished word # text = current commands typed in the interactive shell cur_commands = event_payload.get('text', '').split(' ') _transform_cur_commands(cur_commands) event_payload.update({ 'text': ' '.join(cur_commands) })
Transform any aliases in current commands in interactive into their respective commands.
def get(*args, **kwargs): """Get OAuth2 clients.""" from invenio.modules.oauth2server.models import Client q = Client.query return q.count(), q.all()
Get OAuth2 clients.
def radii_of_curvature(self): """The radius of curvature at each point on the Polymer primitive. Notes ----- Each element of the returned list is the radius of curvature, at a point on the Polymer primitive. Element i is the radius of the circumcircle formed from indices [i-1, i, i+1] of the primitive. The first and final values are None. """ rocs = [] for i, _ in enumerate(self): if 0 < i < len(self) - 1: rocs.append(radius_of_circumcircle( self[i - 1]['CA'], self[i]['CA'], self[i + 1]['CA'])) else: rocs.append(None) return rocs
The radius of curvature at each point on the Polymer primitive. Notes ----- Each element of the returned list is the radius of curvature, at a point on the Polymer primitive. Element i is the radius of the circumcircle formed from indices [i-1, i, i+1] of the primitive. The first and final values are None.
def add_option(self, section, option, value=None): """ Creates an option for a section. If the section does not exist, it will create the section. """ # check if section exists; create if not if not self.config.has_section(section): message = self.add_section(section) if not message[0]: return message if not self.config.has_option(section, option): if value: self.config.set(section, option, value) else: self.config.set(section, option) return(True, self.config.options(section)) return(False, 'Option: {} already exists @ {}'.format(option, section))
Creates an option for a section. If the section does not exist, it will create the section.
def compile(cls, code, path=None, libraries=None, contract_name='', extra_args=None): """ Return the binary of last contract in code. """ result = cls._code_or_path( code, path, contract_name, libraries, 'bin', extra_args) return result['bin']
Return the binary of last contract in code.
def load_imdb_df(dirpath=os.path.join(BIGDATA_PATH, 'aclImdb'), subdirectories=(('train', 'test'), ('pos', 'neg', 'unsup'))): """ Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings Returns: DataFrame: columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id']) TODO: Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files >> imdb_df().head() url rating text index0 index1 index2 train pos 0 http://www.imdb.com/title/tt0453418 9 Bromwell High is a cartoon comedy. It ran at t... 1 http://www.imdb.com/title/tt0210075 7 If you like adult comedy cartoons, like South ... 2 http://www.imdb.com/title/tt0085688 9 Bromwell High is nothing short of brilliant. E... 3 http://www.imdb.com/title/tt0033022 10 "All the world's a stage and its people actors... 4 http://www.imdb.com/title/tt0043137 8 FUTZ is the only show preserved from the exper... """ dfs = {} for subdirs in tqdm(list(product(*subdirectories))): urlspath = os.path.join(dirpath, subdirs[0], 'urls_{}.txt'.format(subdirs[1])) if not os.path.isfile(urlspath): if subdirs != ('test', 'unsup'): # test/ dir doesn't usually have an unsup subdirectory logger.warning('Unable to find expected IMDB review list of URLs: {}'.format(urlspath)) continue df = pd.read_csv(urlspath, header=None, names=['url']) # df.index.name = 'id' df['url'] = series_strip(df.url, endswith='/usercomments') textsdir = os.path.join(dirpath, subdirs[0], subdirs[1]) if not os.path.isdir(textsdir): logger.warning('Unable to find expected IMDB review text subdirectory: {}'.format(textsdir)) continue filenames = [fn for fn in os.listdir(textsdir) if fn.lower().endswith('.txt')] df['index0'] = subdirs[0] # TODO: column names more generic so will work on other datasets df['index1'] = subdirs[1] df['index2'] = np.array([int(fn[:-4].split('_')[0]) for fn in filenames]) df['rating'] = np.array([int(fn[:-4].split('_')[1]) for fn in filenames]) texts = [] for fn in filenames: with ensure_open(os.path.join(textsdir, fn)) as f: texts.append(f.read()) df['text'] = np.array(texts) del texts df.set_index('index0 index1 index2'.split(), inplace=True) df.sort_index(inplace=True) dfs[subdirs] = df return pd.concat(dfs.values())
Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings Returns: DataFrame: columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id']) TODO: Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files >> imdb_df().head() url rating text index0 index1 index2 train pos 0 http://www.imdb.com/title/tt0453418 9 Bromwell High is a cartoon comedy. It ran at t... 1 http://www.imdb.com/title/tt0210075 7 If you like adult comedy cartoons, like South ... 2 http://www.imdb.com/title/tt0085688 9 Bromwell High is nothing short of brilliant. E... 3 http://www.imdb.com/title/tt0033022 10 "All the world's a stage and its people actors... 4 http://www.imdb.com/title/tt0043137 8 FUTZ is the only show preserved from the exper...
def call_temperature(*args, **kwargs): ''' Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired Arguments: * **value**: 150~500. Options: * **id**: Specifies a device ID. Can be comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.temperature value=150 salt '*' hue.temperature value=150 id=1 salt '*' hue.temperature value=150 id=1,2,3 ''' res = dict() if 'value' not in kwargs: raise CommandExecutionError("Parameter 'value' (150~500) is missing") try: value = max(min(int(kwargs['value']), 500), 150) except Exception as err: raise CommandExecutionError("Parameter 'value' does not contain an integer") devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, {"ct": value}) return res
Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired Arguments: * **value**: 150~500. Options: * **id**: Specifies a device ID. Can be comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.temperature value=150 salt '*' hue.temperature value=150 id=1 salt '*' hue.temperature value=150 id=1,2,3
def display_dataset(self): """Update the widget with information about the dataset.""" header = self.dataset.header self.parent.setWindowTitle(basename(self.filename)) short_filename = short_strings(basename(self.filename)) self.idx_filename.setText(short_filename) self.idx_s_freq.setText(str(header['s_freq'])) self.idx_n_chan.setText(str(len(header['chan_name']))) start_time = header['start_time'].strftime('%b-%d %H:%M:%S') self.idx_start_time.setText(start_time) end_time = (header['start_time'] + timedelta(seconds=header['n_samples'] / header['s_freq'])) self.idx_end_time.setText(end_time.strftime('%b-%d %H:%M:%S'))
Update the widget with information about the dataset.
def insert_mass_range_option_group(parser,nonSpin=False): """ Adds the options used to specify mass ranges in the bank generation codes to an argparser as an OptionGroup. This should be used if you want to use these options in your code. Parameters ----------- parser : object OptionParser instance. nonSpin : boolean, optional (default=False) If this is provided the spin-related options will not be added. """ massOpts = parser.add_argument_group("Options related to mass and spin " "limits for bank generation") massOpts.add_argument("--min-mass1", action="store", type=positive_float, required=True, help="Minimum mass1: must be >= min-mass2. " "REQUIRED. UNITS=Solar mass") massOpts.add_argument("--max-mass1", action="store", type=positive_float, required=True, help="Maximum mass1: must be >= max-mass2. " "REQUIRED. UNITS=Solar mass") massOpts.add_argument("--min-mass2", action="store", type=positive_float, required=True, help="Minimum mass2. REQUIRED. UNITS=Solar mass") massOpts.add_argument("--max-mass2", action="store", type=positive_float, required=True, help="Maximum mass2. REQUIRED. UNITS=Solar mass") massOpts.add_argument("--max-total-mass", action="store", type=positive_float, default=None, help="Maximum total mass. OPTIONAL, if not provided " "the max total mass is determined by the component " "masses. UNITS=Solar mass") massOpts.add_argument("--min-total-mass", action="store", type=positive_float, default=None, help="Minimum total mass. OPTIONAL, if not provided the " "min total mass is determined by the component masses." " UNITS=Solar mass") massOpts.add_argument("--max-chirp-mass", action="store", type=positive_float, default=None, help="Maximum chirp mass. OPTIONAL, if not provided the " "max chirp mass is determined by the component masses." " UNITS=Solar mass") massOpts.add_argument("--min-chirp-mass", action="store", type=positive_float, default=None, help="Minimum total mass. OPTIONAL, if not provided the " "min chirp mass is determined by the component masses." " UNITS=Solar mass") massOpts.add_argument("--max-eta", action="store", type=positive_float, default=0.25, help="Maximum symmetric mass ratio. OPTIONAL, no upper bound" " on eta will be imposed if not provided. " "UNITS=Solar mass.") massOpts.add_argument("--min-eta", action="store", type=nonnegative_float, default=0., help="Minimum symmetric mass ratio. OPTIONAL, no lower bound" " on eta will be imposed if not provided. " "UNITS=Solar mass.") massOpts.add_argument("--ns-eos", action="store", default=None, help="Select the EOS to be used for the NS when calculating " "the remnant disk mass. Only 2H is currently supported. " "OPTIONAL") massOpts.add_argument("--remnant-mass-threshold", action="store", type=nonnegative_float, default=None, help="Setting this filters EM dim NS-BH binaries: if the " "remnant disk mass does not exceed this value, the NS-BH " "binary is dropped from the target parameter space. " "When it is set to None (default value) the EM dim " "filter is not activated. OPTIONAL") massOpts.add_argument("--use-eos-max-ns-mass", action="store_true", default=False, help="Cut the mass range of the smaller object to the maximum " "mass allowed by EOS. 
" "OPTIONAL") massOpts.add_argument("--delta-bh-spin", action="store", type=positive_float, default=None, help="Grid spacing used for the BH spin z component when " "generating the surface of the minumum minimum symmetric " "mass ratio as a function of BH spin and NS mass required " "to produce a remnant disk mass that exceeds the threshold " "specificed in --remnant-mass-threshold. " "OPTIONAL (0.1 by default) ") massOpts.add_argument("--delta-ns-mass", action="store", type=positive_float, default=None, help="Grid spacing used for the NS mass when generating the " "surface of the minumum minimum symmetric mass ratio as " "a function of BH spin and NS mass required to produce " "a remnant disk mass that exceeds the thrsehold specified " "in --remnant-mass-threshold. " "OPTIONAL (0.1 by default) ") if nonSpin: parser.add_argument_group(massOpts) return massOpts massOpts.add_argument("--max-ns-spin-mag", action="store", type=nonnegative_float, default=None, help="Maximum neutron star spin magnitude. Neutron stars " "are defined as components lighter than the NS-BH " "boundary (3 Msun by default). REQUIRED if min-mass2 " "< ns-bh-boundary-mass") massOpts.add_argument("--max-bh-spin-mag", action="store", type=nonnegative_float, default=None, help="Maximum black hole spin magnitude. Black holes are " "defined as components at or above the NS-BH boundary " "(3 Msun by default). REQUIRED if max-mass1 >= " "ns-bh-boundary-mass") # Mutually exclusive group prevents both options being set on command line # If --nsbh-flag is True then spinning bank generation must ignore the # default value of ns-bh-boundary-mass. action = massOpts.add_mutually_exclusive_group(required=False) action.add_argument("--ns-bh-boundary-mass", action='store', type=positive_float, help="Mass boundary between neutron stars and black holes. " "Components below this mass are considered neutron " "stars and are subject to the neutron star spin limits. " "Components at/above are subject to the black hole spin " "limits. OPTIONAL, default=%f. UNITS=Solar mass" \ % massRangeParameters.default_nsbh_boundary_mass) action.add_argument("--nsbh-flag", action="store_true", default=False, help="Set this flag if generating a bank that contains only " "systems with 1 black hole and 1 neutron star. With " "this flag set the heavier body will always be subject " "to the black hole spin restriction and the lighter " "to the neutron star spin restriction, regardless of " "mass. OPTIONAL. If set, the value of " "--ns-bh-boundary-mass will be ignored.") return massOpts
Adds the options used to specify mass ranges in the bank generation codes to an argparser as an OptionGroup. This should be used if you want to use these options in your code. Parameters ----------- parser : object OptionParser instance. nonSpin : boolean, optional (default=False) If this is provided the spin-related options will not be added.
def main(): """ Main function""" parser = argparse.ArgumentParser(description='JSON Web Key (JWK) Generator') parser.add_argument('--kty', dest='kty', metavar='type', help='Key type', required=True) parser.add_argument('--size', dest='keysize', type=int, metavar='size', help='Key size') parser.add_argument('--crv', dest='crv', metavar='curve', help='EC curve', choices=NIST2SEC.keys(), default=DEFAULT_EC_CURVE) parser.add_argument('--exp', dest='rsa_exp', type=int, metavar='exponent', help=f'RSA public key exponent (default {DEFAULT_RSA_EXP})', default=DEFAULT_RSA_EXP) parser.add_argument('--kid', dest='kid', metavar='id', help='Key ID') args = parser.parse_args() if args.kty.upper() == 'RSA': if args.keysize is None: args.keysize = DEFAULT_RSA_KEYSIZE jwk = new_rsa_key(public_exponent=args.rsa_exp, key_size=args.keysize, kid=args.kid) elif args.kty.upper() == 'EC': if args.crv not in NIST2SEC: print("Unknown curve: {0}".format(args.crv), file=sys.stderr) exit(1) jwk = new_ec_key(crv=args.crv, kid=args.kid) elif args.kty.upper() == 'SYM': if args.keysize is None: args.keysize = DEFAULT_SYM_KEYSIZE randomkey = os.urandom(args.keysize) jwk = SYMKey(key=randomkey, kid=args.kid) else: print(f"Unknown key type: {args.kty}", file=sys.stderr) exit(1) jwk_dict = jwk.serialize(private=True) print(json.dumps(jwk_dict, sort_keys=True, indent=4)) print("SHA-256: " + jwk.thumbprint('SHA-256').decode(), file=sys.stderr)
Main function
def cli(env): """List all zones.""" manager = SoftLayer.DNSManager(env.client) zones = manager.list_zones() table = formatting.Table(['id', 'zone', 'serial', 'updated']) table.align['serial'] = 'c' table.align['updated'] = 'c' for zone in zones: table.add_row([ zone['id'], zone['name'], zone['serial'], zone['updateDate'], ]) env.fout(table)
List all zones.
def start(self): """ Start all the registered services. A new container is created for each service using the container class provided in the __init__ method. All containers are started concurrently and the method will block until all have completed their startup routine. """ service_names = ', '.join(self.service_names) _log.info('starting services: %s', service_names) SpawningProxy(self.containers).start() _log.debug('services started: %s', service_names)
Start all the registered services. A new container is created for each service using the container class provided in the __init__ method. All containers are started concurrently and the method will block until all have completed their startup routine.
def map_clusters(self, size, sampled, clusters): """ Translate cluster identity back to original data size. Parameters ---------- size : int size of original dataset sampled : array-like integer array describing location of finite values in original data. clusters : array-like integer array of cluster identities Returns ------- list of cluster identities the same length as original data. Where original data are non-finite, returns -2. """ ids = np.zeros(size, dtype=int) ids[:] = -2 ids[sampled] = clusters return ids
Translate cluster identity back to original data size. Parameters ---------- size : int size of original dataset sampled : array-like integer array describing location of finite values in original data. clusters : array-like integer array of cluster identities Returns ------- list of cluster identities the same length as original data. Where original data are non-finite, returns -2.
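A small usage sketch (not from the original source; `classifier` stands for an instance of the containing class, and the result is returned as a NumPy array): positions not listed in `sampled` come back as -2.
>>> import numpy as np
>>> classifier.map_clusters(6, np.array([0, 2, 3, 5]), np.array([0, 0, 1, 1]))
array([ 0, -2,  0,  1, -2,  1])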
def recursively_preempt_states(self): """Preempt the state """ self.preempted = True self.paused = False self.started = False
Preempt the state
def trace(self, urls=None, **overrides): """Sets the acceptable HTTP method to TRACE""" if urls is not None: overrides['urls'] = urls return self.where(accept='TRACE', **overrides)
Sets the acceptable HTTP method to TRACE
def _data(self, received_data): """Sends data to listener, if False is returned; socket is closed. :param received_data: Decoded data received from socket. """ if self.listener.on_data(received_data) is False: self.stop() raise ListenerError(self.listener.connection_id, received_data)
Sends data to listener, if False is returned; socket is closed. :param received_data: Decoded data received from socket.
def create_db_instance_read_replica(DBInstanceIdentifier=None, SourceDBInstanceIdentifier=None, DBInstanceClass=None, AvailabilityZone=None, Port=None, AutoMinorVersionUpgrade=None, Iops=None, OptionGroupName=None, PubliclyAccessible=None, Tags=None, DBSubnetGroupName=None, StorageType=None, CopyTagsToSnapshot=None, MonitoringInterval=None, MonitoringRoleArn=None, KmsKeyId=None, PreSignedUrl=None, EnableIAMDatabaseAuthentication=None, SourceRegion=None): """ Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL that acts as a Read Replica of a source DB instance. All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below. You can create an encrypted Read Replica in a different AWS Region than the source DB instance. In that case, the region where you call the CreateDBInstanceReadReplica action is the destination region of the encrypted Read Replica. The source DB instance must be encrypted. To create an encrypted Read Replica in another AWS Region, you must provide the following values: To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process . See also: AWS API Documentation Examples This example creates a DB instance read replica. Expected Output: :example: response = client.create_db_instance_read_replica( DBInstanceIdentifier='string', SourceDBInstanceIdentifier='string', DBInstanceClass='string', AvailabilityZone='string', Port=123, AutoMinorVersionUpgrade=True|False, Iops=123, OptionGroupName='string', PubliclyAccessible=True|False, Tags=[ { 'Key': 'string', 'Value': 'string' }, ], DBSubnetGroupName='string', StorageType='string', CopyTagsToSnapshot=True|False, MonitoringInterval=123, MonitoringRoleArn='string', KmsKeyId='string', EnableIAMDatabaseAuthentication=True|False, SourceRegion='string' ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] The DB instance identifier of the Read Replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. :type SourceDBInstanceIdentifier: string :param SourceDBInstanceIdentifier: [REQUIRED] The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas. Constraints: Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB instance. Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6. Can specify a DB instance that is a PostgreSQL DB instance only if the source is running PostgreSQL 9.3.5 or later. The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0. If the source DB instance is in the same region as the Read Replica, specify a valid DB instance identifier. If the source DB instance is in a different region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing a Amazon RDS Amazon Resource Name (ARN) . :type DBInstanceClass: string :param DBInstanceClass: The compute and memory capacity of the Read Replica. Note that not all instance classes are available in all regions for all DB engines. 
Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large Default: Inherits from the source DB instance. :type AvailabilityZone: string :param AvailabilityZone: The Amazon EC2 Availability Zone that the Read Replica will be created in. Default: A random, system-chosen Availability Zone in the endpoint's region. Example: us-east-1d :type Port: integer :param Port: The port number that the DB instance uses for connections. Default: Inherits from the source DB instance Valid Values: 1150-65535 :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window. Default: Inherits from the source DB instance :type Iops: integer :param Iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. :type OptionGroupName: string :param OptionGroupName: The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used. :type PubliclyAccessible: boolean :param PubliclyAccessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. Default VPC: true VPC: false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type Tags: list :param Tags: A list of tags. (dict) --Metadata assigned to an Amazon RDS resource consisting of a key-value pair. Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). :type DBSubnetGroupName: string :param DBSubnetGroupName: Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC. Constraints: Can only be specified if the source DB instance identifier specifies a DB instance in another region. The specified DB subnet group must be in the same region in which the operation is running. 
All Read Replicas in one region that are created from the same source DB instance must either: Specify DB subnet groups from the same VPC. All these Read Replicas will be created in the same VPC. Not specify a DB subnet group. All these Read Replicas will be created outside of any VPC. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default. Example: mySubnetgroup :type StorageType: string :param StorageType: Specifies the storage type to be associated with the Read Replica. Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the Read Replica to snapshots of the Read Replica; otherwise false. The default is false. :type MonitoringInterval: integer :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the Read Replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 :type MonitoringRoleArn: string :param MonitoringRoleArn: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess . For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring . If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. :type KmsKeyId: string :param KmsKeyId: The AWS KMS key ID for an encrypted Read Replica. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key. If you create an unencrypted Read Replica and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target Read Replica using the specified KMS encryption key. If you create an encrypted Read Replica from your AWS account, you can specify a value for KmsKeyId to encrypt the Read Replica with a new KMS encryption key. If you don't specify a value for KmsKeyId , then the Read Replica is encrypted with the same KMS key as the source DB instance. If you create an encrypted Read Replica in a different AWS region, then you must specify a KMS key for the destination AWS region. KMS encryption keys are specific to the region that they are created in, and you cannot use encryption keys from one region in another region. :type PreSignedUrl: string :param PreSignedUrl: The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the AWS region that contains the source DB instance. The PreSignedUrl parameter must be used when encrypting a Read Replica from another AWS region. The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source region that contains the encrypted DB instance. The presigned URL request must contain the following parameter values: DestinationRegion - The AWS Region that the Read Replica is created in. This region is the same one where the CreateDBInstanceReadReplica action is called that contains this presigned URL. 
For example, if you create an encrypted Read Replica in the us-east-1 region, and the source DB instance is in the west-2 region, then you call the CreateDBInstanceReadReplica action in the us-east-1 region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica action in the us-west-2 region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 region. KmsKeyId - The KMS key identifier for the key to use to encrypt the Read Replica in the destination region. This is the same identifier for both the CreateDBInstanceReadReplica action that is called in the destination region, and the action contained in the presigned URL. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted Read Replica to be created. This identifier must be in the Amazon Resource Name (ARN) format for the source region. For example, if you create an encrypted Read Replica from a DB instance in the us-west-2 region, then your SourceDBInstanceIdentifier would look like this example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-instance-20161115 . To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process . Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Aurora 5.6 or higher. Default: false :type SourceRegion: string :param SourceRegion: The ID of the region that contains the source for the read replica. 
:rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False } } :returns: DBInstanceIdentifier - The identifier for the encrypted Read Replica in the destination region. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted Read Replica. This identifier must be in the ARN format for the source region and is the same value as the SourceDBInstanceIdentifier in the presigned URL. """ pass
Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL that acts as a Read Replica of a source DB instance. All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below. You can create an encrypted Read Replica in a different AWS Region than the source DB instance. In that case, the region where you call the CreateDBInstanceReadReplica action is the destination region of the encrypted Read Replica. The source DB instance must be encrypted. To create an encrypted Read Replica in another AWS Region, you must provide the following values: To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process . See also: AWS API Documentation Examples This example creates a DB instance read replica. Expected Output: :example: response = client.create_db_instance_read_replica( DBInstanceIdentifier='string', SourceDBInstanceIdentifier='string', DBInstanceClass='string', AvailabilityZone='string', Port=123, AutoMinorVersionUpgrade=True|False, Iops=123, OptionGroupName='string', PubliclyAccessible=True|False, Tags=[ { 'Key': 'string', 'Value': 'string' }, ], DBSubnetGroupName='string', StorageType='string', CopyTagsToSnapshot=True|False, MonitoringInterval=123, MonitoringRoleArn='string', KmsKeyId='string', EnableIAMDatabaseAuthentication=True|False, SourceRegion='string' ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] The DB instance identifier of the Read Replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. :type SourceDBInstanceIdentifier: string :param SourceDBInstanceIdentifier: [REQUIRED] The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas. Constraints: Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB instance. Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6. Can specify a DB instance that is a PostgreSQL DB instance only if the source is running PostgreSQL 9.3.5 or later. The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0. If the source DB instance is in the same region as the Read Replica, specify a valid DB instance identifier. If the source DB instance is in a different region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing a Amazon RDS Amazon Resource Name (ARN) . :type DBInstanceClass: string :param DBInstanceClass: The compute and memory capacity of the Read Replica. Note that not all instance classes are available in all regions for all DB engines. Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large Default: Inherits from the source DB instance. :type AvailabilityZone: string :param AvailabilityZone: The Amazon EC2 Availability Zone that the Read Replica will be created in. 
Default: A random, system-chosen Availability Zone in the endpoint's region. Example: us-east-1d :type Port: integer :param Port: The port number that the DB instance uses for connections. Default: Inherits from the source DB instance Valid Values: 1150-65535 :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window. Default: Inherits from the source DB instance :type Iops: integer :param Iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. :type OptionGroupName: string :param OptionGroupName: The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used. :type PubliclyAccessible: boolean :param PubliclyAccessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. Default VPC: true VPC: false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type Tags: list :param Tags: A list of tags. (dict) --Metadata assigned to an Amazon RDS resource consisting of a key-value pair. Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). :type DBSubnetGroupName: string :param DBSubnetGroupName: Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC. Constraints: Can only be specified if the source DB instance identifier specifies a DB instance in another region. The specified DB subnet group must be in the same region in which the operation is running. All Read Replicas in one region that are created from the same source DB instance must either: Specify DB subnet groups from the same VPC. All these Read Replicas will be created in the same VPC. Not specify a DB subnet group. All these Read Replicas will be created outside of any VPC. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default. Example: mySubnetgroup :type StorageType: string :param StorageType: Specifies the storage type to be associated with the Read Replica. 
Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the Read Replica to snapshots of the Read Replica; otherwise false. The default is false. :type MonitoringInterval: integer :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the Read Replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 :type MonitoringRoleArn: string :param MonitoringRoleArn: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess . For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring . If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. :type KmsKeyId: string :param KmsKeyId: The AWS KMS key ID for an encrypted Read Replica. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key. If you create an unencrypted Read Replica and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target Read Replica using the specified KMS encryption key. If you create an encrypted Read Replica from your AWS account, you can specify a value for KmsKeyId to encrypt the Read Replica with a new KMS encryption key. If you don't specify a value for KmsKeyId , then the Read Replica is encrypted with the same KMS key as the source DB instance. If you create an encrypted Read Replica in a different AWS region, then you must specify a KMS key for the destination AWS region. KMS encryption keys are specific to the region that they are created in, and you cannot use encryption keys from one region in another region. :type PreSignedUrl: string :param PreSignedUrl: The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the AWS region that contains the source DB instance. The PreSignedUrl parameter must be used when encrypting a Read Replica from another AWS region. The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source region that contains the encrypted DB instance. The presigned URL request must contain the following parameter values: DestinationRegion - The AWS Region that the Read Replica is created in. This region is the same one where the CreateDBInstanceReadReplica action is called that contains this presigned URL. For example, if you create an encrypted Read Replica in the us-east-1 region, and the source DB instance is in the west-2 region, then you call the CreateDBInstanceReadReplica action in the us-east-1 region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica action in the us-west-2 region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 region. KmsKeyId - The KMS key identifier for the key to use to encrypt the Read Replica in the destination region. This is the same identifier for both the CreateDBInstanceReadReplica action that is called in the destination region, and the action contained in the presigned URL. 
SourceDBInstanceIdentifier - The DB instance identifier for the encrypted Read Replica to be created. This identifier must be in the Amazon Resource Name (ARN) format for the source region. For example, if you create an encrypted Read Replica from a DB instance in the us-west-2 region, then your SourceDBInstanceIdentifier would look like this example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-instance-20161115 . To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process . Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Aurora 5.6 or higher. Default: false :type SourceRegion: string :param SourceRegion: The ID of the region that contains the source for the read replica. :rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 
'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False } } :returns: DBInstanceIdentifier - The identifier for the encrypted Read Replica in the destination region. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted Read Replica. This identifier must be in the ARN format for the source region and is the same value as the SourceDBInstanceIdentifier in the presigned URL.
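A minimal, hypothetical invocation of this API via boto3 may help ground the parameter list above; the client region, instance identifiers and tag values below are placeholders, not values taken from the documentation.

# Hedged usage sketch: identifiers, region and tag values are invented.
import boto3

rds = boto3.client('rds', region_name='us-east-1')

response = rds.create_db_instance_read_replica(
    DBInstanceIdentifier='mydb-replica-1',       # identifier for the new Read Replica
    SourceDBInstanceIdentifier='mydb',           # existing source DB instance (same region)
    DBInstanceClass='db.m4.large',
    AvailabilityZone='us-east-1d',
    CopyTagsToSnapshot=True,
    Tags=[{'Key': 'env', 'Value': 'staging'}],
)
print(response['DBInstance']['DBInstanceStatus'])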
def parse_localclasspath(self, tup_tree): """ Parse a LOCALCLASSPATH element and return the class path it represents as a CIMClassName object. :: <!ELEMENT LOCALCLASSPATH (LOCALNAMESPACEPATH, CLASSNAME)> """ self.check_node(tup_tree, 'LOCALCLASSPATH') k = kids(tup_tree) if len(k) != 2: raise CIMXMLParseError( _format("Element {0!A} has invalid number of child elements " "{1!A} (expecting two child elements " "(LOCALNAMESPACEPATH, CLASSNAME))", name(tup_tree), k), conn_id=self.conn_id) namespace = self.parse_localnamespacepath(k[0]) class_path = self.parse_classname(k[1]) class_path.namespace = namespace return class_path
Parse a LOCALCLASSPATH element and return the class path it represents as a CIMClassName object. :: <!ELEMENT LOCALCLASSPATH (LOCALNAMESPACEPATH, CLASSNAME)>
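For orientation, the fragment below shows the kind of CIM-XML element this parser consumes, following the DTD quoted above; the class and namespace names are invented, and the tuple-tree input itself is produced elsewhere in the library.

# Illustrative CIM-XML input (names invented). parse_localclasspath() would
# return CIMClassName('CIM_ComputerSystem', namespace='root/cimv2') for the
# tuple-tree representation of this element.
LOCALCLASSPATH_XML = """
<LOCALCLASSPATH>
  <LOCALNAMESPACEPATH>
    <NAMESPACE NAME="root"/>
    <NAMESPACE NAME="cimv2"/>
  </LOCALNAMESPACEPATH>
  <CLASSNAME NAME="CIM_ComputerSystem"/>
</LOCALCLASSPATH>
"""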
def mmi_ramp_roman(raster_layer): """Generate an mmi ramp using range of 1-10 on roman. A standarised range is used so that two shakemaps of different intensities can be properly compared visually with colours stretched accross the same range. The colours used are the 'standard' colours commonly shown for the mercalli scale e.g. on wikipedia and other sources. :param raster_layer: A raster layer that will have an mmi style applied. :type raster_layer: QgsRasterLayer .. versionadded:: 4.0 """ items = [] sorted_mmi_scale = sorted( earthquake_mmi_scale['classes'], key=itemgetter('value')) for class_max in sorted_mmi_scale: colour = class_max['color'] label = '%s' % class_max['key'] ramp_item = QgsColorRampShader.ColorRampItem( class_max['value'], colour, label) items.append(ramp_item) raster_shader = QgsRasterShader() ramp_shader = QgsColorRampShader() ramp_shader.setColorRampType(QgsColorRampShader.Interpolated) ramp_shader.setColorRampItemList(items) raster_shader.setRasterShaderFunction(ramp_shader) band = 1 renderer = QgsSingleBandPseudoColorRenderer( raster_layer.dataProvider(), band, raster_shader) raster_layer.setRenderer(renderer)
Generate an MMI ramp using a range of 1-10 in Roman numerals.

A standardised range is used so that two shakemaps of different intensities
can be properly compared visually, with colours stretched across the same
range. The colours used are the 'standard' colours commonly shown for the
Mercalli scale, e.g. on Wikipedia and other sources.

:param raster_layer: A raster layer that will have an MMI style applied.
:type raster_layer: QgsRasterLayer

.. versionadded:: 4.0
def valid(*things):
    '''Return True if all tasks or files are valid. Valid tasks have been
    completed already. Valid files exist on the disk.'''
    for thing in things:
        if isinstance(thing, str):
            # Strings are treated as file paths: valid when they exist on disk.
            if not os.path.exists(thing):
                return False
        elif thing.valid is None:
            # Task-like objects are valid once their `valid` flag has been set.
            return False
    return True
Return True if all tasks or files are valid. Valid tasks have been completed already. Valid files exist on the disk.
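A small usage sketch; the Task class below is a stand-in for whatever task objects the surrounding pipeline uses, assumed only to expose a `valid` attribute that is None until the task completes.

# Hypothetical stand-in for a pipeline task object.
class Task(object):
    def __init__(self, valid=None):
        self.valid = valid

done = Task(valid=True)
pending = Task()

valid('/etc/hosts', done)      # True if the file exists and the task is done
valid('/etc/hosts', pending)   # False: the pending task's `valid` is still None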
def randoffset(self, rstate=None): """Return a random offset from the center of the ellipsoid.""" if rstate is None: rstate = np.random return np.dot(self.axes, randsphere(self.n, rstate=rstate))
Return a random offset from the center of the ellipsoid.
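The same idea can be sketched standalone: draw a point uniformly from the unit n-ball and map it through the ellipsoid's axes matrix. `randsphere` and `self.axes` are library internals, so the stand-in below only assumes `axes` is the linear transform that maps the unit ball onto the ellipsoid.

# Standalone sketch; unit_ball_sample() is a simple stand-in for randsphere().
import numpy as np

def unit_ball_sample(n, rstate=np.random):
    # uniform direction on the unit sphere ...
    x = rstate.standard_normal(n)
    x /= np.linalg.norm(x)
    # ... scaled by a radius drawn so points are uniform inside the ball
    return x * rstate.uniform() ** (1.0 / n)

axes = np.diag([2.0, 0.5])                  # toy 2-D ellipsoid: semi-axes 2 and 0.5
offset = np.dot(axes, unit_ball_sample(2))  # random offset from the centre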
def find_value_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env): """Find the value of the object under the cursor.""" q = gcl.SourceQuery(filename, line, col) rootpath = ast_tree.find_tokens(q) rootpath = path_until(rootpath, is_thunk) if len(rootpath) <= 1: # Just the file tuple itself, or some non-thunk element at the top level return None tup = inflate_context_tuple(rootpath, root_env) try: if isinstance(rootpath[-1], ast.Inherit): # Special case handling of 'Inherit' nodes, show the value that's being # inherited. return tup[rootpath[-1].name] return rootpath[-1].eval(tup.env(tup)) except gcl.EvaluationError as e: return e
Find the value of the object under the cursor.
def is_cursor_on_first_line(self): """Return True if cursor is on the first line""" cursor = self.textCursor() cursor.movePosition(QTextCursor.StartOfBlock) return cursor.atStart()
Return True if cursor is on the first line
def uninstall_host(trg_queue, *hosts, **kwargs):
    '''Idempotently uninstall a host queue, should you want to subvert
       FSQ_ROOT settings, merely pass in an absolute path'''
    # per-item ownership and mode used when downing each host queue
    item_user = kwargs.pop('item_user', None)
    item_group = kwargs.pop('item_group', None)
    item_mode = kwargs.pop('item_mode', None)
    for host in hosts:
        # immediately down the queue
        try:
            down_host(trg_queue, host, user=item_user, group=item_group,
                      mode=(_c.FSQ_ITEM_MODE if item_mode is None else
                            item_mode))
        except FSQError as e:
            raise FSQInstallError(e.errno, wrap_io_os_err(e))
        tmp_full, tmp_queue = _tmp_trg(host, fsq_path.hosts(trg_queue))
        _remove_dir(fsq_path.base(trg_queue, host), tmp_full, trg_queue)
Idempotently uninstall a host queue; should you want to subvert FSQ_ROOT
settings, merely pass in an absolute path
def sentiment(self, text, method="vocabulary"):
    """
    Determine the sentiment of the provided text (English language).
    @param text: input text to categorize
    @param method: method to use to compute the sentiment. Possible values are
        "vocabulary" (vocabulary based sentiment analysis) and "rnn" (neural
        network based sentiment classification)
    @returns: dict
    """
    assert method in ("vocabulary", "rnn"), "unknown sentiment method: %s" % method
    # pick the REST endpoint that matches the requested method
    endpoint = "sentiment" if method == "vocabulary" else "sentimentRNN"
    return self._er.jsonRequestAnalytics("/api/v1/" + endpoint, {"text": text})
Determine the sentiment of the provided text (English language).
@param text: input text to categorize
@param method: method to use to compute the sentiment. Possible values are
    "vocabulary" (vocabulary based sentiment analysis) and "rnn" (neural
    network based sentiment classification)
@returns: dict
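A hypothetical call sketch; the `EventRegistry`/`Analytics` wiring and the API key are placeholders for however the surrounding library is normally initialised, and the returned keys depend on the remote service.

# Hedged usage sketch (class names and key are placeholders).
er = EventRegistry(apiKey='YOUR_API_KEY')
analytics = Analytics(er)

scores = analytics.sentiment('The keynote was a pleasant surprise.', method='vocabulary')
# `scores` is a dict returned by the service; its exact keys are service-defined.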
def sg_summary_image(tensor, prefix=None, name=None): r"""Register `tensor` to summary report as `image` Args: tensor: A tensor to log as image prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None """ # defaults prefix = '' if prefix is None else prefix + '/' # summary name name = prefix + _pretty_name(tensor) if name is None else prefix + name # summary statistics if not tf.get_variable_scope().reuse: tf.summary.image(name + '-im', tensor)
r"""Register `tensor` to summary report as `image` Args: tensor: A tensor to log as image prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
def schema_completer(prefix): """ For tab-completion via argcomplete, return completion options. For the given prefix so far, return the possible options. Note that filtering via startswith happens after this list is returned. """ from c7n import schema load_resources() components = prefix.split('.') if components[0] in provider.clouds.keys(): cloud_provider = components.pop(0) provider_resources = provider.resources(cloud_provider) else: cloud_provider = 'aws' provider_resources = provider.resources('aws') components[0] = "aws.%s" % components[0] # Completions for resource if len(components) == 1: choices = [r for r in provider.resources().keys() if r.startswith(components[0])] if len(choices) == 1: choices += ['{}{}'.format(choices[0], '.')] return choices if components[0] not in provider_resources.keys(): return [] # Completions for category if len(components) == 2: choices = ['{}.{}'.format(components[0], x) for x in ('actions', 'filters') if x.startswith(components[1])] if len(choices) == 1: choices += ['{}{}'.format(choices[0], '.')] return choices # Completions for item elif len(components) == 3: resource_mapping = schema.resource_vocabulary(cloud_provider) return ['{}.{}.{}'.format(components[0], components[1], x) for x in resource_mapping[components[0]][components[1]]] return []
For tab-completion via argcomplete, return completion options. For the given prefix so far, return the possible options. Note that filtering via startswith happens after this list is returned.
def check_status_code(response, codes=None): """Check HTTP status code and raise exception if incorrect. :param Response response: HTTP response :param codes: List of accepted codes or callable :raises: ApiError if code invalid """ codes = codes or [httplib.OK] checker = ( codes if callable(codes) else lambda resp: resp.status_code in codes ) if not checker(response): raise exceptions.ApiError(response, response.json())
Check HTTP status code and raise exception if incorrect. :param Response response: HTTP response :param codes: List of accepted codes or callable :raises: ApiError if code invalid
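A usage sketch with the `requests` library; the URL is a placeholder and the `exceptions` module is the one imported alongside the function above.

# Hedged usage sketch.
import requests

resp = requests.get('https://api.example.com/ping')   # placeholder URL

check_status_code(resp)                          # accepts only 200 by default
check_status_code(resp, codes=[200, 204])        # explicit whitelist
check_status_code(resp, codes=lambda r: r.ok)    # callable predicate form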
def age(self): """RDFDatetime at which the object was created.""" # TODO(user) move up to AFF4Object after some analysis of how .age is # used in the codebase. aff4_type = self.Get(self.Schema.TYPE) if aff4_type: return aff4_type.age else: # If there is no type attribute yet, we have only just been created and # not flushed yet, so just set timestamp to now. return rdfvalue.RDFDatetime.Now()
RDFDatetime at which the object was created.
def align(self, referencewords, datatuple): """align the reference sentence with the tagged data""" targetwords = [] for i, (word,lemma,postag) in enumerate(zip(datatuple[0],datatuple[1],datatuple[2])): if word: subwords = word.split("_") for w in subwords: #split multiword expressions targetwords.append( (w, lemma, postag, i, len(subwords) > 1 ) ) #word, lemma, pos, index, multiword? referencewords = [ w.lower() for w in referencewords ] alignment = [] for i, referenceword in enumerate(referencewords): found = False best = 0 distance = 999999 for j, (targetword, lemma, pos, index, multiword) in enumerate(targetwords): if referenceword == targetword and abs(i-j) < distance: found = True best = j distance = abs(i-j) if found: alignment.append(targetwords[best]) else: alignment.append((None,None,None,None,False)) #no alignment found return alignment
align the reference sentence with the tagged data
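A worked example of what the alignment produces, assuming the usual (words, lemmas, POS tags) layout of `datatuple`; since this is a method, only the data and the expected result are shown.

# Input data (lower-cased, one multiword token joined with '_').
reference = ['the', 'new', 'york', 'marathon']
datatuple = (['the', 'new_york', 'marathon'],     # words
             ['the', 'new_york', 'marathon'],     # lemmas
             ['DT', 'NNP', 'NN'])                 # POS tags

# Expected alignment, one (word, lemma, pos, source index, multiword?) per reference word:
#   ('the', 'the', 'DT', 0, False)
#   ('new', 'new_york', 'NNP', 1, True)
#   ('york', 'new_york', 'NNP', 1, True)
#   ('marathon', 'marathon', 'NN', 2, False)
# Reference words with no match come back as (None, None, None, None, False).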
def adjust_opts(in_opts, config): """Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively. """ memory_adjust = config["algorithm"].get("memory_adjust", {}) out_opts = [] for opt in in_opts: if opt.startswith("-Xmx") or (opt.startswith("-Xms") and memory_adjust.get("direction") == "decrease"): arg = opt[:4] opt = "{arg}{val}".format(arg=arg, val=adjust_memory(opt[4:], memory_adjust.get("magnitude", 1), memory_adjust.get("direction"), maximum=memory_adjust.get("maximum"))) out_opts.append(opt) return out_opts
Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively.
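An illustrative call; `adjust_memory` is defined elsewhere in the package, so the adjusted value shown in the comment is an assumption about its behaviour rather than a guaranteed output.

# Hedged example of the memory rescaling.
config = {"algorithm": {"memory_adjust": {"direction": "increase", "magnitude": 4}}}

adjust_opts(["-Xms750m", "-Xmx2g", "-XX:+UseSerialGC"], config)
# -> ["-Xms750m", "-Xmx8g", "-XX:+UseSerialGC"]  (only -Xmx is scaled here;
#    -Xms is only touched when direction is "decrease")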
def transform(self, data, data_type='S3Prefix', content_type=None, compression_type=None, split_type=None, job_name=None): """Start a new transform job. Args: data (str): Input data location in S3. data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values: * 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as inputs for the transform job. * 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as an input for the transform job. content_type (str): MIME type of the input data (default: None). compression_type (str): Compression type of the input data, if compressed (default: None). Valid values: 'Gzip', None. split_type (str): The record delimiter for the input object (default: 'None'). Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'. job_name (str): job name (default: None). If not specified, one will be generated. """ local_mode = self.sagemaker_session.local_mode if not local_mode and not data.startswith('s3://'): raise ValueError('Invalid S3 URI: {}'.format(data)) if job_name is not None: self._current_job_name = job_name else: base_name = self.base_transform_job_name or base_name_from_image(self._retrieve_image_name()) self._current_job_name = name_from_base(base_name) if self.output_path is None: self.output_path = 's3://{}/{}'.format(self.sagemaker_session.default_bucket(), self._current_job_name) self.latest_transform_job = _TransformJob.start_new(self, data, data_type, content_type, compression_type, split_type)
Start a new transform job. Args: data (str): Input data location in S3. data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values: * 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as inputs for the transform job. * 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as an input for the transform job. content_type (str): MIME type of the input data (default: None). compression_type (str): Compression type of the input data, if compressed (default: None). Valid values: 'Gzip', None. split_type (str): The record delimiter for the input object (default: 'None'). Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'. job_name (str): job name (default: None). If not specified, one will be generated.
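A hypothetical invocation; the bucket, prefix and job name are placeholders, and `transformer` stands for an already-constructed Transformer object.

# Hedged usage sketch (S3 paths and job name invented).
transformer.transform(
    's3://my-bucket/batch-input/',
    data_type='S3Prefix',
    content_type='text/csv',
    split_type='Line',
    job_name='my-batch-transform-001',
)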
def parse(self, data, doctype): ''' Parse an input string, and return an AST doctype must have WCADocument as a baseclass ''' self.doctype = doctype self.lexer.lineno = 0 del self.errors[:] del self.warnings[:] self.lexer.lexerror = False ast = self.parser.parse(data, lexer=self.lexer) if self.lexer.lexerror: ast = None if ast is None: self.errors.append("Couldn't build AST.") else: for check in self.sema[self.doctype]: visitor = check() if not visitor.visit(ast): self.errors.append("Couldn't visit AST.") self.errors.extend(visitor.errors) self.warnings.extend(visitor.warnings) return (ast, list(self.errors), list(self.warnings))
Parse an input string, and return an AST doctype must have WCADocument as a baseclass
def process_input(self, stream, value, rpc_executor):
    """Process an input through this sensor graph.

    The tick information in value should be correct and is transferred to all
    results produced by nodes acting on this tick.

    Args:
        stream (DataStream): The stream the input is part of
        value (IOTileReading): The value to process
        rpc_executor (RPCExecutor): An object capable of executing RPCs in
            case we need to do that.
    """

    self.sensor_log.push(stream, value)

    # FIXME: This should be specified in our device model
    if stream.important:
        associated_output = stream.associated_stream()
        self.sensor_log.push(associated_output, value)

    to_check = deque([x for x in self.roots])

    while len(to_check) > 0:
        node = to_check.popleft()
        if node.triggered():
            # Initialize results so a failed node does not leave it unbound
            # (or stale from a previous iteration) for the check below.
            results = []
            try:
                results = node.process(rpc_executor, self.mark_streamer)
                for result in results:
                    result.raw_time = value.raw_time
                    self.sensor_log.push(node.stream, result)
            except Exception:
                self._logger.exception("Unhandled exception in graph node processing function for node %s", str(node))

            # If we generated any outputs, notify our downstream nodes
            # so that they are also checked to see if they should run.
            if len(results) > 0:
                to_check.extend(node.outputs)
Process an input through this sensor graph.

The tick information in value should be correct and is transferred to all
results produced by nodes acting on this tick.

Args:
    stream (DataStream): The stream the input is part of
    value (IOTileReading): The value to process
    rpc_executor (RPCExecutor): An object capable of executing RPCs in case
        we need to do that.
def _aggregate_metrics(self, session_group): """Sets the metrics of the group based on aggregation_type.""" if (self._request.aggregation_type == api_pb2.AGGREGATION_AVG or self._request.aggregation_type == api_pb2.AGGREGATION_UNSET): _set_avg_session_metrics(session_group) elif self._request.aggregation_type == api_pb2.AGGREGATION_MEDIAN: _set_median_session_metrics(session_group, self._request.aggregation_metric) elif self._request.aggregation_type == api_pb2.AGGREGATION_MIN: _set_extremum_session_metrics(session_group, self._request.aggregation_metric, min) elif self._request.aggregation_type == api_pb2.AGGREGATION_MAX: _set_extremum_session_metrics(session_group, self._request.aggregation_metric, max) else: raise error.HParamsError('Unknown aggregation_type in request: %s' % self._request.aggregation_type)
Sets the metrics of the group based on aggregation_type.
def set_settings_secret(self, password): """Unlocks the secret data by passing the unlock password to the server. The server will cache the password for that machine. in password of type str The cipher key. raises :class:`VBoxErrorInvalidVmState` Virtual machine is not mutable. """ if not isinstance(password, basestring): raise TypeError("password can only be an instance of type basestring") self._call("setSettingsSecret", in_p=[password])
Unlocks the secret data by passing the unlock password to the server. The server will cache the password for that machine. in password of type str The cipher key. raises :class:`VBoxErrorInvalidVmState` Virtual machine is not mutable.
def _split_op( self, identifier, hs_label=None, dagger=False, args=None): """Return `name`, total `subscript`, total `superscript` and `arguments` str. All of the returned strings are fully rendered. Args: identifier (str or SymbolicLabelBase): A (non-rendered/ascii) identifier that may include a subscript. The output `name` will be the `identifier` without any subscript hs_label (str): The rendered label for the Hilbert space of the operator, or None. Returned unchanged. dagger (bool): Flag to indicate whether the operator is daggered. If True, :attr:`dagger_sym` will be included in the `superscript` (or `subscript`, depending on the settings) args (list or None): List of arguments (expressions). Each element will be rendered with :meth:`doprint`. The total list of args will then be joined with commas, enclosed with :attr:`_parenth_left` and :attr:`parenth_right`, and returnd as the `arguments` string """ if self._isinstance(identifier, 'SymbolicLabelBase'): identifier = QnetAsciiDefaultPrinter()._print_SCALAR_TYPES( identifier.expr) name, total_subscript = self._split_identifier(identifier) total_superscript = '' if (hs_label not in [None, '']): if self._settings['show_hs_label'] == 'subscript': if len(total_subscript) == 0: total_subscript = '(' + hs_label + ')' else: total_subscript += ',(' + hs_label + ')' else: total_superscript += '(' + hs_label + ')' if dagger: total_superscript += self._dagger_sym args_str = '' if (args is not None) and (len(args) > 0): args_str = (self._parenth_left + ",".join([self.doprint(arg) for arg in args]) + self._parenth_right) return name, total_subscript, total_superscript, args_str
Return `name`, total `subscript`, total `superscript` and `arguments` str.

All of the returned strings are fully rendered.

Args:
    identifier (str or SymbolicLabelBase): A (non-rendered/ascii) identifier
        that may include a subscript. The output `name` will be the
        `identifier` without any subscript.
    hs_label (str): The rendered label for the Hilbert space of the operator,
        or None. Returned unchanged.
    dagger (bool): Flag to indicate whether the operator is daggered. If True,
        :attr:`_dagger_sym` will be included in the `superscript` (or
        `subscript`, depending on the settings).
    args (list or None): List of arguments (expressions). Each element will be
        rendered with :meth:`doprint`. The total list of args will then be
        joined with commas, enclosed with :attr:`_parenth_left` and
        :attr:`_parenth_right`, and returned as the `arguments` string.
def _localize_inputs_command(self, task_dir, inputs, user_project): """Returns a command that will stage inputs.""" commands = [] for i in inputs: if i.recursive or not i.value: continue source_file_path = i.uri local_file_path = task_dir + '/' + _DATA_SUBDIR + '/' + i.docker_path dest_file_path = self._get_input_target_path(local_file_path) commands.append('mkdir -p "%s"' % os.path.dirname(local_file_path)) if i.file_provider in [job_model.P_LOCAL, job_model.P_GCS]: # The semantics that we expect here are implemented consistently in # "gsutil cp", and are a bit different than "cp" when it comes to # wildcard handling, so use it for both local and GCS: # # - `cp path/* dest/` will error if "path" has subdirectories. # - `cp "path/*" "dest/"` will fail (it expects wildcard expansion # to come from shell). if user_project: command = 'gsutil -u %s -mq cp "%s" "%s"' % ( user_project, source_file_path, dest_file_path) else: command = 'gsutil -mq cp "%s" "%s"' % (source_file_path, dest_file_path) commands.append(command) return '\n'.join(commands)
Returns a command that will stage inputs.
def skip_build(self): """Check if build should be skipped """ skip_msg = self.config.get('skip', '[ci skip]') return ( os.environ.get('CODEBUILD_BUILD_SUCCEEDING') == '0' or self.info['current_tag'] or skip_msg in self.info['head']['message'] )
Check if build should be skipped
def feed(self, data): """Consume some data and advances the state as necessary. :param str data: a blob of data to feed from. """ send = self._send_to_parser draw = self.listener.draw match_text = self._text_pattern.match taking_plain_text = self._taking_plain_text length = len(data) offset = 0 while offset < length: if taking_plain_text: match = match_text(data, offset) if match: start, offset = match.span() draw(data[start:offset]) else: taking_plain_text = False else: taking_plain_text = send(data[offset:offset + 1]) offset += 1 self._taking_plain_text = taking_plain_text
Consume some data and advances the state as necessary. :param str data: a blob of data to feed from.
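This appears to be the stream/screen pairing used by the pyte terminal-emulation library; a minimal usage sketch under that assumption looks like the following.

# Hedged usage sketch, assuming the pyte Screen/Stream pairing.
import pyte

screen = pyte.Screen(80, 24)
stream = pyte.Stream(screen)
stream.feed('plain text, then an escape: \x1b[31mred\x1b[0m\r\n')

print(screen.display[0].rstrip())   # rendered first line of the virtual screen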
def modify_signature(self, signature): """ Modify an existing signature Can modify the content, contenttype and name. An unset attribute will not delete the attribute but leave it untouched. :param: signature a zobject.Signature object, with modified content/contentype/name, the id should be present and valid, the name does not allows to identify the signature for that operation. """ # if no content is specified, just use a selector (id/name) dic = signature.to_creator(for_modify=True) self.request('ModifySignature', {'signature': dic})
Modify an existing signature.

Can modify the content, contenttype and name. An unset attribute will not
delete the attribute but leave it untouched.

:param signature: a zobject.Signature object with modified
    content/contenttype/name; the id must be present and valid, as the name
    alone does not identify the signature for this operation.
def set_logger_level(logger_name, log_level='error'): ''' Tweak a specific logger's logging level ''' logging.getLogger(logger_name).setLevel( LOG_LEVELS.get(log_level.lower(), logging.ERROR) )
Tweak a specific logger's logging level
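Typical calls; LOG_LEVELS is assumed to be the usual mapping from lower-case level names to the stdlib logging constants, as the ERROR fallback in the function implies.

# Hedged usage sketch.
set_logger_level('boto3', 'warning')   # quieten a chatty third-party logger
set_logger_level('paramiko')           # unknown or omitted level falls back to ERROR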
def _realValue_to_float(value_str): """ Convert a value string that conforms to DSP0004 `realValue`, into the corresponding float and return it. The special values 'INF', '-INF', and 'NAN' are supported. Note that the Python `float()` function supports a superset of input formats compared to the `realValue` definition in DSP0004. For example, "1." is allowed for `float()` but not for `realValue`. In addition, it has the same support for Unicode decimal digits as `int()`. Therefore, the match patterns explicitly check for US-ASCII digits, and the `float()` function should never raise `ValueError`. Returns None if the value string does not conform to `realValue`. """ if REAL_VALUE.match(value_str): value = float(value_str) else: value = None return value
Convert a value string that conforms to DSP0004 `realValue`, into the corresponding float and return it. The special values 'INF', '-INF', and 'NAN' are supported. Note that the Python `float()` function supports a superset of input formats compared to the `realValue` definition in DSP0004. For example, "1." is allowed for `float()` but not for `realValue`. In addition, it has the same support for Unicode decimal digits as `int()`. Therefore, the match patterns explicitly check for US-ASCII digits, and the `float()` function should never raise `ValueError`. Returns None if the value string does not conform to `realValue`.
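A behaviour sketch implied by the DSP0004 rules above; REAL_VALUE is a module-level pattern, so these outcomes follow from the description rather than from running the module here.

# Expected behaviour (per the docstring, not executed here):
_realValue_to_float('1.5E3')   # -> 1500.0
_realValue_to_float('-INF')    # -> float('-inf')
_realValue_to_float('1.')      # -> None (accepted by float(), rejected by realValue)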
def dump(data, stream=None, **kwds): """ Serialize a Python object into a YAML stream. If stream is None, return the produced string instead. Dict keys are produced in the order in which they appear in OrderedDicts. Safe version. If objects are not "conventional" objects, they will be dumped converted to string with the str() function. They will then not be recovered when loading with the load() function. """ # Display OrderedDicts correctly class OrderedDumper(SafeDumper): pass def _dict_representer(dumper, data): return dumper.represent_mapping( original_yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, list(data.items())) # Display long strings correctly def _long_str_representer(dumper, data): if data.find("\n") != -1: # Drop some uneeded data # \t are forbidden in YAML data = data.replace("\t", " ") # empty spaces at end of line are always useless in INGInious, and forbidden in YAML data = "\n".join([p.rstrip() for p in data.split("\n")]) return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') else: return dumper.represent_scalar('tag:yaml.org,2002:str', data) # Default representation for some odd objects def _default_representer(dumper, data): return _long_str_representer(dumper, str(data)) OrderedDumper.add_representer(str, _long_str_representer) OrderedDumper.add_representer(str, _long_str_representer) OrderedDumper.add_representer(OrderedDict, _dict_representer) OrderedDumper.add_representer(None, _default_representer) s = original_yaml.dump(data, stream, OrderedDumper, encoding='utf-8', allow_unicode=True, default_flow_style=False, indent=4, **kwds) if s is not None: return s.decode('utf-8') else: return
Serialize a Python object into a YAML stream. If stream is None, return the produced string instead. Dict keys are produced in the order in which they appear in OrderedDicts. Safe version. If objects are not "conventional" objects, they will be dumped converted to string with the str() function. They will then not be recovered when loading with the load() function.
def file_sort(my_list): """ Sort a list of files in a nice way. eg item-10 will be after item-9 """ def alphanum_key(key): """ Split the key into str/int parts """ return [int(s) if s.isdigit() else s for s in re.split("([0-9]+)", key)] my_list.sort(key=alphanum_key) return my_list
Sort a list of files in a nice way. eg item-10 will be after item-9
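The resulting "natural" ordering, for example:

file_sort(['item-10.txt', 'item-9.txt', 'item-1.txt'])
# -> ['item-1.txt', 'item-9.txt', 'item-10.txt']   (sorts in place and returns the list)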
def parse_checks(self, conf): """ Unpack configuration from human-friendly form to strict check definitions. """ checks = conf.get('checks', conf.get('pages', [])) checks = list(self.unpack_batches(checks)) checks = list(self.unpack_templates(checks, conf.get('templates', {}))) self.inject_missing_names(checks) for check in checks: self.inject_scenarios(check, conf.get('scenarios', {})) self.inject_notifiers(check, conf.get('notifiers', {})) self.expand_schedule(check) return checks
Unpack configuration from human-friendly form to strict check definitions.
def UnicodeFromCodePage(string): """Attempt to coerce string into a unicode object.""" # get the current code page codepage = ctypes.windll.kernel32.GetOEMCP() try: return string.decode("cp%s" % codepage) except UnicodeError: try: return string.decode("utf16", "ignore") except UnicodeError: # Fall back on utf8 but ignore errors return string.decode("utf8", "ignore")
Attempt to coerce string into a unicode object.
def _minimal_y(self, p): """ For the specified y and one offset by half a pixel, return the one that results in the fewest pixels turned on, so that when the thickness has been enforced to be at least one pixel, no extra pixels are needlessly included (which would cause double-width lines). """ y0 = self.pattern_y y1 = y0 + self._pixelsize(p)/2. return y0 if self._count_pixels_on_line(y0, p) < self._count_pixels_on_line(y1, p) else y1
For the specified y and one offset by half a pixel, return the one that results in the fewest pixels turned on, so that when the thickness has been enforced to be at least one pixel, no extra pixels are needlessly included (which would cause double-width lines).
def load_config(strCsvCnfg, lgcTest=False, lgcPrint=True): """ Load py_pRF_mapping config file. Parameters ---------- strCsvCnfg : string Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of this function will be prepended to config file paths. lgcPrint : Boolean Print config parameters? Returns ------- dicCnfg : dict Dictionary containing parameter names (as keys) and parameter values (as values). For example, `dicCnfg['varTr']` contains a float, such as `2.94`. """ # Dictionary with config information: dicCnfg = {} # Open file with parameter configuration: # fleConfig = open(strCsvCnfg, 'r') with open(strCsvCnfg, 'r') as fleConfig: # Read file with ROI information: csvIn = csv.reader(fleConfig, delimiter='\n', skipinitialspace=True) # Loop through csv object to fill list with csv data: for lstTmp in csvIn: # Skip comments (i.e. lines starting with '#') and empty lines. # Note: Indexing the list (i.e. lstTmp[0][0]) does not work for # empty lines. However, if the first condition is no fullfilled # (i.e. line is empty and 'if lstTmp' evaluates to false), the # second logical test (after the 'and') is not actually carried # out. if lstTmp and not (lstTmp[0][0] == '#'): # Name of current parameter (e.g. 'varTr'): strParamKey = lstTmp[0].split(' = ')[0] # print(strParamKey) # Current parameter value (e.g. '2.94'): strParamVlu = lstTmp[0].split(' = ')[1] # print(strParamVlu) # Put paramter name (key) and value (item) into dictionary: dicCnfg[strParamKey] = strParamVlu # Are model parameters in cartesian or polar coordinates? # set either pol (polar) or crt (cartesian) dicCnfg['strKwCrd'] = ast.literal_eval(dicCnfg['strKwCrd']) if lgcPrint: print('---Model coordinates are in: ' + str(dicCnfg['strKwCrd'])) # Number of x- or radial positions to model: dicCnfg['varNum1'] = int(dicCnfg['varNum1']) # Number of y- or angular positions to model: dicCnfg['varNum2'] = int(dicCnfg['varNum2']) if lgcPrint: if dicCnfg['strKwCrd'] == 'crt': print('---Number of x-positions to model: ' + str(dicCnfg['varNum1'])) print('---Number of y-positions to model: ' + str(dicCnfg['varNum2'])) elif dicCnfg['strKwCrd'] == 'pol': print('---Number of radial positions to model: ' + str(dicCnfg['varNum1'])) print('---Number of angular positions to model: ' + str(dicCnfg['varNum2'])) # Number of pRF sizes to model: dicCnfg['varNumPrfSizes'] = int(dicCnfg['varNumPrfSizes']) if lgcPrint: print('---Number of pRF sizes to model: ' + str(dicCnfg['varNumPrfSizes'])) # Extent of visual space from centre of the screen in negative x-direction # (i.e. from the fixation point to the left end of the screen) in degrees # of visual angle. dicCnfg['varExtXmin'] = float(dicCnfg['varExtXmin']) if lgcPrint: print('---Extent of visual space in negative x-direction: ' + str(dicCnfg['varExtXmin'])) # Extent of visual space from centre of the screen in positive x-direction # (i.e. from the fixation point to the right end of the screen) in degrees # of visual angle. dicCnfg['varExtXmax'] = float(dicCnfg['varExtXmax']) if lgcPrint: print('---Extent of visual space in positive x-direction: ' + str(dicCnfg['varExtXmax'])) # Extent of visual space from centre of the screen in negative y-direction # (i.e. from the fixation point to the lower end of the screen) in degrees # of visual angle. 
dicCnfg['varExtYmin'] = float(dicCnfg['varExtYmin']) if lgcPrint: print('---Extent of visual space in negative y-direction: ' + str(dicCnfg['varExtYmin'])) # Extent of visual space from centre of the screen in positive y-direction # (i.e. from the fixation point to the upper end of the screen) in degrees # of visual angle. dicCnfg['varExtYmax'] = float(dicCnfg['varExtYmax']) if lgcPrint: print('---Extent of visual space in positive y-direction: ' + str(dicCnfg['varExtYmax'])) # Minimum pRF model size (standard deviation of 2D Gaussian) [degrees of # visual angle]: dicCnfg['varPrfStdMin'] = float(dicCnfg['varPrfStdMin']) if lgcPrint: print('---Minimum pRF model size: ' + str(dicCnfg['varPrfStdMin'])) # Maximum pRF model size (standard deviation of 2D Gaussian) [degrees of # visual angle]: dicCnfg['varPrfStdMax'] = float(dicCnfg['varPrfStdMax']) if lgcPrint: print('---Maximum pRF model size: ' + str(dicCnfg['varPrfStdMax'])) # Volume TR of input data [s]: dicCnfg['varTr'] = float(dicCnfg['varTr']) if lgcPrint: print('---Volume TR of input data [s]: ' + str(dicCnfg['varTr'])) # Voxel resolution of fMRI data [mm]: dicCnfg['varVoxRes'] = float(dicCnfg['varVoxRes']) if lgcPrint: print('---Voxel resolution of fMRI data [mm]: ' + str(dicCnfg['varVoxRes'])) # Number of fMRI volumes and png files to load: dicCnfg['varNumVol'] = int(dicCnfg['varNumVol']) if lgcPrint: print('---Total number of fMRI volumes and png files: ' + str(dicCnfg['varNumVol'])) # Extent of temporal smoothing for fMRI data and pRF time course models # [standard deviation of the Gaussian kernel, in seconds]: # same temporal smoothing will be applied to pRF model time courses dicCnfg['varSdSmthTmp'] = float(dicCnfg['varSdSmthTmp']) if lgcPrint: print('---Extent of temporal smoothing (Gaussian SD in [s]): ' + str(dicCnfg['varSdSmthTmp'])) # Number of processes to run in parallel: dicCnfg['varPar'] = int(dicCnfg['varPar']) if lgcPrint: print('---Number of processes to run in parallel: ' + str(dicCnfg['varPar'])) # Size of space model in which the pRF models are # created (x- and y-dimension). dicCnfg['tplVslSpcSze'] = tuple([int(dicCnfg['varVslSpcSzeX']), int(dicCnfg['varVslSpcSzeY'])]) if lgcPrint: print('---Size of visual space model (x & y): ' + str(dicCnfg['tplVslSpcSze'])) # Path(s) of functional data: dicCnfg['lstPathNiiFunc'] = ast.literal_eval(dicCnfg['lstPathNiiFunc']) if lgcPrint: print('---Path(s) of functional data:') for strTmp in dicCnfg['lstPathNiiFunc']: print(' ' + str(strTmp)) # Path of mask (to restrict pRF model finding): dicCnfg['strPathNiiMask'] = ast.literal_eval(dicCnfg['strPathNiiMask']) if lgcPrint: print('---Path of mask (to restrict pRF model finding):') print(' ' + str(dicCnfg['strPathNiiMask'])) # Output basename: dicCnfg['strPathOut'] = ast.literal_eval(dicCnfg['strPathOut']) if lgcPrint: print('---Output basename:') print(' ' + str(dicCnfg['strPathOut'])) # Which version to use for pRF finding. 'numpy' or 'cython' for pRF finding # on CPU, 'gpu' for using GPU. dicCnfg['strVersion'] = ast.literal_eval(dicCnfg['strVersion']) if lgcPrint: print('---Version (numpy, cython, or gpu): ' + str(dicCnfg['strVersion'])) # Create pRF time course models? dicCnfg['lgcCrteMdl'] = (dicCnfg['lgcCrteMdl'] == 'True') if lgcPrint: print('---Create pRF time course models: ' + str(dicCnfg['lgcCrteMdl'])) # Path to npy file with pRF time course models (to save or laod). Without # file extension. 
dicCnfg['strPathMdl'] = ast.literal_eval(dicCnfg['strPathMdl']) if lgcPrint: print('---Path to npy file with pRF time course models (to save ' + 'or load):') print(' ' + str(dicCnfg['strPathMdl'])) # switch to determine which hrf functions should be used # 1: canonical, 2: can and temp derivative, 3: can, temp and spat deriv dicCnfg['switchHrfSet'] = ast.literal_eval(dicCnfg['switchHrfSet']) if lgcPrint: print('---Switch to determine which hrf functions should be used: ' + str(dicCnfg['switchHrfSet'])) # should model fitting be based on k-fold cross-validation? # if not, set to 1 dicCnfg['varNumXval'] = ast.literal_eval(dicCnfg['varNumXval']) if lgcPrint: print('---Model fitting will have this number of folds for xval: ' + str(dicCnfg['varNumXval'])) # If we create new pRF time course models, the following parameters have to # be provided: if dicCnfg['lgcCrteMdl']: # Name of the npy that holds spatial info about conditions dicCnfg['strSptExpInf'] = ast.literal_eval(dicCnfg['strSptExpInf']) if lgcPrint: print('---Path to npy file with spatial condition info: ') print(' ' + str(dicCnfg['strSptExpInf'])) # Name of the npy that holds temporal info about conditions dicCnfg['strTmpExpInf'] = ast.literal_eval(dicCnfg['strTmpExpInf']) if lgcPrint: print('---Path to npy file with temporal condition info: ') print(' ' + str(dicCnfg['strTmpExpInf'])) # Factor by which time courses and HRF will be upsampled for the # convolutions dicCnfg['varTmpOvsmpl'] = ast.literal_eval(dicCnfg['varTmpOvsmpl']) if lgcPrint: print('---Factor by which time courses and HRF will be upsampled: ' + str(dicCnfg['varTmpOvsmpl'])) # Is this a test? if lgcTest: # Prepend absolute path of this file to config file paths: dicCnfg['strPathNiiMask'] = (strDir + dicCnfg['strPathNiiMask']) dicCnfg['strPathOut'] = (strDir + dicCnfg['strPathOut']) dicCnfg['strPathMdl'] = (strDir + dicCnfg['strPathMdl']) dicCnfg['strSptExpInf'] = (strDir + dicCnfg['strSptExpInf']) dicCnfg['strTmpExpInf'] = (strDir + dicCnfg['strTmpExpInf']) # Loop through functional runs: varNumRun = len(dicCnfg['lstPathNiiFunc']) for idxRun in range(varNumRun): dicCnfg['lstPathNiiFunc'][idxRun] = ( strDir + dicCnfg['lstPathNiiFunc'][idxRun] ) return dicCnfg
Load py_pRF_mapping config file. Parameters ---------- strCsvCnfg : string Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of this function will be prepended to config file paths. lgcPrint : Boolean Print config parameters? Returns ------- dicCnfg : dict Dictionary containing parameter names (as keys) and parameter values (as values). For example, `dicCnfg['varTr']` contains a float, such as `2.94`.
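A minimal example of the kind of config file this parser reads (one `name = value` pair per line, `#` for comments); the values are invented and only a handful of the parameters handled above are shown.

# Hypothetical config snippet (values invented, not a complete file).
EXAMPLE_CONFIG = """\
# py_pRF_mapping configuration
varTr = 2.94
varNum1 = 40
varNum2 = 40
varNumPrfSizes = 40
strKwCrd = 'crt'
lgcCrteMdl = True
lstPathNiiFunc = ['/data/func_run01.nii.gz']
"""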
def _setup_conn(**kwargs):
    '''
    Set up the Kubernetes API connection singleton.
    '''
    kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')
    kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')
    context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')

    if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):
        with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:
            kcfg.write(base64.b64decode(kubeconfig_data))
            kubeconfig = kcfg.name

    if not (kubeconfig and context):
        if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):
            salt.utils.versions.warn_until(
                'Sodium',
                'Kubernetes configuration via url, certificate, username and password will be removed in Sodium. '
                'Use \'kubeconfig\' and \'context\' instead.')
            try:
                return _setup_conn_old(**kwargs)
            except Exception:
                raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')
        else:
            raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.')
    kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)

    # The return makes unit testing easier
    return {'kubeconfig': kubeconfig, 'context': context}
Set up the Kubernetes API connection singleton.
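A hedged usage sketch: calling the helper with explicit keyword arguments rather than relying on minion configuration. The kubeconfig path and context name are illustrative values, and the call only works inside a Salt execution-module context where `__salt__` is available.

# Assumes a valid kubeconfig exists at this path and defines the given context.
conn = _setup_conn(kubeconfig='/root/.kube/config', context='minikube')
print(conn)   # {'kubeconfig': '/root/.kube/config', 'context': 'minikube'}

# Alternatively, the kubeconfig can be passed inline as base64-encoded data;
# a temporary file is then written and used for the connection:
# conn = _setup_conn(kubeconfig_data='<base64-encoded kubeconfig>',
#                    context='minikube')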
def is_valid_data(obj): """Check if data is JSON serializable. """ if obj: try: tmp = json.dumps(obj, default=datetime_encoder) del tmp except (TypeError, UnicodeDecodeError): return False return True
Check if data is JSON serializable.
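A short, hedged example of the check above. It assumes that `datetime_encoder` (referenced by the function but not shown in this excerpt) serializes `datetime` objects; payloads the encoder cannot handle make the function return False.

import datetime

payload = {'name': 'run-1', 'started': datetime.datetime(2019, 5, 1, 12, 0)}
print(is_valid_data(payload))        # True, assuming datetime_encoder handles datetimes

# A set is not JSON serializable, so this should return False:
print(is_valid_data({'tags': {'a', 'b'}}))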
def fetch(self, category=CATEGORY_BUILD): """Fetch the builds from the url. The method retrieves, from a Jenkins url, the builds updated since the given date. :param category: the category of items to fetch :returns: a generator of builds """ kwargs = {} items = super().fetch(category, **kwargs) return items
Fetch the builds from the url. The method retrieves, from a Jenkins url, the builds updated since the given date. :param category: the category of items to fetch :returns: a generator of builds
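A hedged usage sketch. The `Jenkins` backend class and its constructor arguments are assumptions (they are not part of this excerpt); only the `fetch` call itself mirrors the method above.

# Hypothetical backend construction; class name and arguments are assumed.
backend = Jenkins(url='https://builds.example.org')

for build in backend.fetch(category=CATEGORY_BUILD):
    # Each generated item is assumed to be a dict wrapping the raw Jenkins
    # build record under a 'data' key.
    print(build['data'].get('fullDisplayName'))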
def is_read_only(cls, db: DATABASE_SUPPORTER_FWD_REF,
                 logger: logging.Logger = None) -> bool:
    """Do we have read-only access?"""

    def convert_enums(row_):
        # All these columns are of type enum('N', 'Y');
        # https://dev.mysql.com/doc/refman/5.0/en/enum.html
        return [True if x == 'Y' else (False if x == 'N' else None)
                for x in row_]

    # 1. Check per-database privileges.
    # We don't check SELECT privileges. We're just trying to ensure
    # nothing dangerous is present - for ANY database.
    # If we get an exception (e.g. no access to mysql.db), that is treated as
    # acceptable; see the except clause below.
    try:
        sql = """
            SELECT db,
                   /* must not have: */
                   Insert_priv, Update_priv, Delete_priv,
                   Create_priv, Drop_priv, Index_priv, Alter_priv,
                   Lock_tables_priv, Create_view_priv,
                   Create_routine_priv, Alter_routine_priv,
                   Execute_priv, Event_priv, Trigger_priv
            FROM mysql.db
            WHERE CONCAT(user, '@', host) = CURRENT_USER()
        """
        rows = db.fetchall(sql)
        for row in rows:
            dbname = row[0]
            prohibited = convert_enums(row[1:])
            if any(prohibited):
                if logger:
                    logger.debug(
                        "MySQL.is_read_only(): FAIL: database privileges "
                        "wrong: dbname={}, prohibited={}".format(
                            dbname, prohibited
                        )
                    )
                return False
    except mysql.OperationalError:
        # Probably: error 1142, "SELECT command denied to user 'xxx'@'yyy'
        # for table 'db'". This would be OK.
        pass

    # 2. Global privileges, e.g. as held by root
    try:
        sql = """
            SELECT /* must not have: */
                   Insert_priv, Update_priv, Delete_priv,
                   Create_priv, Drop_priv,
                   Reload_priv, Shutdown_priv,
                   Process_priv, File_priv, Grant_priv,
                   Index_priv, Alter_priv,
                   Show_db_priv, Super_priv,
                   Lock_tables_priv, Execute_priv,
                   Repl_slave_priv, Repl_client_priv,
                   Create_view_priv,
                   Create_routine_priv, Alter_routine_priv,
                   Create_user_priv,
                   Event_priv, Trigger_priv,
                   Create_tablespace_priv
            FROM mysql.user
            WHERE CONCAT(user, '@', host) = CURRENT_USER()
        """
        rows = db.fetchall(sql)
        if not rows or len(rows) > 1:
            return False
        prohibited = convert_enums(rows[0])
        if any(prohibited):
            if logger:
                logger.debug(
                    "MySQL.is_read_only(): FAIL: GLOBAL privileges "
                    "wrong: prohibited={}".format(prohibited))
            return False
    except mysql.OperationalError:
        # Probably: error 1142, "SELECT command denied to user 'xxx'@'yyy'
        # for table 'user'". This would be OK.
        pass

    return True
Do we have read-only access?
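A hedged usage sketch. The hosting class (`MySQL` here) and the construction of the `db` helper are assumptions; the method only requires an object exposing `fetchall()`.

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# `db` is assumed to be an already-connected database helper object.
if MySQL.is_read_only(db, logger=logger):
    print("Current MySQL account appears to be read-only.")
else:
    print("Current MySQL account has write privileges; refusing to proceed.")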
def cancel(self, mark_completed_as_cancelled=False):
    """
    Cancel the future. If the future has not been started yet, it will never
    start running. If the future is already running, it will run until the
    worker function exits. The worker function can check if the future has
    been cancelled using the :meth:`cancelled` method.

    If the future has already been completed, it will not be marked as
    cancelled unless you set *mark_completed_as_cancelled* to :const:`True`.

    :param mark_completed_as_cancelled: If this is :const:`True` and the
        future has already completed, it will be marked as cancelled anyway.
    """

    with self._lock:
        if not self._completed or mark_completed_as_cancelled:
            self._cancelled = True
        callbacks = self._prepare_done_callbacks()
    callbacks()
Cancel the future. If the future has not been started yet, it will never
start running. If the future is already running, it will run until the
worker function exits. The worker function can check if the future has been
cancelled using the :meth:`cancelled` method.

If the future has already been completed, it will not be marked as cancelled
unless you set *mark_completed_as_cancelled* to :const:`True`.

:param mark_completed_as_cancelled: If this is :const:`True` and the future
    has already completed, it will be marked as cancelled anyway.
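A hedged sketch of cooperative cancellation. How the worker obtains its future depends on the surrounding executor API, which is not shown here; only `cancel()` and `cancelled()` are taken from the excerpt.

import time

def worker(future):
    # Hypothetical worker signature: the future is assumed to be passed in.
    for _ in range(100):
        if future.cancelled():   # observe the flag set by cancel()
            return None          # exit early; cancel() does not kill the thread
        time.sleep(0.1)          # one unit of work
    return 'done'

# Elsewhere, e.g. from the main thread:
# future.cancel()                                   # stops at the next check
# future.cancel(mark_completed_as_cancelled=True)   # also marks finished futures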
def clean_names(lines, ensure_unique_names=False, strip_prefix=False, make_database_safe=False): """ Clean the names. Options to: - strip prefixes on names - enforce unique names - make database safe names by converting - to _ """ names = {} for row in lines: if strip_prefix: row['name'] = row['name'][row['name'].find('-') + 1:] if row['indexed_by'] is not None: row['indexed_by'] = row['indexed_by'][row['indexed_by'].find( '-') + 1:] if ensure_unique_names: i = 1 while (row['name'] if i == 1 else row['name'] + "-" + str(i)) in names: i += 1 names[row['name'] if i == 1 else row['name'] + "-" + str(i)] = 1 if i > 1: row['name'] = row['name'] + "-" + str(i) if make_database_safe: row['name'] = row['name'].replace("-", "_") return lines
Clean the names. Options to: - strip prefixes on names - enforce unique names - make database safe names by converting - to _
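A small worked example combining all three options. The input rows are illustrative; the expected dict keys ('name', 'indexed_by') come from the function body above.

rows = [
    {'name': 'sensor-speed', 'indexed_by': None},
    {'name': 'sensor-speed', 'indexed_by': None},               # duplicate name
    {'name': 'sensor-turn-rate', 'indexed_by': 'sensor-speed'},
]

cleaned = clean_names(rows, ensure_unique_names=True, strip_prefix=True,
                      make_database_safe=True)
print([r['name'] for r in cleaned])
# ['speed', 'speed_2', 'turn_rate']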
def download_supplementary_files(self, directory='series', download_sra=True, email=None, sra_kwargs=None, nproc=1): """Download supplementary data. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: directory (:obj:`str`, optional): Directory to download the data (in this directory function will create new directory with the files), by default this will be named with the series name + _Supp. download_sra (:obj:`bool`, optional): Indicates whether to download SRA raw data too. Defaults to True. email (:obj:`str`, optional): E-mail that will be provided to the Entrez. Defaults to None. sra_kwargs (:obj:`dict`, optional): Kwargs passed to the GSM.download_SRA method. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). Returns: :obj:`dict`: Downloaded data for each of the GSM """ if sra_kwargs is None: sra_kwargs = dict() if directory == 'series': dirpath = os.path.abspath(self.get_accession() + "_Supp") utils.mkdir_p(dirpath) else: dirpath = os.path.abspath(directory) utils.mkdir_p(dirpath) downloaded_paths = dict() if nproc == 1: # No need to parallelize, running ordinary download in loop downloaded_paths = dict() for gsm in itervalues(self.gsms): logger.info( "Downloading SRA files for %s series\n" % gsm.name) paths = gsm.download_supplementary_files(email=email, download_sra=download_sra, directory=dirpath, sra_kwargs=sra_kwargs) downloaded_paths[gsm.name] = paths elif nproc > 1: # Parallelization enabled downloaders = list() # Collecting params for Pool.map in a loop for gsm in itervalues(self.gsms): downloaders.append([ gsm, download_sra, email, dirpath, sra_kwargs]) p = Pool(nproc) results = p.map(_supplementary_files_download_worker, downloaders) downloaded_paths = dict(results) else: raise ValueError("Nproc should be non-negative: %s" % str(nproc)) return downloaded_paths
Download supplementary data. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: directory (:obj:`str`, optional): Directory to download the data (in this directory function will create new directory with the files), by default this will be named with the series name + _Supp. download_sra (:obj:`bool`, optional): Indicates whether to download SRA raw data too. Defaults to True. email (:obj:`str`, optional): E-mail that will be provided to the Entrez. Defaults to None. sra_kwargs (:obj:`dict`, optional): Kwargs passed to the GSM.download_SRA method. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). Returns: :obj:`dict`: Downloaded data for each of the GSM
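A hedged usage sketch in the style of GEOparse; the `get_GEO` call and the accession number are illustrative assumptions, and only the method call itself comes from the code above.

import GEOparse

gse = GEOparse.get_GEO("GSE1563", destdir="./geo")
paths = gse.download_supplementary_files(
    directory="./geo_supp",
    download_sra=False,            # skip raw SRA data for a quick run
    email="user@example.org",      # passed to Entrez when SRA is requested
)
for gsm_name, files in paths.items():
    print(gsm_name, files)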
def go_to_line(self, line): """ Moves the text cursor to given line. :param line: Line to go to. :type line: int :return: Method success. :rtype: bool """ cursor = self.textCursor() cursor.setPosition(self.document().findBlockByNumber(line - 1).position()) self.setTextCursor(cursor) return True
Moves the text cursor to given line. :param line: Line to go to. :type line: int :return: Method success. :rtype: bool
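A hedged usage sketch. The widget class is assumed to be a Qt text-editor subclass that provides the method above; the class name is a placeholder.

# Hypothetical editor widget exposing go_to_line(); the name is a placeholder.
editor = ScriptEditor()
editor.setPlainText("\n".join("line {0}".format(i) for i in range(1, 101)))

editor.go_to_line(42)
# The text cursor is now at the start of line 42 (block numbers are 0-based):
print(editor.textCursor().blockNumber() + 1)   # 42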