Columns — Unnamed: 0: int64 (row id, 0–389k) · code: string (lengths 26–79.6k) · docstring: string (lengths 1–46.9k)
381,100
def is_running(self):
    if self._process:
        if self._process.returncode is None:
            return True
        else:
            self._process = None
    return False
Checks if the QEMU process is running :returns: True or False
381,101
def GetEventTypeString(self, event_type):
    if 0 <= event_type < len(self._EVENT_TYPES):
        return self._EVENT_TYPES[event_type]
    # format-string literal lost in extraction; a plausible fallback restored
    return 'Unknown event type: {0:d}'.format(event_type)
Retrieves a string representation of the event type. Args: event_type (int): event type. Returns: str: description of the event type.
381,102
def to_funset(self, lname="clamping", cname="clamped"):
    fs = set()
    for i, clamping in enumerate(self):
        fs.add(gringo.Fun(lname, [i]))
        fs = fs.union(clamping.to_funset(i, cname))
    return fs
Converts the list of clampings to a set of `gringo.Fun`_ instances Parameters ---------- lname : str Predicate name for the clamping id cname : str Predicate name for the clamped variable Returns ------- set Representation of all clampings as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
381,103
def singular(plural):
    # suffix literals lost in extraction; 'ies' -> 'y' and a dropped trailing
    # 's' are restored from the slice lengths and the docstring example
    if plural.endswith('ies'):
        return plural[:-3] + 'y'
    if plural.endswith('s'):
        return plural[:-1]
    raise ValueError('unknown plural form %r' % (plural,))
Take a plural English word and turn it into singular. Obviously, this doesn't work in general; it knows just enough words to generate XML tag names for list items. For example, if we have an element called 'tracks' in the response, it will be serialized as a list without named items in JSON, but we need names for items in XML, so those will be called 'track'.
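A quick usage sketch, assuming the suffix literals restored above ('ies' -> 'y', trailing 's' dropped):

# assuming the reconstruction of singular() above
assert singular('tracks') == 'track'          # drops the trailing 's'
assert singular('categories') == 'category'   # 'ies' -> 'y'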
381,104
def collect(self):
    # log message, statvfs path and result-dict keys were lost in extraction;
    # plausible values are restored ('/' for the filesystem, descriptive keys)
    if libvirt is None:
        self.log.error('Unable to import libvirt')
        return {}
    conn = libvirt.openReadOnly(None)
    conninfo = conn.getInfo()
    memallocated = 0
    coresallocated = 0
    totalcores = 0
    results = {}
    domIds = conn.listDomainsID()
    if 0 in domIds:
        domU = conn.lookupByID(0)
        totalcores = domU.info()[3]
    s = os.statvfs('/')
    freeSpace = (s.f_bavail * s.f_frsize) / 1024
    for i in domIds:
        if i == 0:
            continue
        domU = conn.lookupByID(i)
        dominfo = domU.info()
        memallocated += dominfo[2]
        if i > 0:
            coresallocated += dominfo[3]
    results = {
        'InstalledMem': conninfo[1],
        'MemAllocated': memallocated / 1024,
        'MemFree': conninfo[1] - (memallocated / 1024),
        'CoresAllocated': coresallocated,
        'FreeSpace': freeSpace,
        'TotalCores': totalcores,
        'CoresFree': (totalcores - coresallocated),
    }
    for k in results.keys():
        self.publish(k, results[k], 0)
Collect libvirt data
381,105
def get_time(self):
    command = const.CMD_GET_TIME
    response_size = 1032
    cmd_response = self.__send_command(command, b'', response_size)  # empty payload literal restored
    if cmd_response.get('status'):  # dict key lost in extraction; 'status' assumed
        return self.__decode_time(self.__data[:4])
    else:
        raise ZKErrorResponse("can't get time")
:return: the machine's time
381,106
def delete_shifts(self, shifts):
    # query-string key lost in extraction; 'ids' assumed from the API shape
    url = "/2/shifts/?%s" % urlencode(
        {'ids': ",".join(str(s) for s in shifts)})
    data = self._delete_resource(url)
    return data
Delete existing shifts. http://dev.wheniwork.com/#delete-shift
381,107
def add_how(voevent, descriptions=None, references=None):
    # element-name literals restored from the VOEvent schema ('How', 'Description')
    if not voevent.xpath('How'):
        etree.SubElement(voevent, 'How')
    if descriptions is not None:
        for desc in _listify(descriptions):
            etree.SubElement(voevent.How, 'Description')
            voevent.How.Description[-1] = desc
    if references is not None:
        voevent.How.extend(_listify(references))
Add descriptions or references to the How section. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. descriptions(str): Description string, or list of description strings. references(:py:class:`voeventparse.misc.Reference`): A reference element (or list thereof).
381,108
def add(self, key, column_parent, column, consistency_level):
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_add(key, column_parent, column, consistency_level)
    return d
Increment or decrement a counter. Parameters: - key - column_parent - column - consistency_level
381,109
def export(request, page_id, export_unpublished=False):
    try:
        if export_unpublished:
            root_page = Page.objects.get(id=page_id)
        else:
            root_page = Page.objects.get(id=page_id, live=True)
    except Page.DoesNotExist:
        # key and message literals lost in extraction; plausible values restored
        return JsonResponse({'error': _('page not found')})
    payload = export_pages(root_page, export_unpublished=export_unpublished)
    return JsonResponse(payload)
API endpoint of this source site to export a part of the page tree rooted at page_id Requests are made by a destination site's import_from_api view.
381,110
def summarycanvas(args):
    p = OptionParser(summarycanvas.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())
    for vcffile in args:
        counter = get_gain_loss_summary(vcffile)
        pf = op.basename(vcffile).split(".")[0]
        print(pf + " " +
              " ".join("{}:{}".format(k, v) for k, v in sorted(counter.items())))
%prog summarycanvas output.vcf.gz Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output.
381,111
def byte_adaptor(fbuffer):
    if six.PY3:
        strings = fbuffer.read().decode()
        fbuffer = six.StringIO(strings)
        return fbuffer
    else:
        return fbuffer
provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer
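A minimal usage sketch, assuming the function above runs under Python 3 (so six.PY3 is true):

from io import BytesIO
# byte_adaptor as defined above: the bytes stream is re-wrapped as text
buf = byte_adaptor(BytesIO(b'symbol,price\nINFY,1500\n'))
print(buf.read())   # 'symbol,price\nINFY,1500\n' as str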
381,112
def paintNormal(self, painter):
    rect = self.rect()
    x = 0
    y = self.padding()
    w = rect.width()
    h = rect.height() - (2 * self.padding()) - 1
    radius = self.borderRadius()
    color = self.color()
    alt_color = self.alternateColor()
    if self.isSelected():
        color = self.highlightColor()
        alt_color = self.alternateHighlightColor()
    gradient = QLinearGradient()
    gradient.setStart(0, 0)
    gradient.setFinalStop(0, h)
    gradient.setColorAt(0, color)
    gradient.setColorAt(0.8, alt_color)
    gradient.setColorAt(1, color)
    painter.setPen(self.borderColor())
    if radius:
        painter.setRenderHint(painter.Antialiasing)
        pen = painter.pen()
        pen.setWidthF(0.5)
        painter.setPen(pen)
    painter.setBrush(QBrush(gradient))
    painter.drawRoundedRect(x, y, w, h, radius, radius)
    if self.showProgress():
        gradient = QLinearGradient()
        gradient.setStart(0, 0)
        gradient.setFinalStop(0, h)
        gradient.setColorAt(0, self.progressColor())
        gradient.setColorAt(0.8, self.alternateProgressColor())
        gradient.setColorAt(1, self.progressColor())
        prog_w = (w - 4) * (self._percentComplete / 100.0)
        radius -= 2
        painter.setPen(Qt.NoPen)
        painter.setBrush(QBrush(gradient))
        painter.drawRoundedRect(x + 2, y + 2, prog_w, h - 4, radius, radius)
    if self.text():
        painter.setPen(self.textColor())
        painter.drawText(x, y, w, h, Qt.AlignCenter, self.text())
Paints this item as the normal look. :param painter | <QPainter>
381,113
def toString(self):
    # the format string was one placeholder short for its five arguments, and
    # the layer-type literals and array labels were lost in extraction;
    # 'Output'/'Input' and descriptive labels are assumed
    string = "Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)\n" % (
        self.name, self.kind, self.size, self.active, self.frozen)
    if self.type == 'Output':
        string += toStringArray('Target    ', self.target, self.displayWidth)
    string += toStringArray('Activation', self.activation, self.displayWidth)
    if self.type != 'Input' and self._verbosity > 1:
        string += toStringArray('Error     ', self.error, self.displayWidth)
    if self._verbosity > 4 and self.type != 'Input':
        string += toStringArray('weight    ', self.weight, self.displayWidth)
        string += toStringArray('dweight   ', self.dweight, self.displayWidth)
        string += toStringArray('delta     ', self.delta, self.displayWidth)
        string += toStringArray('netinput  ', self.netinput, self.displayWidth)
        string += toStringArray('wed       ', self.wed, self.displayWidth)
    return string
Returns a string representation of Layer instance.
381,114
def _as_versioned_jar(self, internal_target):
    jar, _ = internal_target.get_artifact_info()
    pushdb_entry = self._get_db(internal_target).get_entry(internal_target)
    return jar.copy(rev=pushdb_entry.version().version())
Fetches the jar representation of the given target, and applies the latest pushdb version.
381,115
def _draw(self, prev_angle=None, prev_length=None):
    # the distribution-key strings passed to drawFrom/getrand were lost in
    # extraction; hypothetical names are used
    if (prev_angle is None) or (prev_length is None):
        (length, angle) = np.unravel_index(
            self.drawFrom('firstLenAng', self.getrand('firstLenAng')),
            self.firstLenAng_shape)
        angle = angle - ((self.firstLenAng_shape[1] - 1) / 2)
        angle += 0.5
        length += 0.5
        length *= self.fm.pixels_per_degree
    else:
        ind = int(floor(prev_length / self.fm.pixels_per_degree))
        while ind >= len(self.probability_cumsum):
            ind -= 1
        while not (self.probability_cumsum[ind]).any():
            ind -= 1
        J, I = np.unravel_index(
            self.drawFrom('lenAng' + repr(ind),
                          self.getrand('lenAng' + repr(ind))),
            self.full_H1[ind].shape)
        angle = reshift((I - self.full_H1[ind].shape[1] / 2) + prev_angle)
        angle += 0.5
        length = J + 0.5
        length *= self.fm.pixels_per_degree
    return angle, length
Draws a new length- and angle-difference pair and calculates length and angle absolutes matching the last saccade drawn. Parameters: prev_angle : float, optional The last angle that was drawn in the current trajectory prev_length : float, optional The last length that was drawn in the current trajectory Note: Either both prev_angle and prev_length have to be given or none; if only one parameter is given, it will be neglected.
381,116
def name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset):
    start_codon_no = codon_no - 1
    if len(sbjct_nucs) == 3:
        start_codon_no = codon_no
    start_codon = get_codon(sbjct_seq, start_codon_no, start_offset)
    end_codon = get_codon(sbjct_seq, codon_no, start_offset)
    pos_name = "p.%s%d_%s%dins%s" % (aa(start_codon), start_codon_no,
                                     aa(end_codon), codon_no, aa_alt)
    return pos_name
This function is used to name an insertion mutation based on the HGVS recommendation.
381,117
def convertFsDirWavToWav(dirName, Fs, nC):
    # glob pattern literal lost in extraction; '*.wav' assumed
    types = (dirName + os.sep + '*.wav',)
    filesToProcess = []
    for files in types:
        filesToProcess.extend(glob.glob(files))
    newDir = dirName + os.sep + "Fs" + str(Fs) + "_" + "NC" + str(nC)
    if os.path.exists(newDir) and newDir != ".":
        shutil.rmtree(newDir)
    os.makedirs(newDir)
    for f in filesToProcess:
        _, wavFileName = ntpath.split(f)
        command = ("avconv -i \"" + f + "\" -ar " + str(Fs) + " -ac " +
                   str(nC) + " \"" + newDir + os.sep + wavFileName + "\"")
        print(command)
        os.system(command)
This function converts the WAV files stored in a folder to WAV files with a different sampling frequency and number of channels. ARGUMENTS: - dirName: the path of the folder where the WAVs are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channels of the generated WAV files
381,118
def month_name_to_number(month, to_int=False):
    # month-name keys and number values lost in extraction; restored from the
    # docstring (MMM -> 01-12)
    number = {
        'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
        'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
        'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12',
    }.get(month)
    return int(number) if to_int else number
Convert a month name (MMM) to its number (01-12). Args: month (str): 3-letters string describing month. to_int (bool): cast number to int or not. Returns: str/int: the month's number (between 01 and 12).
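Assuming the month table restored above, the two return modes look like this:

assert month_name_to_number('Jan') == '01'             # zero-padded string
assert month_name_to_number('Dec', to_int=True) == 12  # cast to int
assert month_name_to_number('xyz') is None             # unknown names fall through .get()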
381,119
def figsize(x=8, y=7., aspect=1.):
    # rcParams key lost in extraction; 'figure.figsize' restored
    mpl.rcParams.update({'figure.figsize': (x * aspect, y)})
manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar
381,120
def read_ndk_version(ndk_dir):
    # the filename, log messages and split tokens were lost in extraction;
    # values restored from the NDK's source.properties convention
    try:
        with open(join(ndk_dir, 'source.properties')) as fileh:
            ndk_data = fileh.read()
    except IOError:
        info('Could not determine NDK version, no source.properties in the NDK dir')
        return
    for line in ndk_data.split('\n'):
        if line.startswith('Pkg.Revision'):
            break
    else:
        info('Could not parse NDK source.properties, not checking NDK version')
        return
    ndk_version = LooseVersion(line.split('=')[-1].strip())
    return ndk_version
Read the NDK version from the NDK dir, if possible
381,121
def get_as_nullable_datetime(self, key):
    value = self.get(key)
    return DateTimeConverter.to_nullable_datetime(value)
Converts map element into a Date or returns None if conversion is not possible. :param key: an index of element to get. :return: Date value of the element or None if conversion is not supported.
381,122
def _do_highlight(content, query, tag='em'):
    # the default tag and the token-splitting regex were lost in extraction;
    # 'em' and a word-character pattern are assumed
    for term in query:
        term = term.decode()
        for match in re.findall(r'\w+', term):
            match_re = re.compile(match, re.I)
            content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content)
    return content
Highlight `query` terms in `content` with html `tag`. This method assumes that the input text (`content`) does not contain any special formatting. That is, it does not contain any html tags or similar markup that could be screwed up by the highlighting. Required arguments: `content` -- Content to search for instances of `text` `text` -- The text to be highlighted
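A usage sketch, assuming the reconstructed defaults above ('em' tag, word-character regex); query terms arrive as bytes:

html = _do_highlight('Python is fun', [b'python'], tag='em')
print(html)  # '<em>python</em> is fun' -- the match is replaced case-insensitively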
381,123
def get_json_results(self, response):
    # the body of the outer try block was lost in extraction; the JSON
    # decoding, the 429 check and the nested retry are reconstructed from the
    # surviving handlers
    json_results = {}
    try:
        json_results = response.json()
        if json_results.get('statusCode') == 429:
            try:
                timeout = 5  # original timeout parsing lost; fixed back-off assumed
                print("CODE 429, sleeping for {timeout} seconds".format(timeout=str(timeout)))
                time.sleep(timeout)
            except (AttributeError, ValueError):
                if not self.silent_fail:
                    raise PyMsCognitiveWebSearchException(
                        "CODE 429. Failed to auto-sleep: {message}".format(
                            code=response.status_code,
                            message=json_results["message"]))
                else:
                    print("CODE 429. Failed to auto-sleep: {message}. Trying "
                          "again in 5 seconds.".format(
                              code=response.status_code,
                              message=json_results["message"]))
                    time.sleep(5)
    except ValueError:
        if not self.silent_fail:
            raise PyMsCognitiveWebSearchException(
                "Request returned with code %s, error msg: %s"
                % (response.status_code, response.text))
        else:
            print("[ERROR] Request returned with code %s, error msg: %s. "
                  "\nContinuing in 5 seconds."
                  % (response.status_code, response.text))
            time.sleep(5)
    return json_results
Parses the request result and returns the JSON object. Handles all errors.
381,124
def _index_idiom(el_name, index, alt=None):
    el_index = "%s[%d]" % (el_name, index)
    if index == 0:
        cond = "%s" % el_name
    else:
        cond = "len(%s) - 1 >= %d" % (el_name, index)
    # comment literal restored from the docstring's example output
    output = IND + "# pick element from list\n"
    return output + IND + "%s = %s if %s else %s\n\n" % (
        el_name,
        el_index,
        cond,
        repr(alt)
    )
Generate string where `el_name` is indexed by `index` if there are enough items or `alt` is returned. Args: el_name (str): Name of the `container` which is indexed. index (int): Index of the item you want to obtain from container. alt (whatever, default None): Alternative value. Returns: str: Python code. Live example:: >>> import generator as g >>> print g._index_idiom("xex", 0) # pick element from list xex = xex[0] if xex else None >>> print g._index_idiom("xex", 1, "something") # pick element from list xex = xex[1] if len(xex) - 1 >= 1 else 'something'
381,125
def register_date_conversion_handler(date_specifier_patterns):
    def _decorator(func):
        global DATE_SPECIFIERS_CONVERSION_HANDLERS
        DATE_SPECIFIERS_CONVERSION_HANDLERS[
            DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func
        return func
    return _decorator
Decorator for registering handlers that convert text dates to dates. Args: date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered
381,126
def http_method(self, method):
    self.build_url()
    try:
        response = self.get_http_method(method)
        is_success = response.ok
        try:
            response_message = response.json()
        except ValueError:
            response_message = response.text
    except requests.exceptions.RequestException as exc:
        is_success = False
        response_message = exc.args
    return is_success, response_message
Execute the given HTTP method and return whether it succeeded, together with the response: the raw text on failure, or a Python object decoded from JSON on success.
381,127
def _get_cache_key(self, args, kwargs):
    # dict keys lost in extraction; 'name', 'args' and 'kwargs' assumed
    hash_input = json.dumps({'name': self.name, 'args': args, 'kwargs': kwargs},
                            sort_keys=True)
    return hashlib.md5(hash_input).hexdigest()
Returns key to be used in cache
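A standalone sketch of the same idea, with the bytes-encoding step needed on Python 3 (hashlib.md5 requires bytes):

import hashlib
import json

def cache_key(name, args, kwargs):
    # serialize deterministically, then hash; sort_keys makes the key stable
    payload = json.dumps({'name': name, 'args': args, 'kwargs': kwargs},
                         sort_keys=True)
    return hashlib.md5(payload.encode('utf-8')).hexdigest()

assert cache_key('f', [1, 2], {'a': 3}) == cache_key('f', [1, 2], {'a': 3})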
381,128
def get_evidence(self, relation):
    # dict-key string literals were lost in extraction; plausible keys are
    # restored from the surrounding accesses, and the Evidence constructor
    # call is reconstructed from its surviving keyword arguments
    provenance = relation.get('provenance')
    text = None
    context = None
    if provenance:
        sentence_tag = provenance[0].get('sentence')
        if sentence_tag and '@id' in sentence_tag:
            sentence_id = sentence_tag['@id']
            sentence = self.doc.sentences.get(sentence_id)
            if sentence is not None:
                text = _sanitize(sentence['text'])
            timexes = sentence.get('timexes', [])
            if timexes:
                timex = timexes[0]
                tc = time_context_from_timex(timex)
                context = WorldContext(time=tc)
            geolocs = sentence.get('geolocs', [])
            if geolocs:
                geoloc = geolocs[0]
                rc = ref_context_from_geoloc(geoloc)
                if context:
                    context.geo_location = rc
                else:
                    context = WorldContext(geo_location=rc)
        doc_id = provenance[0].get('document', {}).get('@id')
        if doc_id:
            title = self.doc.documents.get(doc_id, {}).get('title')
            if title:
                provenance[0]['document']['title'] = title
    annotations = {'found_by': relation.get('rule'),
                   'provenance': provenance}
    if self.doc.dct is not None:
        annotations['document_creation_time'] = self.doc.dct.to_json()
    epistemics = {}
    negations = self.get_negation(relation)
    hedgings = self.get_hedging(relation)
    if hedgings:
        epistemics['hedgings'] = hedgings
    if negations:
        epistemics['negated'] = True
    ev = Evidence(text=text, annotations=annotations,
                  context=context, epistemics=epistemics)
    return ev
Return the Evidence object for the INDRA Statement.
381,129
def get_macs(vm_):
    macs = []
    nics = get_nics(vm_)
    if nics is None:
        return None
    for nic in nics:
        macs.append(nic)
    return macs
Return a list of MAC addresses from the named vm CLI Example: .. code-block:: bash salt '*' virt.get_macs <vm name>
381,130
def launch_job(job_spec):
    project_id = "projects/{}".format(
        text_encoder.native_to_unicode(default_project()))
    credentials = GoogleCredentials.get_application_default()
    cloudml = discovery.build("ml", "v1", credentials=credentials,
                              cache_discovery=False)
    request = cloudml.projects().jobs().create(body=job_spec, parent=project_id)
    request.execute()
Launch job on ML Engine.
381,131
def tokenize_paragraphs(cls, text):
    # split tokens lost in extraction; blank-line separators assumed
    paragraphs = []
    paragraphs_first_pass = text.split('\n\n')
    for p in paragraphs_first_pass:
        paragraphs_second_pass = re.split(r'\n\s*\n', p)
        paragraphs += paragraphs_second_pass
    paragraphs = [p for p in paragraphs if p]
    return paragraphs
Convert an input string into a list of paragraphs.
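A standalone sketch under the assumption that paragraphs are separated by blank lines:

import re

def split_paragraphs(text):
    # split on one or more blank lines and drop empty chunks
    return [p for p in re.split(r'\n\s*\n', text) if p]

print(split_paragraphs('first para\n\nsecond para\n\n\nthird'))
# ['first para', 'second para', 'third']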
381,132
def validate_url(url):
    # assertion messages, the protocol list and the hostname regex were lost
    # in extraction; plausible values are restored, and the unbound name in
    # the port error message is fixed to s_port
    if not isinstance(url, basestring):
        raise TypeError("url must be a string, not %r" % type(url))
    url = url.lower()
    proto_addr = url.split('://')
    assert len(proto_addr) == 2, 'Invalid url: %r' % url
    proto, addr = proto_addr
    assert proto in ['tcp', 'udp', 'pgm', 'epgm', 'ipc'], \
        "Invalid protocol: %r" % proto
    pat = re.compile(r'^[a-z0-9]([a-z0-9.\-]*[a-z0-9])?$')
    if proto == 'tcp':
        lis = addr.split(':')
        assert len(lis) == 2, 'Invalid url: %r' % url
        addr, s_port = lis
        try:
            port = int(s_port)
        except ValueError:
            raise AssertionError("Invalid port %r in url: %r" % (s_port, url))
        assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r' % url
    else:
        # only tcp urls are fully validated
        pass
    return True
validate a url for zeromq
381,133
def _etextno_to_uri_subdirectory(etextno):
    str_etextno = str(etextno).zfill(2)
    all_but_last_digit = list(str_etextno[:-1])
    subdir_part = "/".join(all_but_last_digit)
    subdir = "{}/{}".format(subdir_part, etextno)
    return subdir
Returns the subdirectory that an etextno will be found in a gutenberg mirror. Generally, one finds the subdirectory by separating out each digit of the etext number, and uses it for a directory. The exception here is for etext numbers less than 10, which are prepended with a 0 for the directory traversal. >>> _etextno_to_uri_subdirectory(1) '0/1' >>> _etextno_to_uri_subdirectory(19) '1/19' >>> _etextno_to_uri_subdirectory(15453) '1/5/4/5/15453'
381,134
def _dfromtimestamp(timestamp):
    try:
        return datetime.date.fromtimestamp(timestamp)
    except OSError:
        timestamp -= time.timezone
        d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        if _isdst(d):
            timestamp += 3600
            d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
        return d
Custom date timestamp constructor. ditto
381,135
def tag_wordnet(self, **kwargs):
    global wordnet_tagger
    if wordnet_tagger is None:
        wordnet_tagger = WordnetTagger()
    self.__wordnet_tagger = wordnet_tagger
    if len(kwargs) > 0:
        return self.__wordnet_tagger.tag_text(self, **kwargs)
    return self.__wordnet_tagger.tag_text(self, **self.__kwargs)
Create wordnet attribute in ``words`` layer. See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text` method for applicable keyword arguments.
381,136
def get_lldp_neighbor_detail_input_request_type_get_request_interface_type(self, **kwargs):
    config = ET.Element("config")
    get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
    config = get_lldp_neighbor_detail
    input = ET.SubElement(get_lldp_neighbor_detail, "input")
    request_type = ET.SubElement(input, "request-type")
    get_request = ET.SubElement(request_type, "get-request")
    interface_type = ET.SubElement(get_request, "interface-type")
    # kwargs keys lost in extraction; restored from the element names
    interface_type.text = kwargs.pop('interface_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
381,137
def get(self, name):
    path = self._get_cluster_storage_path(name)
    try:
        with open(path, 'r') as storage:  # mode literal lost; 'r' assumed
            cluster = self.load(storage)
        # attribute name restored from the assignment below
        for node in sum(cluster.nodes.values(), []):
            if not hasattr(node, 'preferred_ip'):
                log.debug("Monkey patching old version of `Node` class: %s",
                          node.name)
                node.ips = [node.ip_public, node.ip_private]
                node.preferred_ip = None
        cluster.storage_file = path
        return cluster
    except IOError as ex:
        raise ClusterNotFound("Error accessing storage file %s: %s" % (path, ex))
Retrieves the cluster with the given name. :param str name: name of the cluster (identifier) :return: :py:class:`elasticluster.cluster.Cluster`
381,138
def send(remote_host=None):
    # several literals (settings key, command name, result keys) were lost in
    # extraction; hypothetical values are used, and the message format is
    # fixed to take both of its arguments
    my_facts = get()
    if not remote_host:
        remote_host = nago.extensions.settings.get('server')
    remote_node = nago.core.get_node(remote_host)
    if not remote_node:
        raise Exception("Remote host with token=%s not found" % remote_host)
    response = remote_node.send_command('facts', 'post',
                                        host_token=remote_node.token, **my_facts)
    result = {}
    result['response'] = response
    result['message'] = "sent %s facts to remote node %s" % (
        len(my_facts), remote_node.get('host_name'))
    return result
Send my facts to a remote host. If remote_host is provided, data will be sent to that host. Otherwise it will be sent to master.
381,139
def register_entrypoints(self):
    for spec in iter_entry_points(self.entry_point_group):
        format_properties = {"name": spec.name}
        try:
            format_properties.update(spec.load())
        except (DistributionNotFound, ImportError) as err:
            self.log.info(
                "ipymd format {} could not be loaded: {}".format(
                    spec.name, err))
            continue
        self.register(**format_properties)
    return self
Look through the `setup_tools` `entry_points` and load all of the formats.
381,140
def add_info(self, data):
    # the tuple of reserved keys was lost in extraction; plausible
    # build-metadata keys are assumed
    reserved = ('name', 'number', 'started', 'duration', 'url',
                'artifacts', 'modules')
    for key in data:
        if key in reserved:
            raise ValueError(
                "Sorry, cannot set build info with key of {}".format(key))
        self.obj[key] = data[key]
    self.changes.append("Adding build info")
    return self
add info to a build
381,141
def remove_entity_tags(self):
    terms_to_remove = [term for term in self._term_idx_store._i2val
                       if any([word in SPACY_ENTITY_TAGS
                               for word in term.split()])]
    return self.remove_terms(terms_to_remove)
Returns ------- A new TermDocumentMatrix consisting of only terms in the current TermDocumentMatrix that aren't spaCy entity tags. Note: Used if entity types are censored using FeatsFromSpacyDoc(tag_types_to_censor=...).
381,142
def _binary_enable_zero_disable_one_conversion(cls, val, **kwargs):
    # return-value literals lost in extraction; the 0 -> Enabled / 1 ->
    # Disabled mapping is inferred from the function name
    try:
        if val is not None:
            if ord(val) == 0:
                return 'Enabled'
            elif ord(val) == 1:
                return 'Disabled'
            else:
                return 'Invalid Value: {0!r}'.format(val)
        else:
            return 'Not Defined'
    except TypeError:
        return 'Not Defined'
converts a binary 0/1 to Disabled/Enabled
381,143
def transform_cur_commands_interactive(_, **kwargs):
    # payload keys lost in extraction; 'event_payload'/'text' assumed
    event_payload = kwargs.get('event_payload', {})
    cur_commands = event_payload.get('text', '').split(' ')
    _transform_cur_commands(cur_commands)
    event_payload.update({
        'text': ' '.join(cur_commands)
    })
Transform any aliases in current commands in interactive into their respective commands.
381,144
def get(*args, **kwargs):
    from invenio.modules.oauth2server.models import Client
    q = Client.query
    return q.count(), q.all()
Get users.
381,145
def radii_of_curvature(self):
    rocs = []
    for i, _ in enumerate(self):
        if 0 < i < len(self) - 1:
            # the subscript key was lost in extraction; the primitive's 'CA'
            # coordinate entry is a hypothetical stand-in
            rocs.append(radius_of_circumcircle(
                self[i - 1]['CA'], self[i]['CA'], self[i + 1]['CA']))
        else:
            rocs.append(None)
    return rocs
The radius of curvature at each point on the Polymer primitive. Notes ----- Each element of the returned list is the radius of curvature, at a point on the Polymer primitive. Element i is the radius of the circumcircle formed from indices [i-1, i, i+1] of the primitive. The first and final values are None.
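For reference, a plain-Python sketch of what radius_of_circumcircle computes (its real signature is assumed); the circumradius of a triangle follows R = abc / (4·area):

import math

def radius_of_circumcircle(p1, p2, p3):
    # side lengths of the triangle through the three points
    a = math.dist(p2, p3)
    b = math.dist(p1, p3)
    c = math.dist(p1, p2)
    s = (a + b + c) / 2                                          # semi-perimeter
    area = math.sqrt(max(s * (s - a) * (s - b) * (s - c), 0.0))  # Heron's formula
    return float('inf') if area == 0 else a * b * c / (4 * area)

print(radius_of_circumcircle((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # ~0.7071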
381,146
def add_option(self, section, option, value=None):
    if not self.config.has_section(section):
        message = self.add_section(section)
        if not message[0]:
            return message
    if not self.config.has_option(section, option):
        if value:
            self.config.set(section, option, value)
        else:
            self.config.set(section, option)
        return (True, self.config.options(section))
    # message literal lost in extraction; plausible wording restored
    return (False, 'Option {0} already exists in section {1}'.format(option, section))
Creates an option for a section. If the section does not exist, it will create the section.
381,147
def compile(cls, code, path=None, libraries=None, contract_name='',
            extra_args=None):
    # the output selector and result key were lost in extraction; 'bin'
    # (the compiled binary) is assumed for both
    result = cls._code_or_path(
        code, path, contract_name, libraries, 'bin', extra_args)
    return result['bin']
Return the binary of last contract in code.
381,148
def load_imdb_df(dirpath=os.path.join(BIGDATA_PATH, 'imdb'),
                 subdirectories=(('train', 'test'), ('pos', 'neg', 'unsup'))):
    # the directory-name literals and the loop body that builds each
    # DataFrame were lost in extraction; the urls-file handling is
    # reconstructed and the missing per-review loading is marked below
    # (a module-level logger is assumed)
    dfs = {}
    for subdirs in tqdm(list(product(*subdirectories))):
        urlspath = os.path.join(dirpath, subdirs[0],
                                'urls_{}.txt'.format(subdirs[1]))
        if not os.path.isfile(urlspath):
            if subdirs != ('test', 'unsup'):  # test/unsup is known to be absent
                logger.warning('Unable to find expected file at {}'.format(urlspath))
            continue
        df = pd.read_csv(urlspath, header=None, names=['url'])
        # ... per-review rating/text loading lost in extraction ...
        df.sort_index(inplace=True)
        dfs[subdirs] = df
    return pd.concat(dfs.values())
Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings Returns: DataFrame: columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id']) TODO: Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files >> imdb_df().head() url rating text index0 index1 index2 train pos 0 http://www.imdb.com/title/tt0453418 9 Bromwell High is a cartoon comedy. It ran at t... 1 http://www.imdb.com/title/tt0210075 7 If you like adult comedy cartoons, like South ... 2 http://www.imdb.com/title/tt0085688 9 Bromwell High is nothing short of brilliant. E... 3 http://www.imdb.com/title/tt0033022 10 "All the world's a stage and its people actors... 4 http://www.imdb.com/title/tt0043137 8 FUTZ is the only show preserved from the exper...
381,149
def call_temperature(*args, **kwargs):
    # the kwargs key ('value') and some CLI-example residue were lost or
    # garbled in extraction
    res = dict()
    if 'value' not in kwargs:
        raise CommandExecutionError("Parameter 'value' (150~500) is missing")
    try:
        value = max(min(int(kwargs['value']), 500), 150)
    except Exception as err:
        raise CommandExecutionError("Parameter 'value' does not contain an integer")
    devices = _get_lights()
    for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):
        res[dev_id] = _set(dev_id, {"ct": value})
    return res
Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired Arguments: * **value**: 150~500. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.temperature value=150 salt '*' hue.temperature value=150 id=1 salt '*' hue.temperature value=150 id=1,2,3
381,150
def display_dataset(self):
    # header keys and strftime formats were lost in extraction; common keys
    # and a readable format are assumed
    header = self.dataset.header
    self.parent.setWindowTitle(basename(self.filename))
    short_filename = short_strings(basename(self.filename))
    self.idx_filename.setText(short_filename)
    self.idx_s_freq.setText(str(header['s_freq']))
    self.idx_n_chan.setText(str(len(header['chan_name'])))
    start_time = header['start_time'].strftime('%b-%d %H:%M:%S')
    self.idx_start_time.setText(start_time)
    end_time = (header['start_time'] +
                timedelta(seconds=header['n_samples'] / header['s_freq']))
    self.idx_end_time.setText(end_time.strftime('%b-%d %H:%M:%S'))
Update the widget with information about the dataset.
381,151
def insert_mass_range_option_group(parser, nonSpin=False):
    # one action literal was lost in extraction ('store' assumed below);
    # typos in the help strings are corrected
    massOpts = parser.add_argument_group("Options related to mass and spin "
                                         "limits for bank generation")
    massOpts.add_argument("--min-mass1", action="store", type=positive_float,
                          required=True,
                          help="Minimum mass1: must be >= min-mass2. "
                               "REQUIRED. UNITS=Solar mass")
    massOpts.add_argument("--max-mass1", action="store", type=positive_float,
                          required=True,
                          help="Maximum mass1: must be >= max-mass2. "
                               "REQUIRED. UNITS=Solar mass")
    massOpts.add_argument("--min-mass2", action="store", type=positive_float,
                          required=True,
                          help="Minimum mass2. REQUIRED. UNITS=Solar mass")
    massOpts.add_argument("--max-mass2", action="store", type=positive_float,
                          required=True,
                          help="Maximum mass2. REQUIRED. UNITS=Solar mass")
    massOpts.add_argument("--max-total-mass", action="store",
                          type=positive_float, default=None,
                          help="Maximum total mass. OPTIONAL, if not provided "
                               "the max total mass is determined by the "
                               "component masses. UNITS=Solar mass")
    massOpts.add_argument("--min-total-mass", action="store",
                          type=positive_float, default=None,
                          help="Minimum total mass. OPTIONAL, if not provided "
                               "the min total mass is determined by the "
                               "component masses. UNITS=Solar mass")
    massOpts.add_argument("--max-chirp-mass", action="store",
                          type=positive_float, default=None,
                          help="Maximum chirp mass. OPTIONAL, if not provided "
                               "the max chirp mass is determined by the "
                               "component masses. UNITS=Solar mass")
    massOpts.add_argument("--min-chirp-mass", action="store",
                          type=positive_float, default=None,
                          help="Minimum chirp mass. OPTIONAL, if not provided "
                               "the min chirp mass is determined by the "
                               "component masses. UNITS=Solar mass")
    massOpts.add_argument("--max-eta", action="store", type=positive_float,
                          default=0.25,
                          help="Maximum symmetric mass ratio. OPTIONAL, no "
                               "upper bound on eta will be imposed if not "
                               "provided. UNITS=Solar mass.")
    massOpts.add_argument("--min-eta", action="store", type=nonnegative_float,
                          default=0.,
                          help="Minimum symmetric mass ratio. OPTIONAL, no "
                               "lower bound on eta will be imposed if not "
                               "provided. UNITS=Solar mass.")
    massOpts.add_argument("--ns-eos", action="store", default=None,
                          help="Select the EOS to be used for the NS when "
                               "calculating the remnant disk mass. Only 2H is "
                               "currently supported. OPTIONAL")
    massOpts.add_argument("--remnant-mass-threshold", action="store",
                          type=nonnegative_float, default=None,
                          help="Setting this filters EM dim NS-BH binaries: "
                               "if the remnant disk mass does not exceed this "
                               "value, the NS-BH binary is dropped from the "
                               "target parameter space. When it is set to "
                               "None (default value) the EM dim filter is "
                               "not activated. OPTIONAL")
    massOpts.add_argument("--use-eos-max-ns-mass", action="store_true",
                          default=False,
                          help="Cut the mass range of the smaller object to "
                               "the maximum mass allowed by EOS. OPTIONAL")
    massOpts.add_argument("--delta-bh-spin", action="store",
                          type=positive_float, default=None,
                          help="Grid spacing used for the BH spin z component "
                               "when generating the surface of the minimum "
                               "minimum symmetric mass ratio as a function of "
                               "BH spin and NS mass required to produce a "
                               "remnant disk mass that exceeds the threshold "
                               "specified in --remnant-mass-threshold. "
                               "OPTIONAL (0.1 by default)")
    massOpts.add_argument("--delta-ns-mass", action="store",
                          type=positive_float, default=None,
                          help="Grid spacing used for the NS mass when "
                               "generating the surface of the minimum minimum "
                               "symmetric mass ratio as a function of BH spin "
                               "and NS mass required to produce a remnant "
                               "disk mass that exceeds the threshold "
                               "specified in --remnant-mass-threshold. "
                               "OPTIONAL (0.1 by default)")
    if nonSpin:
        parser.add_argument_group(massOpts)
        return massOpts
    massOpts.add_argument("--max-ns-spin-mag", action="store",
                          type=nonnegative_float, default=None,
                          help="Maximum neutron star spin magnitude. Neutron "
                               "stars are defined as components lighter than "
                               "the NS-BH boundary (3 Msun by default). "
                               "REQUIRED if min-mass2 < ns-bh-boundary-mass")
    massOpts.add_argument("--max-bh-spin-mag", action="store",
                          type=nonnegative_float, default=None,
                          help="Maximum black hole spin magnitude. Black "
                               "holes are defined as components at or above "
                               "the NS-BH boundary (3 Msun by default). "
                               "REQUIRED if max-mass1 >= ns-bh-boundary-mass")
    action = massOpts.add_mutually_exclusive_group(required=False)
    action.add_argument("--ns-bh-boundary-mass", action='store',
                        type=positive_float,
                        help="Mass boundary between neutron stars and black "
                             "holes. Components below this mass are "
                             "considered neutron stars and are subject to "
                             "the neutron star spin limits. Components "
                             "at/above are subject to the black hole spin "
                             "limits. OPTIONAL, default=%f. UNITS=Solar mass"
                             % massRangeParameters.default_nsbh_boundary_mass)
    action.add_argument("--nsbh-flag", action="store_true", default=False,
                        help="Set this flag if generating a bank that "
                             "contains only systems with 1 black hole and 1 "
                             "neutron star. With this flag set the heavier "
                             "body will always be subject to the black hole "
                             "spin restriction and the lighter to the "
                             "neutron star spin restriction, regardless of "
                             "mass. OPTIONAL. If set, the value of "
                             "--ns-bh-boundary-mass will be ignored.")
    return massOpts
Adds the options used to specify mass ranges in the bank generation codes to an argparser as an OptionGroup. This should be used if you want to use these options in your code. Parameters ----------- parser : object OptionParser instance. nonSpin : boolean, optional (default=False) If this is provided the spin-related options will not be added.
381,152
def main():
    # argparse flag names and help strings were lost in extraction;
    # hypothetical but conventional values are used
    parser = argparse.ArgumentParser(description='JSON Web Key (JWK) generator')
    parser.add_argument('--kty', dest='kty', metavar='type',
                        help='Key type (RSA, EC or SYM)', required=True)
    parser.add_argument('--size', dest='keysize', type=int, metavar='size',
                        help='Key size')
    parser.add_argument('--crv', dest='crv', metavar='curve',
                        help='EC curve', choices=NIST2SEC.keys(),
                        default=DEFAULT_EC_CURVE)
    parser.add_argument('--exp', dest='rsa_exp', type=int, metavar='exponent',
                        help=f'RSA public key exponent (default {DEFAULT_RSA_EXP})',
                        default=DEFAULT_RSA_EXP)
    parser.add_argument('--kid', dest='kid', metavar='id', help='Key ID')
    args = parser.parse_args()
    if args.kty.upper() == 'RSA':
        if args.keysize is None:
            args.keysize = DEFAULT_RSA_KEYSIZE
        jwk = new_rsa_key(public_exponent=args.rsa_exp,
                          key_size=args.keysize, kid=args.kid)
    elif args.kty.upper() == 'EC':
        if args.crv not in NIST2SEC:
            print("Unknown curve: {0}".format(args.crv), file=sys.stderr)
            exit(1)
        jwk = new_ec_key(crv=args.crv, kid=args.kid)
    elif args.kty.upper() == 'SYM':
        if args.keysize is None:
            args.keysize = DEFAULT_SYM_KEYSIZE
        randomkey = os.urandom(args.keysize)
        jwk = SYMKey(key=randomkey, kid=args.kid)
    else:
        print(f"Unknown key type: {args.kty}", file=sys.stderr)
        exit(1)
    jwk_dict = jwk.serialize(private=True)
    print(json.dumps(jwk_dict, sort_keys=True, indent=4))
    print("SHA-256: " + jwk.thumbprint().decode(), file=sys.stderr)
Main function
381,153
def cli(env):
    # column names and zone keys were lost in extraction; the standard DNS
    # zone fields are assumed
    manager = SoftLayer.DNSManager(env.client)
    zones = manager.list_zones()
    table = formatting.Table(['id', 'zone', 'serial', 'updated'])
    table.align['id'] = 'r'
    table.align['zone'] = 'l'
    for zone in zones:
        table.add_row([
            zone['id'],
            zone['name'],
            zone['serial'],
            zone['updateDate'],
        ])
    env.fout(table)
List all zones.
381,154
def start(self):
    # join separator and log-format strings were lost in extraction
    service_names = ', '.join(self.service_names)
    _log.info('starting services: %s', service_names)
    SpawningProxy(self.containers).start()
    _log.debug('services started: %s', service_names)
Start all the registered services. A new container is created for each service using the container class provided in the __init__ method. All containers are started concurrently and the method will block until all have completed their startup routine.
381,155
def map_clusters(self, size, sampled, clusters):
    ids = np.zeros(size, dtype=int)
    ids[:] = -2
    ids[sampled] = clusters
    return ids
Translate cluster identity back to original data size. Parameters ---------- size : int size of original dataset sampled : array-like integer array describing location of finite values in original data. clusters : array-like integer array of cluster identities Returns ------- list of cluster identities the same length as original data. Where original data are non-finite, returns -2.
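A small sketch of the mapping behaviour, mirroring the method above with plain numpy:

import numpy as np

size = 6
sampled = np.array([0, 2, 5])      # where the original data were finite
clusters = np.array([0, 0, 1])     # cluster ids for the sampled points
ids = np.full(size, -2)            # non-finite positions stay -2
ids[sampled] = clusters
print(ids)                         # [ 0 -2  0 -2 -2  1]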
381,156
def recursively_preempt_states(self):
    self.preempted = True
    self.paused = False
    self.started = False
Preempt the state
381,157
def trace(self, urls=None, **overrides):
    # dict key and HTTP-method literal lost in extraction; restored from the
    # method's purpose
    if urls is not None:
        overrides['urls'] = urls
    return self.where(accept='TRACE', **overrides)
Sets the acceptable HTTP method to TRACE
381,158
def _data(self, received_data):
    if self.listener.on_data(received_data) is False:
        self.stop()
        raise ListenerError(self.listener.connection_id, received_data)
Sends data to listener; if False is returned, the socket is closed. :param received_data: Decoded data received from socket.
381,159
def create_db_instance_read_replica(DBInstanceIdentifier=None, SourceDBInstanceIdentifier=None, DBInstanceClass=None, AvailabilityZone=None, Port=None, AutoMinorVersionUpgrade=None, Iops=None, OptionGroupName=None, PubliclyAccessible=None, Tags=None, DBSubnetGroupName=None, StorageType=None, CopyTagsToSnapshot=None, MonitoringInterval=None, MonitoringRoleArn=None, KmsKeyId=None, PreSignedUrl=None, EnableIAMDatabaseAuthentication=None, SourceRegion=None): pass
Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL that acts as a Read Replica of a source DB instance. All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below. You can create an encrypted Read Replica in a different AWS Region than the source DB instance. In that case, the region where you call the CreateDBInstanceReadReplica action is the destination region of the encrypted Read Replica. The source DB instance must be encrypted. To create an encrypted Read Replica in another AWS Region, you must provide the following values: To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process . See also: AWS API Documentation Examples This example creates a DB instance read replica. Expected Output: :example: response = client.create_db_instance_read_replica( DBInstanceIdentifier='string', SourceDBInstanceIdentifier='string', DBInstanceClass='string', AvailabilityZone='string', Port=123, AutoMinorVersionUpgrade=True|False, Iops=123, OptionGroupName='string', PubliclyAccessible=True|False, Tags=[ { 'Key': 'string', 'Value': 'string' }, ], DBSubnetGroupName='string', StorageType='string', CopyTagsToSnapshot=True|False, MonitoringInterval=123, MonitoringRoleArn='string', KmsKeyId='string', EnableIAMDatabaseAuthentication=True|False, SourceRegion='string' ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] The DB instance identifier of the Read Replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. :type SourceDBInstanceIdentifier: string :param SourceDBInstanceIdentifier: [REQUIRED] The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas. Constraints: Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB instance. Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6. Can specify a DB instance that is a PostgreSQL DB instance only if the source is running PostgreSQL 9.3.5 or later. The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0. If the source DB instance is in the same region as the Read Replica, specify a valid DB instance identifier. If the source DB instance is in a different region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing a Amazon RDS Amazon Resource Name (ARN) . :type DBInstanceClass: string :param DBInstanceClass: The compute and memory capacity of the Read Replica. Note that not all instance classes are available in all regions for all DB engines. Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large Default: Inherits from the source DB instance. :type AvailabilityZone: string :param AvailabilityZone: The Amazon EC2 Availability Zone that the Read Replica will be created in. 
Default: A random, system-chosen Availability Zone in the endpoint's region. Example: us-east-1d :type Port: integer :param Port: The port number that the DB instance uses for connections. Default: Inherits from the source DB instance Valid Values: 1150-65535 :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window. Default: Inherits from the source DB instance :type Iops: integer :param Iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. :type OptionGroupName: string :param OptionGroupName: The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used. :type PubliclyAccessible: boolean :param PubliclyAccessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. Default VPC: true VPC: false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type Tags: list :param Tags: A list of tags. (dict) --Metadata assigned to an Amazon RDS resource consisting of a key-value pair. Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). :type DBSubnetGroupName: string :param DBSubnetGroupName: Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC. Constraints: Can only be specified if the source DB instance identifier specifies a DB instance in another region. The specified DB subnet group must be in the same region in which the operation is running. All Read Replicas in one region that are created from the same source DB instance must either: Specify DB subnet groups from the same VPC. All these Read Replicas will be created in the same VPC. Not specify a DB subnet group. All these Read Replicas will be created outside of any VPC. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default. Example: mySubnetgroup :type StorageType: string :param StorageType: Specifies the storage type to be associated with the Read Replica. 
Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the Read Replica to snapshots of the Read Replica; otherwise false. The default is false. :type MonitoringInterval: integer :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the Read Replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 :type MonitoringRoleArn: string :param MonitoringRoleArn: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess . For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring . If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. :type KmsKeyId: string :param KmsKeyId: The AWS KMS key ID for an encrypted Read Replica. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key. If you create an unencrypted Read Replica and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target Read Replica using the specified KMS encryption key. If you create an encrypted Read Replica from your AWS account, you can specify a value for KmsKeyId to encrypt the Read Replica with a new KMS encryption key. If you don't specify a value for KmsKeyId , then the Read Replica is encrypted with the same KMS key as the source DB instance. If you create an encrypted Read Replica in a different AWS region, then you must specify a KMS key for the destination AWS region. KMS encryption keys are specific to the region that they are created in, and you cannot use encryption keys from one region in another region. :type PreSignedUrl: string :param PreSignedUrl: The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the AWS region that contains the source DB instance. The PreSignedUrl parameter must be used when encrypting a Read Replica from another AWS region. The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source region that contains the encrypted DB instance. The presigned URL request must contain the following parameter values: DestinationRegion - The AWS Region that the Read Replica is created in. This region is the same one where the CreateDBInstanceReadReplica action is called that contains this presigned URL. For example, if you create an encrypted Read Replica in the us-east-1 region, and the source DB instance is in the west-2 region, then you call the CreateDBInstanceReadReplica action in the us-east-1 region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica action in the us-west-2 region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 region. KmsKeyId - The KMS key identifier for the key to use to encrypt the Read Replica in the destination region. This is the same identifier for both the CreateDBInstanceReadReplica action that is called in the destination region, and the action contained in the presigned URL. 
SourceDBInstanceIdentifier - The DB instance identifier for the encrypted Read Replica to be created. This identifier must be in the Amazon Resource Name (ARN) format for the source region. For example, if you create an encrypted Read Replica from a DB instance in the us-west-2 region, then your SourceDBInstanceIdentifier would look like this example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-instance-20161115 . To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process . Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Aurora 5.6 or higher. Default: false :type SourceRegion: string :param SourceRegion: The ID of the region that contains the source for the read replica. :rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 
'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False } } :returns: DBInstanceIdentifier - The identifier for the encrypted Read Replica in the destination region. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted Read Replica. This identifier must be in the ARN format for the source region and is the same value as the SourceDBInstanceIdentifier in the presigned URL.
381,160
def parse_localclasspath(self, tup_tree):
    # element-name literal lost in extraction; 'LOCALCLASSPATH' restored from
    # the docstring
    self.check_node(tup_tree, 'LOCALCLASSPATH')
    k = kids(tup_tree)
    if len(k) != 2:
        raise CIMXMLParseError(
            _format("Element {0!A} has invalid number of child elements "
                    "{1!A} (expecting two child elements "
                    "(LOCALNAMESPACEPATH, CLASSNAME))",
                    name(tup_tree), k),
            conn_id=self.conn_id)
    namespace = self.parse_localnamespacepath(k[0])
    class_path = self.parse_classname(k[1])
    class_path.namespace = namespace
    return class_path
Parse a LOCALCLASSPATH element and return the class path it represents as a CIMClassName object. :: <!ELEMENT LOCALCLASSPATH (LOCALNAMESPACEPATH, CLASSNAME)>
381,161
def mmi_ramp_roman(raster_layer):
    # dict keys and the label format were lost in extraction; plausible keys
    # from the MMI scale definition are assumed
    items = []
    sorted_mmi_scale = sorted(
        earthquake_mmi_scale['classes'], key=itemgetter('value'))
    for class_max in sorted_mmi_scale:
        colour = class_max['color']
        label = '%s' % class_max['key']
        ramp_item = QgsColorRampShader.ColorRampItem(
            class_max['value'], colour, label)
        items.append(ramp_item)
    raster_shader = QgsRasterShader()
    ramp_shader = QgsColorRampShader()
    ramp_shader.setColorRampType(QgsColorRampShader.Interpolated)
    ramp_shader.setColorRampItemList(items)
    raster_shader.setRasterShaderFunction(ramp_shader)
    band = 1
    renderer = QgsSingleBandPseudoColorRenderer(
        raster_layer.dataProvider(), band, raster_shader)
    raster_layer.setRenderer(renderer)
Generate an mmi ramp using range of 1-10 on roman. A standardised range is used so that two shakemaps of different intensities can be properly compared visually with colours stretched across the same range. The colours used are the 'standard' colours commonly shown for the mercalli scale e.g. on wikipedia and other sources. :param raster_layer: A raster layer that will have an mmi style applied. :type raster_layer: QgsRasterLayer .. versionadded:: 4.0
381,162
def valid(*things):
    # as flattened, a string that exists on disk would fall through to the
    # `.valid` attribute check and crash; the guarding branch is restored
    for thing in things:
        if type(thing) is str:
            if not os.path.exists(thing):
                return False
        elif thing.valid is None:
            return False
    return True
Return True if all tasks or files are valid. Valid tasks have been completed already. Valid files exist on the disk.
381,163
def randoffset(self, rstate=None):
    if rstate is None:
        rstate = np.random
    return np.dot(self.axes, randsphere(self.n, rstate=rstate))
Return a random offset from the center of the ellipsoid.
381,164
def find_value_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env):
    q = gcl.SourceQuery(filename, line, col)
    rootpath = ast_tree.find_tokens(q)
    rootpath = path_until(rootpath, is_thunk)
    if len(rootpath) <= 1:
        return None
    tup = inflate_context_tuple(rootpath, root_env)
    try:
        if isinstance(rootpath[-1], ast.Inherit):
            return tup[rootpath[-1].name]
        return rootpath[-1].eval(tup.env(tup))
    except gcl.EvaluationError as e:
        return e
Find the value of the object under the cursor.
381,165
def is_cursor_on_first_line(self):
    cursor = self.textCursor()
    cursor.movePosition(QTextCursor.StartOfBlock)
    return cursor.atStart()
Return True if cursor is on the first line
381,166
def uninstall_host(trg_queue, *hosts, **kwargs):
    # kwargs keys lost in extraction; 'user', 'group', 'mode' restored from
    # the variable names (Python 2 except syntax kept from the source)
    item_user = kwargs.pop('user', None)
    item_group = kwargs.pop('group', None)
    item_mode = kwargs.pop('mode', None)
    for host in hosts:
        try:
            down_host(trg_queue, host, user=item_user, group=item_group,
                      mode=(_c.FSQ_ITEM_MODE if item_mode is None else item_mode))
        except FSQError, e:
            raise FSQInstallError(e.errno, wrap_io_os_err(e))
    tmp_full, tmp_queue = _tmp_trg(host, fsq_path.hosts(trg_queue))
    _remove_dir(fsq_path.base(trg_queue, host), tmp_full, trg_queue)
Idempotently uninstall a host queue; should you want to subvert FSQ_ROOT settings, merely pass in an absolute path
381,167
def sentiment(self, text, method="vocabulary"):
    assert method == "vocabulary" or method == "rnn"
    endpoint = method == "vocabulary" and "sentiment" or "sentimentRNN"
    return self._er.jsonRequestAnalytics("/api/v1/" + endpoint, {"text": text})
determine the sentiment of the provided text in English language @param text: input text to categorize @param method: method to use to compute the sentiment. possible values are "vocabulary" (vocabulary based sentiment analysis) and "rnn" (neural network based sentiment classification) @returns: dict
381,168
def sg_summary_image(tensor, prefix=None, name=None):
    # the prefix separator and summary suffix were lost in extraction;
    # '/' and '-im' are assumed
    prefix = '' if prefix is None else prefix + '/'
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    if not tf.get_variable_scope().reuse:
        tf.summary.image(name + '-im', tensor)
r"""Register `tensor` to summary report as `image` Args: tensor: A tensor to log as image prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
381,169
def schema_completer(prefix):
    # several string literals (separator, provider name, vocabulary keys)
    # were lost in extraction; plausible values are restored
    from c7n import schema
    load_resources()
    components = prefix.split('.')
    if components[0] in provider.clouds.keys():
        cloud_provider = components.pop(0)
        provider_resources = provider.resources(cloud_provider)
    else:
        cloud_provider = 'aws'
        provider_resources = provider.resources('aws')
        components[0] = "aws.%s" % components[0]
    if len(components) == 1:
        choices = [r for r in provider.resources().keys()
                   if r.startswith(components[0])]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices
    if components[0] not in provider_resources.keys():
        return []
    if len(components) == 2:
        choices = ['{}.{}'.format(components[0], x)
                   for x in ('actions', 'filters')
                   if x.startswith(components[1])]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices
    elif len(components) == 3:
        resource_mapping = schema.resource_vocabulary(cloud_provider)
        return ['{}.{}.{}'.format(components[0], components[1], x)
                for x in resource_mapping[components[0]][components[1]]]
    return []
For tab-completion via argcomplete, return completion options. For the given prefix so far, return the possible options. Note that filtering via startswith happens after this list is returned.
381,170
def check_status_code(response, codes=None):
    codes = codes or [httplib.OK]
    checker = (
        codes
        if callable(codes)
        else lambda resp: resp.status_code in codes
    )
    if not checker(response):
        raise exceptions.ApiError(response, response.json())
Check HTTP status code and raise exception if incorrect. :param Response response: HTTP response :param codes: List of accepted codes or callable :raises: ApiError if code invalid
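A hedged usage sketch (a list of codes or a callable predicate both work); the URL here is just an illustration:

import requests
# assuming check_status_code as above
resp = requests.get('https://httpbin.org/status/201')
check_status_code(resp, codes=[200, 201])                     # passes
check_status_code(resp, codes=lambda r: r.status_code < 400)  # also passes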
381,171
def age(self):
    aff4_type = self.Get(self.Schema.TYPE)
    if aff4_type:
        return aff4_type.age
    else:
        return rdfvalue.RDFDatetime.Now()
RDFDatetime at which the object was created.
381,172
def align(self, referencewords, datatuple):
    targetwords = []
    for i, (word, lemma, postag) in enumerate(
            zip(datatuple[0], datatuple[1], datatuple[2])):
        if word:
            subwords = word.split("_")
            for w in subwords:
                targetwords.append((w, lemma, postag, i, len(subwords) > 1))
    referencewords = [w.lower() for w in referencewords]
    alignment = []
    for i, referenceword in enumerate(referencewords):
        found = False
        best = 0
        distance = 999999
        for j, (targetword, lemma, pos, index, multiword) in enumerate(targetwords):
            if referenceword == targetword and abs(i - j) < distance:
                found = True
                best = j
                distance = abs(i - j)
        if found:
            alignment.append(targetwords[best])
        else:
            alignment.append((None, None, None, None, False))
    return alignment
align the reference sentence with the tagged data
381,173
def adjust_opts(in_opts, config): memory_adjust = config["algorithm"].get("memory_adjust", {}) out_opts = [] for opt in in_opts: if opt.startswith("-Xmx") or (opt.startswith("-Xms") and memory_adjust.get("direction") == "decrease"): arg = opt[:4] opt = "{arg}{val}".format(arg=arg, val=adjust_memory(opt[4:], memory_adjust.get("magnitude", 1), memory_adjust.get("direction"), maximum=memory_adjust.get("maximum"))) out_opts.append(opt) return out_opts
Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively.
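A runnable worked example; adjust_memory is not shown above, so the version here is an assumed stand-in that scales a size string such as '4g' by the given magnitude:

def adjust_memory(val, magnitude, direction, maximum=None):
    # Assumed semantics: divide by magnitude when decreasing, multiply when
    # increasing; the maximum cap is ignored in this sketch.
    num, unit = int(val[:-1]), val[-1]
    num = max(1, num // magnitude) if direction == "decrease" else num * magnitude
    return "%d%s" % (num, unit)

def adjust_opts(in_opts, config):
    memory_adjust = config["algorithm"].get("memory_adjust", {})
    out_opts = []
    for opt in in_opts:
        if opt.startswith("-Xmx") or (opt.startswith("-Xms")
                                      and memory_adjust.get("direction") == "decrease"):
            arg = opt[:4]
            opt = "{arg}{val}".format(
                arg=arg,
                val=adjust_memory(opt[4:],
                                  memory_adjust.get("magnitude", 1),
                                  memory_adjust.get("direction"),
                                  maximum=memory_adjust.get("maximum")))
        out_opts.append(opt)
    return out_opts

config = {"algorithm": {"memory_adjust": {"magnitude": 2, "direction": "decrease"}}}
print(adjust_opts(["-Xms2g", "-Xmx8g", "-jar", "picard.jar"], config))
# -> ['-Xms1g', '-Xmx4g', '-jar', 'picard.jar']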
381,174
def transform(self, data, data_type='S3Prefix', content_type=None,
              compression_type=None, split_type=None, job_name=None):
    local_mode = self.sagemaker_session.local_mode
    if not local_mode and not data.startswith('s3://'):
        raise ValueError('Invalid S3 address: {}'.format(data))
    if job_name is not None:
        self._current_job_name = job_name
    else:
        base_name = self.base_transform_job_name or base_name_from_image(self._retrieve_image_name())
        self._current_job_name = name_from_base(base_name)
    if self.output_path is None:
        self.output_path = 's3://{}/{}'.format(self.sagemaker_session.default_bucket(),
                                               self._current_job_name)
    self.latest_transform_job = _TransformJob.start_new(self, data, data_type, content_type,
                                                        compression_type, split_type)
Start a new transform job. Args: data (str): Input data location in S3. data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values: * 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as inputs for the transform job. * 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as an input for the transform job. content_type (str): MIME type of the input data (default: None). compression_type (str): Compression type of the input data, if compressed (default: None). Valid values: 'Gzip', None. split_type (str): The record delimiter for the input object (default: 'None'). Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'. job_name (str): job name (default: None). If not specified, one will be generated.
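A hedged usage sketch, assuming a Transformer already constructed against a deployed SageMaker model; the bucket, model name and instance type are placeholders and running this requires AWS credentials:

from sagemaker.transformer import Transformer

# 'my-model' and 'my-bucket' are placeholders.
transformer = Transformer(model_name='my-model',
                          instance_count=1,
                          instance_type='ml.m5.xlarge',
                          output_path='s3://my-bucket/batch-output')
transformer.transform('s3://my-bucket/batch-input/',
                      data_type='S3Prefix',
                      content_type='text/csv',
                      split_type='Line')
transformer.wait()  # block until the batch transform job finishes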
381,175
def parse(self, data, doctype): self.doctype = doctype self.lexer.lineno = 0 del self.errors[:] del self.warnings[:] self.lexer.lexerror = False ast = self.parser.parse(data, lexer=self.lexer) if self.lexer.lexerror: ast = None if ast is None: self.errors.append("Couldn't visit AST.") self.errors.extend(visitor.errors) self.warnings.extend(visitor.warnings) return (ast, list(self.errors), list(self.warnings))
Parse an input string, and return an AST. doctype must have WCADocument as a baseclass.
381,176
def process_input(self, stream, value, rpc_executor): self.sensor_log.push(stream, value) if stream.important: associated_output = stream.associated_stream() self.sensor_log.push(associated_output, value) to_check = deque([x for x in self.roots]) while len(to_check) > 0: node = to_check.popleft() if node.triggered(): try: results = node.process(rpc_executor, self.mark_streamer) for result in results: result.raw_time = value.raw_time self.sensor_log.push(node.stream, result) except: self._logger.exception("Unhandled exception in graph node processing function for node %s", str(node)) if len(results) > 0: to_check.extend(node.outputs)
Process an input through this sensor graph. The tick information in value should be correct and is transferred to all results produced by nodes acting on this tick. Args: stream (DataStream): The stream the input is part of value (IOTileReading): The value to process rpc_executor (RPCExecutor): An object capable of executing RPCs in case we need to do that.
381,177
def _aggregate_metrics(self, session_group): if (self._request.aggregation_type == api_pb2.AGGREGATION_AVG or self._request.aggregation_type == api_pb2.AGGREGATION_UNSET): _set_avg_session_metrics(session_group) elif self._request.aggregation_type == api_pb2.AGGREGATION_MEDIAN: _set_median_session_metrics(session_group, self._request.aggregation_metric) elif self._request.aggregation_type == api_pb2.AGGREGATION_MIN: _set_extremum_session_metrics(session_group, self._request.aggregation_metric, min) elif self._request.aggregation_type == api_pb2.AGGREGATION_MAX: _set_extremum_session_metrics(session_group, self._request.aggregation_metric, max) else: raise error.HParamsError('Unknown aggregation_type in request: %s' % self._request.aggregation_type)
Sets the metrics of the group based on aggregation_type.
381,178
def set_settings_secret(self, password): if not isinstance(password, basestring): raise TypeError("password can only be an instance of type basestring") self._call("setSettingsSecret", in_p=[password])
Unlocks the secret data by passing the unlock password to the server. The server will cache the password for that machine. in password of type str The cipher key. raises :class:`VBoxErrorInvalidVmState` Virtual machine is not mutable.
381,179
def _split_op(self, identifier, hs_label=None, dagger=False, args=None):
    if self._isinstance(identifier, 'SymbolicLabelBase'):
        identifier = QnetAsciiDefaultPrinter()._print_SCALAR_TYPES(
            identifier.expr)
    name, total_subscript = self._split_identifier(identifier)
    total_superscript = ''
    if (hs_label not in [None, '']):
        if self._settings['show_hs_label'] == 'subscript':
            if len(total_subscript) == 0:
                total_subscript = '(' + hs_label + ')'
            else:
                total_subscript += ',(' + hs_label + ')'
        else:
            total_superscript += '(' + hs_label + ')'
    if dagger:
        total_superscript += self._dagger_sym
    args_str = ''
    if (args is not None) and (len(args) > 0):
        args_str = (self._parenth_left +
                    ",".join([self.doprint(arg) for arg in args]) +
                    self._parenth_right)
    return name, total_subscript, total_superscript, args_str
Return `name`, total `subscript`, total `superscript` and `arguments` str. All of the returned strings are fully rendered. Args: identifier (str or SymbolicLabelBase): A (non-rendered/ascii) identifier that may include a subscript. The output `name` will be the `identifier` without any subscript hs_label (str): The rendered label for the Hilbert space of the operator, or None. Returned unchanged. dagger (bool): Flag to indicate whether the operator is daggered. If True, :attr:`dagger_sym` will be included in the `superscript` (or `subscript`, depending on the settings) args (list or None): List of arguments (expressions). Each element will be rendered with :meth:`doprint`. The total list of args will then be joined with commas, enclosed with :attr:`_parenth_left` and :attr:`parenth_right`, and returned as the `arguments` string
381,180
def _localize_inputs_command(self, task_dir, inputs, user_project):
    commands = []
    for i in inputs:
        if i.recursive or not i.value:
            continue
        source_file_path = i.uri
        local_file_path = task_dir + '/' + _DATA_SUBDIR + '/' + i.docker_path
        dest_file_path = self._get_input_target_path(local_file_path)
        commands.append('mkdir -p %s' % os.path.dirname(local_file_path))
        if i.file_provider in [job_model.P_LOCAL, job_model.P_GCS]:
            if user_project:
                command = 'gsutil -u %s cp %s %s' % (
                    user_project, source_file_path, dest_file_path)
            else:
                command = 'gsutil cp %s %s' % (source_file_path, dest_file_path)
            commands.append(command)
    return '\n'.join(commands)
Returns a command that will stage inputs.
381,181
def skip_build(self): skip_msg = self.config.get(, ) return ( os.environ.get() == or self.info[] or skip_msg in self.info[][] )
Check if build should be skipped
381,182
def feed(self, data): send = self._send_to_parser draw = self.listener.draw match_text = self._text_pattern.match taking_plain_text = self._taking_plain_text length = len(data) offset = 0 while offset < length: if taking_plain_text: match = match_text(data, offset) if match: start, offset = match.span() draw(data[start:offset]) else: taking_plain_text = False else: taking_plain_text = send(data[offset:offset + 1]) offset += 1 self._taking_plain_text = taking_plain_text
Consume some data and advance the state as necessary. :param str data: a blob of data to feed from.
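A self-contained sketch of the same two-mode feed loop, with a toy parser that treats '\x1b' as the start of a two-character escape; the pattern, listener and escape handling are assumptions for illustration:

import re

class Demo:
    # Plain text is anything up to the next escape introducer.
    _text_pattern = re.compile(r'[^\x1b]+')

    def __init__(self):
        self._taking_plain_text = True
        self.drawn = []

    def draw(self, text):
        self.drawn.append(text)

    def _send_to_parser(self, char):
        # Toy parser: swallow exactly one byte after ESC, then resume text mode.
        return char != '\x1b'

    def feed(self, data):
        taking_plain_text = self._taking_plain_text
        offset, length = 0, len(data)
        while offset < length:
            if taking_plain_text:
                match = self._text_pattern.match(data, offset)
                if match:
                    start, offset = match.span()
                    self.draw(data[start:offset])
                else:
                    taking_plain_text = False
            else:
                taking_plain_text = self._send_to_parser(data[offset:offset + 1])
                offset += 1
        self._taking_plain_text = taking_plain_text

d = Demo()
d.feed('hello\x1bXworld')
print(d.drawn)  # ['hello', 'world']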
381,183
def modify_signature(self, signature): dic = signature.to_creator(for_modify=True) self.request('ModifySignature', {'signature': dic})
Modify an existing signature. Can modify the content, contenttype and name. An unset attribute will not delete the attribute but leave it untouched. :param signature: a zobject.Signature object, with modified content/contenttype/name; the id should be present and valid. The name alone does not allow identifying the signature for this operation.
381,184
def set_logger_level(logger_name, log_level='error'):
    logging.getLogger(logger_name).setLevel(
        LOG_LEVELS.get(log_level.lower(), logging.ERROR)
    )
Tweak a specific logger's logging level
381,185
def _realValue_to_float(value_str): if REAL_VALUE.match(value_str): value = float(value_str) else: value = None return value
Convert a value string that conforms to DSP0004 `realValue`, into the corresponding float and return it. The special values 'INF', '-INF', and 'NAN' are supported. Note that the Python `float()` function supports a superset of input formats compared to the `realValue` definition in DSP0004. For example, "1." is allowed for `float()` but not for `realValue`. In addition, it has the same support for Unicode decimal digits as `int()`. Therefore, the match patterns explicitly check for US-ASCII digits, and the `float()` function should never raise `ValueError`. Returns None if the value string does not conform to `realValue`.
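REAL_VALUE is defined elsewhere in the module; the regex below is an assumed stand-in following the DSP0004 grammar, which is enough to demonstrate the behavior described above:

import re

# Assumed stand-in for the module-level REAL_VALUE pattern: a DSP0004
# realValue requires at least one digit after the decimal point, and
# also admits the special values INF, -INF and NAN.
REAL_VALUE = re.compile(r'^([+-]?[0-9]*\.[0-9]+([eE][+-]?[0-9]+)?|[+-]?INF|NAN)$')

def _realValue_to_float(value_str):
    if REAL_VALUE.match(value_str):
        value = float(value_str)
    else:
        value = None
    return value

print(_realValue_to_float('1.5'))      # 1.5
print(_realValue_to_float('-.25e2'))   # -25.0
print(_realValue_to_float('INF'))      # inf
print(_realValue_to_float('1.'))       # None (float() allows it, realValue does not)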
381,186
def dump(data, stream=None, **kwds):
    class OrderedDumper(SafeDumper):
        pass

    def _dict_representer(dumper, data):
        return dumper.represent_mapping(
            original_yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
            list(data.items()))

    def _long_str_representer(dumper, data):
        if data.find("\n") != -1:
            # Tabs and trailing whitespace would block literal block style.
            data = data.replace("\t", " ")
            data = "\n".join([p.rstrip() for p in data.split("\n")])
            return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
        else:
            return dumper.represent_scalar('tag:yaml.org,2002:str', data)

    def _default_representer(dumper, data):
        return _long_str_representer(dumper, str(data))

    OrderedDumper.add_representer(str, _long_str_representer)
    OrderedDumper.add_representer(OrderedDict, _dict_representer)
    OrderedDumper.add_representer(None, _default_representer)
    s = original_yaml.dump(data, stream, OrderedDumper, encoding='utf-8',
                           allow_unicode=True, default_flow_style=False,
                           indent=4, **kwds)
    if s is not None:
        return s.decode('utf-8')
    else:
        return
Serialize a Python object into a YAML stream. If stream is None, return the produced string instead. Dict keys are produced in the order in which they appear in OrderedDicts. Safe version. If objects are not "conventional" objects, they will be dumped converted to string with the str() function. They will then not be recovered when loading with the load() function.
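Assuming original_yaml is the PyYAML module and SafeDumper is yaml.SafeDumper, usage of the dump above looks like this:

import yaml as original_yaml
from yaml import SafeDumper
from collections import OrderedDict

doc = OrderedDict([
    ('name', 'example'),
    ('script', 'echo one\necho two'),
])
print(dump(doc))
# name: example
# script: |-
#     echo one
#     echo two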
381,187
def file_sort(my_list): def alphanum_key(key): return [int(s) if s.isdigit() else s for s in re.split("([0-9]+)", key)] my_list.sort(key=alphanum_key) return my_list
Sort a list of files in a nice way, e.g. item-10 will be after item-9.
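Usage of file_sort; note that a plain lexicographic sort puts 'item-10' before 'item-9':

import re

def file_sort(my_list):
    def alphanum_key(key):
        # Split 'item-10' into ['item-', 10, ''] so numbers compare numerically.
        return [int(s) if s.isdigit() else s for s in re.split("([0-9]+)", key)]
    my_list.sort(key=alphanum_key)
    return my_list

files = ['item-10', 'item-9', 'item-100', 'item-2']
print(sorted(files))     # ['item-10', 'item-100', 'item-2', 'item-9']
print(file_sort(files))  # ['item-2', 'item-9', 'item-10', 'item-100']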
381,188
def parse_checks(self, conf): checks = conf.get('checks', conf.get('pages', [])) checks = list(self.unpack_batches(checks)) checks = list(self.unpack_templates(checks, conf.get('templates', {}))) self.inject_missing_names(checks) for check in checks: self.inject_scenarios(check, conf.get('scenarios', {})) self.inject_notifiers(check, conf.get('notifiers', {})) self.expand_schedule(check) return checks
Unpack configuration from human-friendly form to strict check definitions.
381,189
def UnicodeFromCodePage(string): codepage = ctypes.windll.kernel32.GetOEMCP() try: return string.decode("cp%s" % codepage) except UnicodeError: try: return string.decode("utf16", "ignore") except UnicodeError: return string.decode("utf8", "ignore")
Attempt to coerce string into a unicode object.
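The same try-encodings-in-order idea as a standalone helper. The encoding list is an assumption (the original asks Windows for the live OEM code page via GetOEMCP), and since single-byte code pages accept any byte sequence, the sketch puts utf-16 first so the fallback path is actually exercised:

def decode_with_fallbacks(raw, encodings=('utf-16', 'utf-8', 'latin-1')):
    """Try each encoding in turn; only the last one gets errors='ignore'."""
    for enc in encodings[:-1]:
        try:
            return raw.decode(enc)
        except UnicodeError:
            continue
    return raw.decode(encodings[-1], 'ignore')

# utf-16 fails on the odd-length input, utf-8 succeeds.
print(decode_with_fallbacks('héllo'.encode('utf-8')))  # héllo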
381,190
def _minimal_y(self, p): y0 = self.pattern_y y1 = y0 + self._pixelsize(p)/2. return y0 if self._count_pixels_on_line(y0, p) < self._count_pixels_on_line(y1, p) else y1
For the specified y and one offset by half a pixel, return the one that results in the fewest pixels turned on, so that when the thickness has been enforced to be at least one pixel, no extra pixels are needlessly included (which would cause double-width lines).
381,191
def load_config(strCsvCnfg, lgcTest=False, lgcPrint=True): dicCnfg = {} with open(strCsvCnfg, ) as fleConfig: csvIn = csv.reader(fleConfig, delimiter=, skipinitialspace=True) for lstTmp in csvIn: if lstTmp and not (lstTmp[0][0] == ): strParamKey = lstTmp[0].split()[0] strParamVlu = lstTmp[0].split()[1] dicCnfg[strParamKey] = strParamVlu dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = int(dicCnfg[]) dicCnfg[] = int(dicCnfg[]) if lgcPrint: if dicCnfg[] == : print( + str(dicCnfg[])) print( + str(dicCnfg[])) elif dicCnfg[] == : print( + str(dicCnfg[])) print( + str(dicCnfg[])) dicCnfg[] = int(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = int(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = float(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = int(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = tuple([int(dicCnfg[]), int(dicCnfg[])]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print() for strTmp in dicCnfg[]: print( + str(strTmp)) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print() print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print() print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = (dicCnfg[] == ) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print( + ) print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) if dicCnfg[]: dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print() print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print() print( + str(dicCnfg[])) dicCnfg[] = ast.literal_eval(dicCnfg[]) if lgcPrint: print( + str(dicCnfg[])) if lgcTest: dicCnfg[] = (strDir + dicCnfg[]) dicCnfg[] = (strDir + dicCnfg[]) dicCnfg[] = (strDir + dicCnfg[]) dicCnfg[] = (strDir + dicCnfg[]) dicCnfg[] = (strDir + dicCnfg[]) varNumRun = len(dicCnfg[]) for idxRun in range(varNumRun): dicCnfg[][idxRun] = ( strDir + dicCnfg[][idxRun] ) return dicCnfg
Load py_pRF_mapping config file. Parameters ---------- strCsvCnfg : string Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of this function will be prepended to config file paths. lgcPrint : Boolean Print config parameters? Returns ------- dicCnfg : dict Dictionary containing parameter names (as keys) and parameter values (as values). For example, `dicCnfg['varTr']` contains a float, such as `2.94`.
381,192
def _setup_conn(**kwargs):
    kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')
    kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')
    context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')
    if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):
        with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:
            kcfg.write(base64.b64decode(kubeconfig_data))
            kubeconfig = kcfg.name
    if not (kubeconfig and context):
        if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):
            salt.utils.versions.warn_until(
                'Sodium',
                "Kubernetes configuration via url, certificates or token is deprecated. "
                "Use 'kubeconfig' and 'context' instead.")
            try:
                return _setup_conn_old(**kwargs)
            except Exception:
                raise CommandExecutionError('Old style kubernetes configuration is not valid anymore')
        else:
            raise CommandExecutionError(
                "Invalid kubernetes configuration. Parameters 'kubeconfig' and 'context' are required.")
    kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)
    return {'kubeconfig': kubeconfig, 'context': context}
Setup kubernetes API connection singleton
381,193
def is_valid_data(obj): if obj: try: tmp = json.dumps(obj, default=datetime_encoder) del tmp except (TypeError, UnicodeDecodeError): return False return True
Check if data is JSON serializable.
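datetime_encoder is defined elsewhere in the module; the stand-in below assumes it ISO-formats datetimes, which is enough to exercise both outcomes:

import json
import datetime

def datetime_encoder(obj):
    # Assumed stand-in: ISO-format datetimes, reject everything else.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError("not JSON serializable: %r" % (obj,))

def is_valid_data(obj):
    if obj:
        try:
            tmp = json.dumps(obj, default=datetime_encoder)
            del tmp
        except (TypeError, UnicodeDecodeError):
            return False
    return True

print(is_valid_data({'ts': datetime.datetime.now()}))  # True
print(is_valid_data({'bad': {1, 2, 3}}))               # False (sets are not JSON)
print(is_valid_data({}))                               # True (falsy passes through)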
381,194
def fetch(self, category=CATEGORY_BUILD): kwargs = {} items = super().fetch(category, **kwargs) return items
Fetch the builds from the url. The method retrieves, from a Jenkins url, the builds updated since the given date. :param category: the category of items to fetch :returns: a generator of builds
381,195
def is_read_only(cls, db: DATABASE_SUPPORTER_FWD_REF, logger: logging.Logger = None) -> bool: def convert_enums(row_): return [True if x == 'Y' else (False if x == 'N' else None) for x in row_] try: sql = rows = db.fetchall(sql) for row in rows: dbname = row[0] prohibited = convert_enums(row[1:]) if any(prohibited): if logger: logger.debug( "MySQL.is_read_only(): FAIL: database privileges " "wrong: dbname={}, prohibited={}".format( dbname, prohibited ) ) return False except mysql.OperationalError: pass try: sql = rows = db.fetchall(sql) if not rows or len(rows) > 1: return False prohibited = convert_enums(rows[0]) if any(prohibited): if logger: logger.debug( "MySQL.is_read_only(): FAIL: GLOBAL privileges " "wrong: prohibited={}".format(prohibited)) return False except mysql.OperationalError: pass return True
Do we have read-only access?
381,196
def cancel(self, mark_completed_as_cancelled=False): with self._lock: if not self._completed or mark_completed_as_cancelled: self._cancelled = True callbacks = self._prepare_done_callbacks() callbacks()
Cancel the future. If the future has not been started yet, it will never start running. If the future is already running, it will run until the worker function exits. The worker function can check if the future has been cancelled using the :meth:`cancelled` method. If the future has already been completed, it will not be marked as cancelled unless you set *mark_completed_as_cancelled* to :const:`True`. :param mark_completed_as_cancelled: If this is :const:`True` and the future has already completed, it will be marked as cancelled anyway.
381,197
def clean_names(lines, ensure_unique_names=False, strip_prefix=False, make_database_safe=False): names = {} for row in lines: if strip_prefix: row[] = row[][row[].find() + 1:] if row[] is not None: row[] = row[][row[].find( ) + 1:] if ensure_unique_names: i = 1 while (row[] if i == 1 else row[] + "-" + str(i)) in names: i += 1 names[row[] if i == 1 else row[] + "-" + str(i)] = 1 if i > 1: row[] = row[] + "-" + str(i) if make_database_safe: row[] = row[].replace("-", "_") return lines
Clean the names. Options to: - strip prefixes on names - enforce unique names - make database safe names by converting - to _
381,198
def download_supplementary_files(self, directory='series', download_sra=True,
                                 email=None, sra_kwargs=None, nproc=1):
    if sra_kwargs is None:
        sra_kwargs = dict()
    if directory == 'series':
        dirpath = os.path.abspath(self.get_accession() + "_Supp")
        utils.mkdir_p(dirpath)
    else:
        dirpath = os.path.abspath(directory)
        utils.mkdir_p(dirpath)
    downloaded_paths = dict()
    if nproc == 1:
        # No parallelization.
        downloaded_paths = dict()
        for gsm in itervalues(self.gsms):
            logger.info("Downloading SRA files for %s series\n" % gsm.name)
            paths = gsm.download_supplementary_files(email=email,
                                                     download_sra=download_sra,
                                                     directory=dirpath,
                                                     sra_kwargs=sra_kwargs)
            downloaded_paths[gsm.name] = paths
    elif nproc > 1:
        # Parallelization enabled.
        downloaders = list()
        for gsm in itervalues(self.gsms):
            downloaders.append([gsm, download_sra, email, dirpath, sra_kwargs])
        p = Pool(nproc)
        results = p.map(_supplementary_files_download_worker, downloaders)
        downloaded_paths = dict(results)
    else:
        raise ValueError("Nproc should be non-negative: %s" % str(nproc))
    return downloaded_paths
Download supplementary data. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: directory (:obj:`str`, optional): Directory to download the data (in this directory function will create new directory with the files), by default this will be named with the series name + _Supp. download_sra (:obj:`bool`, optional): Indicates whether to download SRA raw data too. Defaults to True. email (:obj:`str`, optional): E-mail that will be provided to the Entrez. Defaults to None. sra_kwargs (:obj:`dict`, optional): Kwargs passed to the GSM.download_SRA method. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). Returns: :obj:`dict`: Downloaded data for each of the GSM
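A hedged usage sketch with GEOparse; the accession and e-mail are placeholders and the calls download data from NCBI, so this is illustrative rather than a test:

import GEOparse

# Placeholder accession and e-mail; downloading contacts GEO/SRA servers.
gse = GEOparse.get_GEO(geo="GSE1563", destdir="./")
paths = gse.download_supplementary_files(
    directory="GSE1563_supp",
    download_sra=False,          # skip raw SRA data
    email="you@example.org")     # required by Entrez when SRA is used
for gsm_name, files in paths.items():
    print(gsm_name, files)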
381,199
def go_to_line(self, line): cursor = self.textCursor() cursor.setPosition(self.document().findBlockByNumber(line - 1).position()) self.setTextCursor(cursor) return True
Moves the text cursor to given line. :param line: Line to go to. :type line: int :return: Method success. :rtype: bool