Dataset columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
7,200
def every_other(iterable):
    items = iter(iterable)
    while True:
        try:
            yield next(items)
            next(items)
        except StopIteration:
            return
Yield every other item from the iterable >>> ' '.join(every_other('abcdefg')) 'a c e g'
7,201
def _check_dn(self, dn, attr_value): if dn is not None: self._error() if not is_dn(attr_value): self._error( % attr_value)
Check dn attribute for issues.
7,202
def devop_write(self, args, bustype): usage = "Usage: devop write <spi|i2c> name bus address regstart count <bytes>" if len(args) < 5: print(usage) return name = args[0] bus = int(args[1],base=0) address = int(args[2],base=0) reg = int(args[3],base=0) count = int(args[4],base=0) args = args[5:] if len(args) < count: print(usage) return bytes = [0]*128 for i in range(count): bytes[i] = int(args[i],base=0) self.master.mav.device_op_write_send(self.target_system, self.target_component, self.request_id, bustype, bus, address, name, reg, count, bytes) self.request_id += 1
write to a device
7,203
def help(i): o=i.get(,) m=i.get(,) if m==: m= h= +cfg[].replace(, m)+ h+= h+= for q in sorted(cfg[]): s=q desc=cfg[][q].get(,) if desc!=: s+=+desc h+=+s+ h+= h+= if m==: for q in sorted(cfg[]): if q not in cfg[]: s=q desc=cfg[][q].get(,) if desc!=: s+=+desc h+=+s+ else: r=list_actions({:m}) if r[]>0: return r actions=r[] if len(actions)==0: h+= else: for q in sorted(actions.keys()): s=q desc=actions[q].get(,) if desc!=: s+=+desc h+=+s+ if m==: h+= h+=cfg[] h+= h+=cfg[] if o==: out(h) return {:0, :h}
Input: { } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 help - help text }
7,204
def split(self, k): if not 1 <= k <= self.num_rows - 1: raise ValueError("Invalid value of k. k must be between 1 and the" "number of rows - 1") rows = np.random.permutation(self.num_rows) first = self.take(rows[:k]) rest = self.take(rows[k:]) for column_label in self._formats: first._formats[column_label] = self._formats[column_label] rest._formats[column_label] = self._formats[column_label] return first, rest
Return a tuple of two tables where the first table contains ``k`` rows randomly sampled and the second contains the remaining rows. Args: ``k`` (int): The number of rows randomly sampled into the first table. ``k`` must be between 1 and ``num_rows - 1``. Raises: ``ValueError``: ``k`` is not between 1 and ``num_rows - 1``. Returns: A tuple containing two instances of ``Table``. >>> jobs = Table().with_columns( ... 'job', make_array('a', 'b', 'c', 'd'), ... 'wage', make_array(10, 20, 15, 8)) >>> jobs job | wage a | 10 b | 20 c | 15 d | 8 >>> sample, rest = jobs.split(3) >>> sample # doctest: +SKIP job | wage c | 15 a | 10 b | 20 >>> rest # doctest: +SKIP job | wage d | 8
7,205
def forget_subject(sid):
    sub = subject(sid)
    if sub.path in subject._cache:
        del subject._cache[sub.path]
    else:
        for (k, v) in six.iteritems(subject._cache):
            if v is sub:
                del subject._cache[k]
                break
    return None
forget_subject(sid) causes neuropythy's freesurfer module to forget about cached data for the subject with subject id sid. The sid may be any sid that can be passed to the subject() function. This function is useful for batch-processing of subjects in a memory-limited environment; e.g., if you run out of memory while processing FreeSurfer subjects it is possibly because neuropythy is caching all of their data instead of freeing it.
7,206
def list_passwords(kwargs=None, call=None): response = _query(, ) ret = {} for item in response[]: if in item: server = item[][] if server not in ret: ret[server] = [] ret[server].append(item) return ret
List all passwords on the account .. versionadded:: 2015.8.0
7,207
def parse_mmtf_header(infile): infodict = {} mmtf_decoder = mmtf.parse(infile) infodict[] = mmtf_decoder.deposition_date infodict[] = mmtf_decoder.release_date try: infodict[] = [x.decode() for x in mmtf_decoder.experimental_methods] except AttributeError: infodict[] = [x for x in mmtf_decoder.experimental_methods] infodict[] = mmtf_decoder.resolution infodict[] = mmtf_decoder.title group_name_exclude = [] chem_comp_type_exclude = [, ] chemicals = list(set([mmtf_decoder.group_list[idx][] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx][].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx][] not in group_name_exclude])) infodict[] = chemicals return infodict
Parse an MMTF file and return basic header-like information. Args: infile (str): Path to MMTF file Returns: dict: Dictionary of parsed header Todo: - Can this be sped up by not parsing the 3D coordinate info somehow? - OR just store the sequences when this happens since it is already being parsed.
7,208
def integrate(self, outevent, inevent): assert outevent not in self for function in compat_itervalues(self.functions): assert outevent not in function assert inevent in function for call in compat_itervalues(function.calls): assert outevent not in call if call.callee_id != function.id: assert call.ratio is not None for cycle in self.cycles: total = inevent.null() for function in compat_itervalues(self.functions): total = inevent.aggregate(total, function[inevent]) self[inevent] = total total = inevent.null() for function in compat_itervalues(self.functions): total = inevent.aggregate(total, function[inevent]) self._integrate_function(function, outevent, inevent) self[outevent] = total
Propagate function time ratio along the function calls. Must be called after finding the cycles. See also: - http://citeseer.ist.psu.edu/graham82gprof.html
7,209
def raise_for_missing_name(self, line: str, position: int, namespace: str, name: str) -> None: self.raise_for_missing_namespace(line, position, namespace, name) if self.has_enumerated_namespace(namespace) and not self.has_enumerated_namespace_name(namespace, name): raise MissingNamespaceNameWarning(self.get_line_number(), line, position, namespace, name) if self.has_regex_namespace(namespace) and not self.has_regex_namespace_name(namespace, name): raise MissingNamespaceRegexWarning(self.get_line_number(), line, position, namespace, name)
Raise an exception if the namespace is not defined or if it does not validate the given name.
7,210
def pause(self, remaining_pause_cycles): url = urljoin(self._url, ) elem = ElementTreeBuilder.Element(self.nodename) elem.append(Resource.element_for_value(, remaining_pause_cycles)) body = ElementTree.tostring(elem, encoding=) response = self.http_request(url, , body, { : }) if response.status not in (200, 201, 204): self.raise_http_error(response) self.update_from_element(ElementTree.fromstring(response.read()))
Pause a subscription
7,211
def rule_role(self, **kwargs):
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index_key = ET.SubElement(rule, "index")
    # kwargs keys assumed from the element names; the originals were stripped.
    index_key.text = kwargs.pop('index')
    role = ET.SubElement(rule, "role")
    role.text = kwargs.pop('role')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
7,212
def _read_bytes_from_non_framed_body(self, b): _LOGGER.debug("starting non-framed body read") bytes_to_read = self.body_length _LOGGER.debug("%d bytes requested; reading %d bytes", b, bytes_to_read) ciphertext = self.source_stream.read(bytes_to_read) if len(self.output_buffer) + len(ciphertext) < self.body_length: raise SerializationError("Total message body contents less than specified in body description") if self.verifier is not None: self.verifier.update(ciphertext) tag = deserialize_tag(stream=self.source_stream, header=self._header, verifier=self.verifier) aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string( content_type=self._header.content_type, is_final_frame=True ) associated_data = assemble_content_aad( message_id=self._header.message_id, aad_content_string=aad_content_string, seq_num=1, length=self.body_length, ) self.decryptor = Decryptor( algorithm=self._header.algorithm, key=self._derived_data_key, associated_data=associated_data, iv=self._unframed_body_iv, tag=tag, ) plaintext = self.decryptor.update(ciphertext) plaintext += self.decryptor.finalize() self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier) return plaintext
Reads the requested number of bytes from a streaming non-framed message body. :param int b: Number of bytes to read :returns: Decrypted bytes from source stream :rtype: bytes
7,213
async def openurl(url, **opts): if url.find() == -1: newurl = alias(url) if newurl is None: raise s_exc.BadUrl(f) url = newurl info = s_urlhelp.chopurl(url) info.update(opts) host = info.get() port = info.get() auth = None user = info.get() if user is not None: passwd = info.get() auth = (user, {: passwd}) scheme = info.get() if scheme == : path = info.get() name = info.get(, ) host = info.get() if host: path = path.strip() path = os.path.join(host, path) if in path: path, name = path.split() full = os.path.join(path, ) link = await s_link.unixconnect(full) elif scheme == : path, name = info.get().split() link = await s_link.unixconnect(path) else: path = info.get() name = info.get(, path[1:]) sslctx = None if scheme == : certpath = info.get() certdir = s_certdir.CertDir(certpath) sslctx = certdir.getClientSSLContext() link = await s_link.connect(host, port, ssl=sslctx) prox = await Proxy.anit(link, name) prox.onfini(link) try: await prox.handshake(auth=auth) except Exception: await prox.fini() raise return prox
Open a URL to a remote telepath object. Args: url (str): A telepath URL. **opts (dict): Telepath connect options. Returns: (synapse.telepath.Proxy): A telepath proxy object. The telepath proxy may then be used for sync or async calls: proxy = openurl(url) value = proxy.getFooThing() ... or ... proxy = await openurl(url) valu = await proxy.getFooThing() ... or ... async with await openurl(url) as proxy: valu = await proxy.getFooThing()
7,214
def getWorkflowDir(workflowID, configWorkDir=None):
    workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir()
    if not os.path.exists(workDir):
        raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not "
                           "exist." % workDir)
    # Assumed naming scheme for the per-workflow subdirectory; the original
    # derivation of workflowDir was stripped from this row.
    workflowDir = os.path.join(workDir, 'toil-%s' % workflowID)
    return workflowDir
Returns a path to the directory where worker directories and the cache will be located for this workflow. :param str workflowID: Unique identifier for the workflow :param str configWorkDir: Value passed to the program using the --workDir flag :return: Path to the workflow directory :rtype: str
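A minimal usage sketch, assuming the function is importable and TOIL_WORKDIR is unset (the workflow ID shown is made up):

# Resolution order: configWorkDir, then $TOIL_WORKDIR, then the system temp dir.
wf_dir = getWorkflowDir('1a2b3c', configWorkDir=None)
print(wf_dir)  # a per-workflow path under the chosen work directory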
7,215
def _get_or_add_image(self, image_file): image_part, rId = self.part.get_or_add_image_part(image_file) desc, image_size = image_part.desc, image_part._px_size return rId, desc, image_size
Return an (rId, description, image_size) 3-tuple identifying the related image part containing *image_file* and describing the image.
7,216
def _solve(self): while len(self._remove_constr) > 0: self._remove_constr.pop().delete() try: self._prob.solve(lp.ObjectiveSense.Maximize) except lp.SolverError as e: raise_from(FluxBalanceError(.format( e), result=self._prob.result), e) finally: self._remove_constr = self._temp_constr self._temp_constr = []
Solve the problem with the current objective.
7,217
def extract_subsection(im, shape):
    shape = sp.array(shape)
    if shape[0] < 1:
        shape = sp.array(im.shape) * shape
    center = sp.array(im.shape) / 2
    s_im = []
    for dim in range(im.ndim):
        r = shape[dim] / 2
        lower_im = sp.amax((center[dim] - r, 0))
        upper_im = sp.amin((center[dim] + r, im.shape[dim]))
        s_im.append(slice(int(lower_im), int(upper_im)))
    return im[tuple(s_im)]
r""" Extracts the middle section of a image Parameters ---------- im : ND-array Image from which to extract the subsection shape : array_like Can either specify the size of the extracted section or the fractional size of the image to extact. Returns ------- image : ND-array An ND-array of size given by the ``shape`` argument, taken from the center of the image. Examples -------- >>> import scipy as sp >>> from porespy.tools import extract_subsection >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]]) >>> print(im) [[1 1 1 1] [1 2 2 2] [1 2 3 3] [1 2 3 4]] >>> im = extract_subsection(im=im, shape=[2, 2]) >>> print(im) [[2 2] [2 3]]
7,218
def update_with_result(self, result): assert isinstance(result, dict), "%s is not a dictionary" % result for type in ("instance", "plugin"): id = (result[type] or {}).get("id") is_context = not id if is_context: item = self.instances[0] else: item = self.items.get(id) if item is None: continue item.isProcessing = False item.currentProgress = 1 item.processed = True item.hasWarning = item.hasWarning or any([ record["levelno"] == logging.WARNING for record in result["records"] ]) if result.get("error"): item.hasError = True item.amountFailed += 1 else: item.succeeded = True item.amountPassed += 1 item.duration += result["duration"] item.finishedAt = time.time() if item.itemType == "plugin" and not item.actionsIconVisible: actions = list(item.actions) for action in list(actions): if action["on"] == "failed" and not item.hasError: actions.remove(action) if action["on"] == "succeeded" and not item.succeeded: actions.remove(action) if action["on"] == "processed" and not item.processed: actions.remove(action) if actions: item.actionsIconVisible = True class DummySection(object): hasWarning = False hasError = False succeeded = False section_item = DummySection() for section in self.sections: if item.itemType == "plugin" and section.name == item.verb: section_item = section if (item.itemType == "instance" and section.name == item.category): section_item = section section_item.hasWarning = ( section_item.hasWarning or item.hasWarning ) section_item.hasError = section_item.hasError or item.hasError section_item.succeeded = section_item.succeeded or item.succeeded section_item.isProcessing = False
Update item-model with result from host State is sent from host after processing had taken place and represents the events that took place; including log messages and completion status. Arguments: result (dict): Dictionary following the Result schema
7,219
def restore_flattened_dict(i): a={} b=i[] first=True for x in b: if first: first=False y=x[1:2] if y==: a=[] else: a={} set_by_flat_key({:a, :x, :b[x]}) return {:0, : a}
Input: { dict - flattened dict } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 dict - restored dict }
7,220
def _store_variable(self, j, key, m, value):
    if hasattr(value, 'copy'):
        v = value.copy()
    else:
        v = value
    self.history[j][key][m].append(v)
Store a copy of the variable in the history
7,221
def get_feature_info(feature): dimensions = feature.findall() for dim in dimensions: if dim.attrib[] == : rt = dim.text elif dim.attrib[] == : mz = dim.text return {: float(rt), : float(mz), : int(feature.find().text), : float(feature.find().text), }
Returns a dict with feature information
7,222
def get_gene_symbols(chrom, start, stop):
    gene_symbols = query_gene_symbol(chrom, start, stop)
    # Separator assumed; the original join string was stripped.
    logger.debug("Found gene symbols: {0}".format(", ".join(gene_symbols)))
    return gene_symbols
Get the gene symbols that an interval overlaps
7,223
def view_count_plus(slug):
    entry = TabWiki.update(
        view_count=TabWiki.view_count + 1,
    ).where(TabWiki.uid == slug)
    entry.execute()
View count plus one.
7,224
def getInvestigators(self, tags = None, seperator = ";", _getTag = False): if tags is None: tags = [] elif isinstance(tags, str): tags = [, tags] else: tags.append() return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag)
Returns a list of the names of investigators. The optional arguments are ignored. # Returns `list [str]` > A list of all the found investigators' names
7,225
def translate_char(source_char, carrier, reverse=False, encoding=False):
    if not isinstance(source_char, unicode) and encoding:
        source_char = source_char.decode(encoding)
    elif not isinstance(source_char, unicode):
        raise AttributeError(u"`source_char` must be decoded to `unicode` or set `encoding` attribute to decode `source_char`")
    if len(source_char) > 1:
        raise AttributeError(u"`source_char` must be a letter. use `translate` method instead.")
    translate_dictionary = _loader.translate_dictionaries[carrier]
    if not reverse:
        translate_dictionary = translate_dictionary[0]
    else:
        translate_dictionary = translate_dictionary[1]
    if not translate_dictionary:
        return source_char
    return translate_dictionary.get(source_char, source_char)
u"""translate unicode emoji character to unicode carrier emoji character (or reverse) Attributes: source_char - emoji character. it must be unicode instance or have to set `encoding` attribute to decode carrier - the target carrier reverse - if you want to translate CARRIER => UNICODE, turn it True encoding - encoding name for decode (Default is None)
7,226
def complete(self, stream):
    assert not self.is_complete()
    self._marker.addInputPort(outputPort=stream.oport)
    self.stream.oport.schema = stream.oport.schema
    self._pending_schema._set(self.stream.oport.schema)
    stream.oport.operator._start_op = True
Complete the pending stream. Any connections made to :py:attr:`stream` are connected to `stream` once this method returns. Args: stream(Stream): Stream that completes the connection.
7,227
def _get_default_namespace(self) -> Optional[Namespace]: return self._get_query(Namespace).filter(Namespace.url == self._get_namespace_url()).one_or_none()
Get the reference BEL namespace if it exists.
7,228
def _check_inplace_setting(self, value): if self._is_mixed_type: if not self._is_numeric_mixed_type: try: if np.isnan(value): return True except Exception: pass raise TypeError( ) return True
check whether we allow in-place setting with this type of value
7,229
def update_transfer_job(self, job_name, body):
    body = self._inject_project_id(body, BODY, PROJECT_ID)
    return (
        self.get_conn()
        .transferJobs()
        .patch(jobName=job_name, body=body)
        .execute(num_retries=self.num_retries)
    )
Updates a transfer job that runs periodically. :param job_name: (Required) Name of the job to be updated :type job_name: str :param body: A request body, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body :type body: dict :return: If successful, TransferJob. :rtype: dict
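A hedged usage sketch; `hook` stands for an instance of the surrounding hook class, the job name is hypothetical, and the body follows the transferJobs.patch request shape linked above:

body = {
    'transferJob': {'description': 'nightly sync'},
    'updateTransferJobFieldMask': 'description',
}
updated_job = hook.update_transfer_job(job_name='transferJobs/12345', body=body)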
7,230
def add_output(self, address, value, unit='satoshi'):
    value_satoshi = self.from_unit_to_satoshi(value, unit)
    if self.verbose:
        print("Adding output of: %s satoshi (%.8f)" % (
            value_satoshi, (value_satoshi / 1e8)
        ))
    # Output dict keys reconstructed from the parameter names.
    self.outs.append({
        'address': address,
        'value': value_satoshi
    })
Add an output (a person who will receive funds via this tx). If no unit is specified, satoshi is implied.
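A small sketch of the unit handling described above; `tx` stands for an instance of the surrounding class, the address is a placeholder, and the 'btc' unit name is an assumption about from_unit_to_satoshi:

tx.add_output('1ExampleAddressXXXXXXXXXXXXXXXXXX', 50000)                # satoshi implied
tx.add_output('1ExampleAddressXXXXXXXXXXXXXXXXXX', 0.0005, unit='btc')   # same amount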
7,231
def _select_date_range(self, lines): headers = [] num_lev = [] dates = [] for idx, line in enumerate(lines): if line[0] == : year, month, day, hour = map(int, line[13:26].split()) try: date = datetime.datetime(year, month, day, hour) except ValueError: date = datetime.datetime(year, month, day) if self.begin_date <= date <= self.end_date: headers.append(idx) num_lev.append(int(line[32:36])) dates.append(date) if date > self.end_date: break if len(dates) == 0: raise ValueError() begin_idx = min(headers) end_idx = max(headers) + num_lev[-1] selector = np.zeros(len(lines), dtype=bool) selector[begin_idx:end_idx + 1] = True selector[headers] = False body = .join([line for line in itertools.compress(lines, selector)]) selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1] header = .join([line for line in itertools.compress(lines, selector)]) dates_long = np.repeat(dates, num_lev) return body, header, dates_long, dates
Identify lines containing headers within the range begin_date to end_date. Parameters ---------- lines: list list of lines from the IGRA2 data file.
7,232
def _build(self, name, **params): log = self._getparam(, self._discard, **params) rebuild = False wparams = params.copy() wparams[] = False for path in list(self._watch.paths_open): if path in self.modules: continue try: self._watch.remove(path, **wparams) rebuild = True except Exception as e: log.warning("Remove of watched module %r failed -- %s", path, e) log.debug("Removed watch for path %r", path) for path in list(self.modules): if path not in self._watch.paths_open: try: self._watch.add(path, **wparams) rebuild = True except Exception as e: log.error("watch failed on module %r -- %s", path, e) continue if rebuild: self._watch.commit(**params)
Rebuild operations by removing open modules that no longer need to be watched, and adding new modules if they are not currently being watched. This is done by comparing self.modules to watch_files.paths_open
7,233
def get_or_create_media(self, api_media): return Media.objects.get_or_create(site_id=self.site_id, wp_id=api_media["ID"], defaults=self.api_object_data("media", api_media))
Find or create a Media object given API data. :param api_media: the API data for the Media :return: a tuple of an Media instance and a boolean indicating whether the Media was created or not
7,234
def get_summary_stats(self, output_csv=None): contig_size_list = [] self.summary_info["ncontigs"] = len(self.contigs) for contig_id, sequence in self.contigs.items(): logger.debug("Processing contig: {}".format(contig_id)) contig_len = len(sequence) contig_size_list.append(contig_len) self.summary_info["total_len"] += contig_len self.summary_info["avg_gc"].append( sum(map(sequence.count, ["G", "C"])) / contig_len ) self.summary_info["missing_data"] += sequence.count("N") logger.debug("Getting average contig size") self.summary_info["avg_contig_size"] = \ sum(contig_size_list) / len(contig_size_list) logger.debug("Getting average GC content") self.summary_info["avg_gc"] = \ sum(self.summary_info["avg_gc"]) / len(self.summary_info["avg_gc"]) logger.debug("Getting N50") cum_size = 0 for l in sorted(contig_size_list, reverse=True): cum_size += l if cum_size >= self.summary_info["total_len"] / 2: self.summary_info["n50"] = l break if output_csv: logger.debug("Writing report to csv") with open(output_csv, "w") as fh: summary_line = "{}, {}\\n".format( self.sample, ",".join( [str(x) for x in self.summary_info.values()])) fh.write(summary_line)
Generates a CSV report with summary statistics about the assembly The calculated statistics are: - Number of contigs - Average contig size - N50 - Total assembly length - Average GC content - Amount of missing data Parameters ---------- output_csv: str Name of the output CSV file.
7,235
def create(cls, name, key_chain_entry):
    key_chain_entry = key_chain_entry or []
    # JSON keys assumed from the parameter names; the originals were stripped.
    json = {'name': name, 'key_chain_entry': key_chain_entry}
    return ElementCreator(cls, json)
Create a key chain with list of keys Key_chain_entry format is:: [{'key': 'xxxx', 'key_id': 1-255, 'send_key': True|False}] :param str name: Name of key chain :param list key_chain_entry: list of key chain entries :raises CreateElementFailed: create failed with reason :return: instance with meta :rtype: OSPFKeyChain
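A usage sketch built directly from the entry format shown in the docstring (the chain name and key material are placeholders):

key_chain = OSPFKeyChain.create(
    name='secure-keychain',
    key_chain_entry=[
        {'key': 'fookey', 'key_id': 1, 'send_key': True},
    ])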
7,236
def format(self, vertices): index = .join(str(vertices[vn].index) for vn in self.vnames) vcom = .join(self.vnames) buf = io.StringIO() buf.write(\ .format( index,self.name, vcom)) buf.write() for p in self.points: buf.write(+p.format()+) buf.write() buf.write() return buf.getvalue()
Format instance to dump vertices is dict of name to Vertex
7,237
def run_until_disconnected(self):
    if self.loop.is_running():
        return self._run_until_disconnected()
    try:
        return self.loop.run_until_complete(self.disconnected)
    except KeyboardInterrupt:
        pass
    finally:
        self.disconnect()
Runs the event loop until `disconnect` is called or if an error while connecting/sending/receiving occurs in the background. In the latter case, said error will ``raise`` so you have a chance to ``except`` it on your own code. If the loop is already running, this method returns a coroutine that you should await on your own code.
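A sketch of the two call styles the docstring describes; `client` stands for a connected instance of the surrounding class:

# Outside a running loop: blocks until disconnect() or a background error.
client.run_until_disconnected()

# Inside an already-running loop the method returns a coroutine instead:
async def main():
    await client.run_until_disconnected()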
7,238
def docpie(doc, argv=None, help=True, version=None, stdopt=True, attachopt=True, attachvalue=True, helpstyle=, auto2dashes=True, name=None, case_sensitive=False, optionsfirst=False, appearedonly=False, namedoptions=False, extra=None): if case_sensitive: warnings.warn( ) kwargs = locals() argv = kwargs.pop() pie = Docpie(**kwargs) pie.docpie(argv) return pie
Parse `argv` based on the command-line interface described in `doc`. `docpie` creates your command-line interface based on the description that you pass as `doc`. Such a description can contain --options, <positional-argument>, commands, which could be [optional], (required), (mutually | exclusive) or repeated... Parameters ---------- doc : str Description of your command-line interface. argv : list of str, optional Argument vector to be parsed. sys.argv is used if not provided. help : bool (default: True) Set to False to disable automatic help on -h or --help options. version : any object but None If passed, the object will be printed if --version is in `argv`. stdopt : bool (default: True) When it's True, long flags must start with -- attachopt: bool (default: True) write/pass several short flags as one, e.g. -abc can mean -a -b -c. This only works when stdopt=True attachvalue: bool (default: True) allows you to write a short flag and its value together, e.g. -abc can mean -a bc auto2dashes: bool (default: True) automatically handle -- (which means "end of command line flags") name: str (default: None) the "name" of your program. In each of your "usage" sections the "name" will be ignored. By default docpie will ignore the first element of your "usage". case_sensitive: bool (deprecated / default: False) specifies whether matching "Usage:" and "Options:" is case sensitive optionsfirst: bool (default: False) everything after the first positional argument will be interpreted as a positional argument appearedonly: bool (default: False) when set True, options that never appear in argv will not be put in the result. Note this only affects options extra: dict customize pre-handled options. See http://docpie.comes.today/document/advanced-apis/ for more information. Returns ------- args : dict A dictionary, where keys are names of command-line elements such as e.g. "--verbose" and "<path>", and values are the parsed values of those elements. Example ------- >>> from docpie import docpie >>> doc = ''' ... Usage: ... my_program tcp <host> <port> [--timeout=<seconds>] ... my_program serial <port> [--baud=<n>] [--timeout=<seconds>] ... my_program (-h | --help | --version) ... ... Options: ... -h, --help Show this screen and exit. ... --baud=<n> Baudrate [default: 9600] ... ''' >>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30'] >>> docpie(doc, argv) { '--': False, '-h': False, '--baud': '9600', '--help': False, '--timeout': '30', '--version': False, '<host>': '127.0.0.1', '<port>': '80', 'serial': False, 'tcp': True} See also -------- * Full documentation is available in README.md as well as online at http://docpie.comes.today/document/quick-start/
7,239
def run_all(logdir, verbose=False):
    run_box_to_gaussian(logdir, verbose=verbose)
    run_sobel(logdir, verbose=verbose)
Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins
7,240
def arg_completions( completion_text: str, parent_function: str, args: list, arg_idx: int, bel_spec: BELSpec, bel_fmt: str, species_id: str, namespace: str, size: int, ): function_long = bel_spec["functions"]["to_long"].get(parent_function) if not function_long: return [] signatures = bel_spec["functions"]["signatures"][function_long]["signatures"] function_list = [] entity_types = [] fn_replace_list, ns_arg_replace_list = [], [] position_flag = False for signature in signatures: sig_arg = signature["arguments"][arg_idx] sig_type = sig_arg["type"] if sig_arg.get("position", False) and arg_idx == sig_arg["position"] - 1: position_flag = True if sig_type in ["Function", "Modifier"]: function_list.extend(sig_arg["values"]) elif sig_type in ["NSArg", "StrArgNSArg"]: entity_types.extend(sig_arg["values"]) if not position_flag: opt_fn_sig_args = [] opt_nsarg_sig_args = [] mult_fn_sig_args = [] mult_nsarg_sig_args = [] for signature in signatures: signature_opt_fn_sig_args = [] signature_opt_nsarg_sig_args = [] signature_mult_fn_sig_args = [] signature_mult_nsarg_sig_args = [] max_position = -1 for sig_arg in signature["arguments"]: if "position" in sig_arg: max_position = sig_arg["position"] continue if ( sig_arg.get("optional", False) is True and sig_arg.get("multiple", False) is False ): if sig_arg["type"] in ["Function", "Modifier"]: signature_opt_fn_sig_args.extend(sig_arg["values"]) elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]: signature_opt_nsarg_sig_args.extend(sig_arg["values"]) elif sig_arg.get("multiple", False) is True: if sig_arg["type"] in ["Function", "Modifier"]: signature_mult_fn_sig_args.extend(sig_arg["values"]) elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]: signature_mult_nsarg_sig_args.extend(sig_arg["values"]) for idx, arg in enumerate(args): if idx <= max_position - 1: continue if idx == arg_idx: continue log.debug(f"Remove Optional Args {arg} {signature_opt_fn_sig_args}") opt_fn_sig_args.extend(signature_opt_fn_sig_args) opt_nsarg_sig_args.extend(signature_opt_nsarg_sig_args) mult_fn_sig_args.extend(signature_mult_fn_sig_args) mult_nsarg_sig_args.extend(signature_mult_nsarg_sig_args) function_list.extend(list(set(opt_fn_sig_args + mult_fn_sig_args))) entity_types.extend(list(set(opt_nsarg_sig_args + mult_nsarg_sig_args))) if function_list: log.debug(f"ArgComp - position-based Function list: {function_list}") fn_replace_list = function_completions( completion_text, bel_spec, function_list, bel_fmt, size ) if entity_types: log.debug(f"ArgComp - position-based Entity types: {entity_types}") ns_arg_replace_list = nsarg_completions( completion_text, entity_types, bel_spec, namespace, species_id, bel_fmt, size, ) replace_list = fn_replace_list + ns_arg_replace_list return replace_list
Function argument completion Only allow legal options for completion given function name, arguments and index of argument to replace. Args: completion_text: text to use for completion - used for creating highlight parent_function: BEL function containing these args args: arguments of BEL function arg_idx: completing on this argument identified by this index bel_spec: BEL Specification bel_fmt: short, medium, long BEL function/relation formats species_id: filter on this species id, e.g. TAX:9606 if available namespace: filter on this namespace if available size: number of completions to return Return: list of replacements
7,241
def post_step(self, ctxt, step, idx, result):
    debugger = ExtensionDebugger()
    for ext in self.exts:
        with debugger(ext):
            ext.post_step(ctxt, step, idx, result)
    return result
Called after executing a step. :param ctxt: An instance of ``timid.context.Context``. :param step: An instance of ``timid.steps.Step`` describing the step that was executed. :param idx: The index of the step in the list of steps. :param result: An instance of ``timid.steps.StepResult`` describing the result of executing the step. May be altered by the extension, e.g., to set the ``ignore`` attribute. :returns: The ``result`` parameter, for convenience.
7,242
def add_intern_pattern(self, url=None): try: pat = self.get_intern_pattern(url=url) if pat: log.debug(LOG_CHECK, "Add intern pattern %r", pat) self.aggregate.config[].append(get_link_pat(pat)) except UnicodeError as msg: res = _("URL has unparsable domain name: %(domain)s") % \ {"domain": msg} self.set_result(res, valid=False)
Add intern URL regex to config.
7,243
def _from_dict(cls, _dict): args = {} if in _dict: args[] = DocInfo._from_dict(_dict.get()) if in _dict: args[] = _dict.get() if in _dict: args[] = _dict.get() if in _dict: args[] = [ Tables._from_dict(x) for x in (_dict.get()) ] return cls(**args)
Initialize a TableReturn object from a json dictionary.
7,244
def osx_clipboard_get():
    # Command arguments and replacement strings reconstructed; the original
    # literals were stripped. 'pbpaste' is the standard OS X clipboard reader.
    p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
                         stdout=subprocess.PIPE)
    text, stderr = p.communicate()
    # Text comes in with old Mac \r line endings; change them to \n.
    text = text.replace('\r', '\n')
    return text
Get the clipboard's text on OS X.
7,245
def addbr(name):
    fcntl.ioctl(ifconfig.sockfd, SIOCBRADDBR, name)
    return Bridge(name)
Create new bridge with the given name
7,246
def print_roi(self, loglevel=logging.INFO):
    # The original message prefix was stripped; assume a leading newline.
    self.logger.log(loglevel, '\n' + str(self.roi))
Print information about the spectral and spatial properties of the ROI (sources, diffuse components).
7,247
def disconnect_async(self, connection_id, callback):
    try:
        context = self.connections.get_context(connection_id)
    except ArgumentError:
        callback(connection_id, self.id, False, "Could not find connection information")
        return
    self.connections.begin_disconnection(connection_id, callback, self.get_config())
    # Context key assumed from the keyword argument name.
    self.bable.disconnect(
        connection_handle=context['connection_handle'],
        on_disconnected=[self._on_disconnection_finished, context]
    )
Asynchronously disconnect from a device that has previously been connected Args: connection_id (int): A unique identifier for this connection on the DeviceManager that owns this adapter. callback (callable): A function called as callback(connection_id, adapter_id, success, failure_reason) when the disconnection finishes. Disconnection can only either succeed or timeout.
7,248
def delete_rule(name=None, localport=None, protocol=None, dir=None, remoteip=None):
    # The netsh argument literals below are reconstructed from the
    # `netsh advfirewall firewall` CLI; the originals were stripped.
    cmd = ['netsh', 'advfirewall', 'firewall', 'delete', 'rule']
    if name:
        cmd.append('name={0}'.format(name))
    if protocol:
        cmd.append('protocol={0}'.format(protocol))
    if dir:
        cmd.append('dir={0}'.format(dir))
    if remoteip:
        cmd.append('remoteip={0}'.format(remoteip))
    if protocol is None \
            or ('icmpv4' not in protocol and 'icmpv6' not in protocol):
        if localport:
            if not protocol:
                cmd.append('protocol=tcp')
            cmd.append('localport={0}'.format(localport))
    ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
    if ret['retcode'] != 0:
        raise CommandExecutionError(ret['stdout'])
    return True
.. versionadded:: 2015.8.0 Delete an existing firewall rule identified by name and optionally by ports, protocols, direction, and remote IP. Args: name (str): The name of the rule to delete. If the name ``all`` is used you must specify additional parameters. localport (Optional[str]): The port of the rule. If protocol is not specified, protocol will be set to ``tcp`` protocol (Optional[str]): The protocol of the rule. Default is ``tcp`` when ``localport`` is specified dir (Optional[str]): The direction of the rule. remoteip (Optional[str]): The remote IP of the rule. Returns: bool: True if successful Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash # Delete incoming tcp port 8080 in the rule named 'test' salt '*' firewall.delete_rule 'test' '8080' 'tcp' 'in' # Delete the incoming tcp port 8000 from 192.168.0.1 in the rule named # 'test_remote_ip' salt '*' firewall.delete_rule 'test_remote_ip' '8000' 'tcp' 'in' '192.168.0.1' # Delete all rules for local port 80: salt '*' firewall.delete_rule all 80 tcp # Delete a rule called 'allow80': salt '*' firewall.delete_rule allow80
7,249
def cmd_zf(self, ch=None):
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    viewer.zoom_fit()
    cur_lvl = viewer.get_zoom()
    self.log("zoom=%f" % (cur_lvl))
zf ch=chname Zoom the image for the given viewer/channel to fit the window.
7,250
def makediagram(edges):
    # graph_type assumed to be 'digraph': the edges are directed.
    graph = pydot.Dot(graph_type='digraph')
    nodes = edges2nodes(edges)
    epnodes = [(node, makeanode(node[0])) for node in nodes if nodetype(node) == "epnode"]
    endnodes = [(node, makeendnode(node[0])) for node in nodes if nodetype(node) == "EndNode"]
    epbr = [(node, makeabranch(node)) for node in nodes if not istuple(node)]
    nodedict = dict(epnodes + epbr + endnodes)
    for value in list(nodedict.values()):
        graph.add_node(value)
    for e1, e2 in edges:
        graph.add_edge(pydot.Edge(nodedict[e1], nodedict[e2]))
    return graph
make the diagram with the edges
7,251
def set_elements_text(parent_to_parse, element_path=None, text_values=None):
    if text_values is None:
        text_values = []
    return _set_elements_property(parent_to_parse, element_path, _ELEM_TEXT, text_values)
Assigns an array of text values to each of the elements parsed from the parent. The text values are assigned in the same order they are provided. If there are fewer values than elements, the remaining elements are skipped; but if there are more, new elements will be inserted for each of the remaining text values.
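A minimal sketch of the padding behavior described above, assuming an ElementTree-style parent is accepted; the element path and values are illustrative:

import xml.etree.ElementTree as ElementTree

parent = ElementTree.fromstring('<root><item/></root>')
# Two values but one existing element: a second <item> should be inserted for 'b'.
set_elements_text(parent, 'item', ['a', 'b'])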
7,252
def _michael_b(m_l, m_r):
    m_r = m_r ^ _rotate_left32(m_l, 17)
    m_l = (m_l + m_r) % 2**32
    m_r = m_r ^ _XSWAP(m_l)
    m_l = (m_l + m_r) % 2**32
    m_r = m_r ^ _rotate_left32(m_l, 3)
    m_l = (m_l + m_r) % 2**32
    m_r = m_r ^ _rotate_right32(m_l, 2)
    m_l = (m_l + m_r) % 2**32
    return m_l, m_r
Defined in 802.11i p.49
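The rotate and swap helpers called above are not shown in this row; sketches of the standard 32-bit definitions used by the Michael MIC (names assumed from the calls):

def _rotate_left32(x, n):
    # rotate a 32-bit value left by n bits
    return ((x << n) | (x >> (32 - n))) & 0xFFFFFFFF

def _rotate_right32(x, n):
    # rotate a 32-bit value right by n bits
    return ((x >> n) | (x << (32 - n))) & 0xFFFFFFFF

def _XSWAP(x):
    # swap the bytes within each 16-bit half of the word
    return ((x & 0xFF00FF00) >> 8) | ((x & 0x00FF00FF) << 8)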
7,253
def forward_kinematics(self, joints, full_kinematics=False):
    frame_matrix = np.eye(4)
    if full_kinematics:
        frame_matrixes = []
    if len(self.links) != len(joints):
        raise ValueError("Your joints vector length is {} but you have {} links".format(len(joints), len(self.links)))
    for index, (link, joint_angle) in enumerate(zip(self.links, joints)):
        frame_matrix = np.dot(frame_matrix, np.asarray(link.get_transformation_matrix(joint_angle)))
        if full_kinematics:
            frame_matrixes.append(frame_matrix)
    if full_kinematics:
        return frame_matrixes
    else:
        return frame_matrix
Returns the transformation matrix of the forward kinematics Parameters ---------- joints: list The list of the positions of each joint. Note : Inactive joints must be in the list. full_kinematics: bool Return the transformation matrices of each joint Returns ------- frame_matrix: The transformation matrix
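A usage sketch; `chain` stands for an instance of the surrounding kinematic chain class, and the joint values are illustrative:

joints = [0.0] * len(chain.links)              # one value per link, inactive ones included
end_frame = chain.forward_kinematics(joints)   # 4x4 homogeneous transform
print(end_frame[:3, 3])                        # translation component of the end frame

all_frames = chain.forward_kinematics(joints, full_kinematics=True)  # one matrix per link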
7,254
def volume(self):
    mprop = vtk.vtkMassProperties()
    mprop.SetInputData(self.tri_filter())
    return mprop.GetVolume()
Mesh volume - will throw a VTK error/warning if not a closed surface Returns ------- volume : float Total volume of the mesh.
7,255
def datum_to_value(self, instance, datum):
    if datum is None:
        return []
    if not isinstance(datum, Sequence):
        raise TypeError(
            "datum must be a sequence, not %s" % type(datum).__name__)
    bound = getattr(instance._origin, "FilesystemGroupDevices")
    return bound((
        get_device_object(instance._origin, item)
        for item in datum
    ))
Convert a given MAAS-side datum to a Python-side value. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The MAAS-side datum to validate and convert into a Python-side value. :return: A set of `cls` from the given datum.
7,256
def get_unicodedata(version, output=HOME, no_zip=False): target = os.path.join(output, , version) zip_target = os.path.join(output, , % version) if not os.path.exists(target) and os.path.exists(zip_target): unzip_unicode(output, version) download_unicodedata(version, output, no_zip)
Ensure we have Unicode data to generate Unicode tables.
7,257
def get_category_drilldown(parser, token): bits = token.split_contents() error_str = \ \ if len(bits) == 4: if bits[2] != : raise template.TemplateSyntaxError(error_str % {: bits[0]}) if bits[2] == : varname = bits[3].strip("usingasusingastagnameas\"") model = bits[5].strip("using\"") model = bits[3].strip("'\"") category = FilterExpression(bits[1], parser) return CategoryDrillDownNode(category, varname, model)
Retrieves the specified category, its ancestors and its immediate children as an iterable. Syntax:: {% get_category_drilldown "category name" [using "app.Model"] as varname %} Example:: {% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %} or :: {% get_category_drilldown category_obj as family %} Sets family to:: Grandparent, Parent, Child 1, Child 2, Child n
7,258
def is_intersection(g, n): return len(set(g.predecessors(n) + g.successors(n))) > 2
Determine if a node is an intersection graph: 1 -->-- 2 -->-- 3 >>> is_intersection(g, 2) False graph: 1 -- 2 -- 3 | 4 >>> is_intersection(g, 2) True Parameters ---------- g : networkx DiGraph n : node id Returns ------- bool
7,259
def make_index_for(package, index_dir, verbose=True): index_template = item_template = index_filename = os.path.join(index_dir, "index.html") if not os.path.isdir(index_dir): os.makedirs(index_dir) parts = [] for pkg_filename in package.files: pkg_name = os.path.basename(pkg_filename) if pkg_name == "index.html": pkg_name = os.path.basename(os.path.dirname(pkg_filename)) else: pkg_name = package.splitext(pkg_name) pkg_relpath_to = os.path.relpath(pkg_filename, index_dir) parts.append(item_template.format(pkg_name, pkg_relpath_to)) if not parts: print("OOPS: Package %s has no files" % package.name) return if verbose: root_index = not Package.isa(package.files[0]) if root_index: info = "with %d package(s)" % len(package.files) else: package_versions = sorted(set(package.versions)) info = ", ".join(reversed(package_versions)) message = "%-30s %s" % (package.name, info) print(message) with open(index_filename, "w") as f: packages = "\n".join(parts) text = index_template.format(title=package.name, packages=packages) f.write(text.strip()) f.close()
Create an 'index.html' for one package. :param package: Package object to use. :param index_dir: Where 'index.html' should be created.
7,260
def _check_query(self, query, style_cols=None): try: self.sql_client.send( utils.minify_sql(( , , , , , )).format(query=query, comma= if style_cols else , style_cols=(.join(style_cols) if style_cols else )), do_post=False) except Exception as err: raise ValueError(( ).format(query=query, cols=.join([.format(c) for c in style_cols]), err=err))
Checks if query from Layer or QueryLayer is valid
7,261
def create_ports(port, mpi, rank): if port == "random" or port is None: ports = {} else: port = int(port) ports = { "REQ": port + 0, "PUSH": port + 1, "SUB": port + 2 } if mpi == : for port in ports: ports[port] += (rank * 3) return ports
create a list of ports for the current rank
7,262
def add_request(self, request): if request.name in self.requests: raise ValueError(.format(request.name)) self.requests[request.name] = request for method, url in request.urls: if RE_URL_ARG.search(url): request_regex = + RE_URL_ARG_ESC.sub(r, re.escape(url)) + self.__request_regex.append((method, re.compile(request_regex), request)) else: request_key = (method, url) if request_key in self.__request_urls: raise ValueError(.format(url)) self.__request_urls[request_key] = request
Add a request object
7,263
def from_text(text):
    if text.isdigit():
        value = int(text)
        if value >= 0 and value <= 15:
            return value
    value = _by_text.get(text.upper())
    if value is None:
        raise UnknownOpcode
    return value
Convert text into an opcode. @param text: the textual opcode @type text: string @raises UnknownOpcode: the opcode is unknown @rtype: int
7,264
def _init_index(root_dir, schema, index_name):
    index_dir = os.path.join(root_dir, index_name)
    try:
        if not os.path.exists(index_dir):
            os.makedirs(index_dir)
            return create_in(index_dir, schema), index_dir
        else:
            return open_dir(index_dir), index_dir
    except Exception as e:
        logger.error("Init error: failed to open search index at: {}: {}".format(index_dir, e))
        raise
Creates new index or opens existing. Args: root_dir (str): root dir where to find or create index. schema (whoosh.fields.Schema): schema of the index to create or open. index_name (str): name of the index. Returns: tuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory.
7,265
def validate_subfolders(filedir, metadata):
    if not os.path.isdir(filedir):
        print("Error: " + filedir + " is not a directory")
        return False
    subfolders = os.listdir(filedir)
    for subfolder in subfolders:
        if subfolder not in metadata:
            print("Error: folder " + subfolder + " present on disk but not in metadata")
            return False
    for subfolder in metadata:
        if subfolder not in subfolders:
            print("Error: folder " + subfolder + " present in metadata but not on disk")
            return False
    return True
Check that all folders in the given directory have a corresponding entry in the metadata file, and vice versa. :param filedir: This field is the target directory from which to match metadata :param metadata: This field contains the metadata to be matched.
7,266
def match_aspect_to_viewport(self):
    viewport = self.viewport
    self.aspect = float(viewport.width) / viewport.height
Updates Camera.aspect to match the viewport's aspect ratio.
7,267
def get_user(self, user): return self.service.get_user( user, self.url_prefix, self.auth, self.session, self.session_send_opts)
Get user's data (first and last name, email, etc). Args: user (string): User name. Returns: (dictionary): User's data encoded in a dictionary. Raises: requests.HTTPError on failure.
7,268
def average_gradient(data, **kwargs):
    return np.average(np.array(np.gradient(data))**2)
Compute average gradient norm of an image
7,269
def natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True):
    return sorted(seq, key=lambda x: natsort_key(key(x),
                                                 number_type=number_type,
                                                 signed=signed, exp=exp))
\ Sorts a sequence naturally (alphabetically and numerically), not lexicographically. >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) ['num2', 'num3', 'num5'] >>> b = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')] >>> from operator import itemgetter >>> natsorted(b, key=itemgetter(1)) [('c', 'num2'), ('a', 'num3'), ('b', 'num5')]
7,270
def send_output_report(self, data): assert( self.is_opened() ) if not ( isinstance(data, ctypes.Array) and \ issubclass(data._type_, c_ubyte) ): raw_data_type = c_ubyte * len(data) raw_data = raw_data_type() for index in range( len(data) ): raw_data[index] = data[index] else: raw_data = data over_write = winapi.OVERLAPPED() over_write.h_event = winapi.CreateEvent(None, 0, 0, None) if over_write.h_event: try: overlapped_write = over_write winapi.WriteFile(int(self.hid_handle), byref(raw_data), len(raw_data), None, byref(overlapped_write)) error = ctypes.GetLastError() if error == winapi.ERROR_IO_PENDING: result = error elif error == 1167: raise HIDError("Error device disconnected before write") else: raise HIDError("Error %d when trying to write to HID "\ "device: %s"%(error, ctypes.FormatError(error)) ) result = winapi.WaitForSingleObject(overlapped_write.h_event, 10000 ) if result != winapi.WAIT_OBJECT_0: winapi.CancelIo( int(self.hid_handle) ) raise HIDError("Write timed out") finally: winapi.CloseHandle(over_write.h_event) else: return winapi.WriteFile(int(self.hid_handle), byref(raw_data), len(raw_data), None, None) return True
Send input/output/feature report ID = report_id, data should be a c_ubyte object with included the required report data
7,271
def miraligner(args): hairpin, mirna = _download_mirbase(args) precursors = _read_precursor(args.hairpin, args.sps) matures = _read_mature(args.mirna, args.sps) gtf = _read_gtf(args.gtf) out_dts = [] out_files = [] for bam_fn in args.files: sample = op.splitext(op.basename(bam_fn))[0] logger.info("Reading %s" % bam_fn) if bam_fn.endswith("bam") or bam_fn.endswith("sam"): bam_fn = _sam_to_bam(bam_fn) bam_sort_by_n = op.splitext(bam_fn)[0] + "_sort" pysam.sort("-n", bam_fn, bam_sort_by_n) reads = _read_bam(bam_sort_by_n + ".bam", precursors) elif bam_fn.endswith("fasta") or bam_fn.endswith("fa") or \ bam_fn.endswith("fastq"): if args.collapse: bam_fn = _collapse_fastq(bam_fn) out_file = op.join(args.out, sample + ".premirna") bam_fn = _filter_seqs(bam_fn) if args.miraligner: _cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out) reads = _read_miraligner(out_file) out_files.append(out_file) else: raise ValueError("Format not recognized.") if args.miraligner: _mirtop(out_files, args.hairpin, args.gtf, args.sps, args.out) if not args.miraligner: reads = _annotate(reads, matures, precursors) out_file = op.join(args.out, sample + ".mirna") out_file, dt, dt_pre = _tab_output(reads, out_file, sample) try: vcf_file = op.join(args.out, sample + ".vcf") if not file_exists(vcf_file): create_vcf(dt_pre, matures, gtf, vcf_file) try: import vcf vcf.Reader(filename=vcf_file) except Exception as e: logger.warning(e.__doc__) logger.warning(e.message) except Exception as e: logger.warning(e.__doc__) logger.warning(e.message) if isinstance(dt, pd.DataFrame): out_dts.append(dt) if out_dts: _create_counts(out_dts, args.out) else: print("No files analyzed!")
Realign BAM hits to miRBAse to get better accuracy and annotation
7,272
def do_powershell_complete(cli, prog_name): commandline = os.environ[] args = split_args(commandline)[1:] quote = single_quote incomplete = if args and not commandline.endswith(): incomplete = args[-1] args = args[:-1] quote_pos = commandline.rfind(incomplete) - 1 if quote_pos >= 0 and commandline[quote_pos] == : quote = double_quote for item, help in get_choices(cli, prog_name, args, incomplete): echo(quote(item)) return True
Do the powershell completion Parameters ---------- cli : click.Command The main click Command of the program prog_name : str The program name on the command line Returns ------- bool True if the completion was successful, False otherwise
7,273
def single_value(cls, value, shape, pixel_scale, origin=(0.0, 0.0)):
    array = np.ones(shape) * value
    return cls(array, pixel_scale, origin)
Creates an instance of Array and fills it with a single value Parameters ---------- value: float The value with which the array should be filled shape: (int, int) The shape of the array pixel_scale: float The scale of a pixel in arc seconds Returns ------- array: ScaledSquarePixelArray An array filled with a single value
7,274
def minimum_katcp_version(major, minor=0):
    version_tuple = (major, minor)
    def decorator(handler):
        handler._minimum_katcp_version = version_tuple
        return handler
    return decorator
Decorator; exclude handler if server's protocol version is too low Useful for including default handler implementations for KATCP features that are only present in certain KATCP protocol versions Examples -------- >>> class MyDevice(DeviceServer): ... '''This device server will expose ?myreq''' ... PROTOCOL_INFO = katcp.core.ProtocolFlags(5, 1) ... ... @minimum_katcp_version(5, 1) ... def request_myreq(self, req, msg): ... '''A request that should only be present for KATCP >v5.1''' ... # Request handler implementation here. ... >>> class MyOldDevice(MyDevice): ... '''This device server will not expose ?myreq''' ... ... PROTOCOL_INFO = katcp.core.ProtocolFlags(5, 0) ...
7,275
def focusd(task):
    if registration.get_registered(event_hooks=True, root_access=True):
        start_cmd_srv = (os.getuid() == 0)
    else:
        start_cmd_srv = False
    _run = lambda: Focusd(task).run(start_cmd_srv)
    daemonize(get_daemon_pidfile(task), task.task_dir, _run)
Forks the current process as a daemon to run a task. `task` ``Task`` instance for the task to run.
7,276
def get_task(self, task=0, timeout=None, block=True): return _NuMapTask(self, task=task, timeout=timeout, block=block)
Returns an iterator whose results are limited to one **task**. The default iterator (the one used e.g. in a for loop) is the iterator for the first task (task=0). The returned iterator is a ``_NuMapTask`` instance. Compare:: for result_from_task_0 in imap_instance: pass with:: for result_from_task_1 in imap_instance.get_task(task=1): pass a typical use case is:: task_0_iterator = imap_instance.get_task(task=0) task_1_iterator = imap_instance.get_task(task=1) for (task_1_res, task_0_res) in izip(task_0_iterator, task_1_iterator): pass
7,277
def getTotalw(self):
    w = sum([field.w for field in self.fields])
    return w
Returns the cumulative w for all the fields in the dataset
7,278
def is_training_modified(self):
    last_modified = self.trainer.get_last_modified()
    if last_modified > self.training_timestamp:
        return True
    else:
        return False
Returns `True` if training data was modified since last training. Returns `False` otherwise, or if using builtin training data.
7,279
def list(self, **params):
    _, _, visit_outcomes = self.http_client.get("/visit_outcomes", params=params)
    return visit_outcomes
Retrieve visit outcomes Returns Visit Outcomes, according to the parameters provided :calls: ``get /visit_outcomes`` :param dict params: (optional) Search options. :return: List of dictionaries that support attribute-style access, which represent a collection of VisitOutcomes. :rtype: list
7,280
def addGlobalServices(self): if self.options.get() and self.options.get(): _cache = self.getCacheService() _cache.startService()
This is where we put service that we don't want to be duplicated on worker subprocesses
7,281
def untranslateName(s): s = s.replace(, ) s = s.replace(, ) if s[:2] == : s = s[2:] s = s.replace(, ) return s
Undo Python conversion of CL parameter or variable name.
7,282
def make_matrix(version, reserve_regions=True, add_timing=True):
    size = calc_matrix_size(version)
    row = [0x2] * size
    matrix = tuple([bytearray(row) for i in range(size)])
    if reserve_regions:
        if version > 6:
            for i in range(6):
                matrix[i][-11] = 0x0
                matrix[i][-10] = 0x0
                matrix[i][-9] = 0x0
                matrix[-11][i] = 0x0
                matrix[-10][i] = 0x0
                matrix[-9][i] = 0x0
        for i in range(9):
            matrix[i][8] = 0x0
            matrix[8][i] = 0x0
            if version > 0:
                matrix[-i][8] = 0x0
                matrix[8][-i] = 0x0
    if add_timing:
        add_timing_pattern(matrix, version < 1)
    return matrix
\ Creates a matrix of the provided `size` (w x h) initialized with the (illegal) value 0x2. The "timing pattern" is already added to the matrix and the version and format areas are initialized with 0x0. :param int version: The (Micro) QR Code version :rtype: tuple of bytearrays
7,283
def Datetime(null=True, **kwargs):
    return Property(
        types=datetime.datetime,
        convert=util.local_timezone,
        load=dateutil.parser.parse,
        null=null,
        **kwargs
    )
A datetime property.
7,284
def defaults(d1, d2):
    d1 = d1.copy()
    tolist = isinstance(d2, pd.DataFrame)
    keys = (k for k in d2 if k not in d1)
    for k in keys:
        if tolist:
            d1[k] = d2[k].tolist()
        else:
            d1[k] = d2[k]
    return d1
Update a copy of d1 with the contents of d2 that are not in d1. d1 and d2 are dictionary like objects. Parameters ---------- d1 : dict | dataframe dict with the preferred values d2 : dict | dataframe dict with the default values Returns ------- out : dict | dataframe Result of adding default values type of d1
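A quick example, derivable directly from the code above:

d1 = {'a': 1}
d2 = {'a': 9, 'b': 2}
defaults(d1, d2)   # {'a': 1, 'b': 2}: d1's values win, missing keys come from d2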
7,285
def find_and_filter_sgf_files(base_dir, min_year=None, komi=None):
    sgf_files = []
    for dirpath, dirnames, filenames in os.walk(base_dir):
        for filename in filenames:
            if filename.endswith('.sgf'):
                path = os.path.join(dirpath, filename)
                sgf_files.append(path)
    if min_year == komi == None:
        print("Found {} sgf_files".format(len(sgf_files)))
        return sgf_files
    f = filter_year_komi(min_year, komi)
    filtered_sgf_files = [sgf for sgf in tqdm(sgf_files) if f(sgf)]
    print("{} of {} .sgf files matched (min_year >= {}, komi = {})".format(
        len(filtered_sgf_files), len(sgf_files), min_year, komi))
    return filtered_sgf_files
Finds all sgf files in base_dir with year >= min_year and komi
7,286
def Points(plist, r=5, c="gray", alpha=1): def _colorPoints(plist, cols, r, alpha): n = len(plist) if n > len(cols): colors.printc("~times Error: mismatch in colorPoints()", n, len(cols), c=1) exit() if n != len(cols): colors.printc("~lightning Warning: mismatch in colorPoints()", n, len(cols)) src = vtk.vtkPointSource() src.SetNumberOfPoints(n) src.Update() vgf = vtk.vtkVertexGlyphFilter() vgf.SetInputData(src.GetOutput()) vgf.Update() pd = vgf.GetOutput() ucols = vtk.vtkUnsignedCharArray() ucols.SetNumberOfComponents(3) ucols.SetName("pointsRGB") for i in range(len(plist)): c = np.array(colors.getColor(cols[i])) * 255 ucols.InsertNextTuple3(c[0], c[1], c[2]) pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True)) pd.GetPointData().SetScalars(ucols) actor = Actor(pd, c, alpha) actor.mapper.ScalarVisibilityOn() actor.GetProperty().SetInterpolationToFlat() actor.GetProperty().SetPointSize(r) settings.collectable_actors.append(actor) return actor n = len(plist) if n == 0: return None elif n == 3: if utils.isSequence(plist[0]) and len(plist[0]) > 3: plist = list(zip(plist[0], plist[1], plist[2])) elif n == 2: if utils.isSequence(plist[0]) and len(plist[0]) > 3: plist = list(zip(plist[0], plist[1], [0] * len(plist[0]))) if utils.isSequence(c) and len(c) > 3: actor = _colorPoints(plist, c, r, alpha) settings.collectable_actors.append(actor) return actor n = len(plist) sourcePoints = vtk.vtkPoints() sourceVertices = vtk.vtkCellArray() is3d = len(plist[0]) > 2 if is3d: for pt in plist: aid = sourcePoints.InsertNextPoint(pt) sourceVertices.InsertNextCell(1) sourceVertices.InsertCellPoint(aid) else: for pt in plist: aid = sourcePoints.InsertNextPoint(pt[0], pt[1], 0) sourceVertices.InsertNextCell(1) sourceVertices.InsertCellPoint(aid) pd = vtk.vtkPolyData() pd.SetPoints(sourcePoints) pd.SetVerts(sourceVertices) if n == 1: pd.GetPoints().SetPoint(0, [0, 0, 0]) else: pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True)) actor = Actor(pd, c, alpha) actor.GetProperty().SetPointSize(r) if n == 1: actor.SetPosition(plist[0]) settings.collectable_actors.append(actor) return actor
Build a point ``Actor`` for a list of points. :param float r: point radius. :param c: color name, number, or list of [R,G,B] colors of same length as plist. :type c: int, str, list :param float alpha: transparency in range [0,1]. .. hint:: |lorenz| |lorenz.py|_
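A minimal usage sketch, assuming the vtkplotter-style module context above (vtk, numpy, Actor, settings); the random point cloud is illustrative data only:

import numpy as np

pts = np.random.rand(100, 3)                  # 100 points in the unit cube
actor = Points(pts, r=4, c='red', alpha=0.8)  # one color for all points

# per-point colors: pass one color per point
cols = [(i / 100, 0.2, 1 - i / 100) for i in range(100)]
actor2 = Points(pts, r=4, c=cols)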
7,287
def vcs_upload():
    if env.deploy_tool == "git":
        remote_path = "ssh://%s@%s%s" % (env.user, env.host_string, env.repo_path)
        if not exists(env.repo_path):
            run("mkdir -p %s" % env.repo_path)
            with cd(env.repo_path):
                run("git init --bare")
        local("git push -f %s master" % remote_path)
        with cd(env.repo_path):
            run("GIT_WORK_TREE=%s git checkout -f master" % env.proj_path)
            run("GIT_WORK_TREE=%s git reset --hard" % env.proj_path)
    elif env.deploy_tool == "hg":
        remote_path = "ssh://%s@%s/%s" % (env.user, env.host_string, env.repo_path)
        with cd(env.repo_path):
            if not exists("%s/.hg" % env.repo_path):
                run("hg init")
                print(env.repo_path)
            with fab_settings(warn_only=True):
                push = local("hg push -f %s" % remote_path)
                if push.return_code == 255:
                    abort()
            run("hg update")
Uploads the project with the selected VCS tool.
7,288
def inFootprint(footprint, ra, dec):
    if footprint is None:
        return np.ones(len(ra), dtype=bool)
    try:
        if isinstance(footprint, str) and os.path.exists(footprint):
            filename = footprint
            footprint = read_map(filename)
        nside = hp.npix2nside(len(footprint))
        pix = ang2pix(nside, ra, dec)
        inside = (footprint[pix] > 0)
    except IOError:
        logger.warning("Failed to load healpix footprint; trying to use mangle...")
        inside = inMangle(filename, ra, dec)
    return inside
Check whether a set of ra,dec combinations is in the footprint. Careful, input files must be in celestial coordinates. footprint : Either healpix map or mangle polygon file ra,dec : Celestial coordinates Returns: inside : boolean array of coordinates in footprint
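A minimal usage sketch, assuming numpy and the module context above; with footprint=None every coordinate is trivially accepted:

import numpy as np

ra = np.array([10.0, 150.0, 320.0])
dec = np.array([-30.0, 2.0, 45.0])
inside = inFootprint(None, ra, dec)  # -> array([ True,  True,  True])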
7,289
def values(self):
    values = []
    for __, data in self.items():
        values.append(data)
    return values
return a list of all state values
7,290
def remove(self, branch, turn, tick):
    for parent, entitys in list(self.parents.items()):
        for entity, keys in list(entitys.items()):
            for key, branchs in list(keys.items()):
                if branch in branchs:
                    branhc = branchs[branch]
                    if turn in branhc:
                        trun = branhc[turn]
                        if tick in trun:
                            del trun[tick]
                        trun.truncate(tick)
                        if not trun:
                            del branhc[turn]
                    branhc.truncate(turn)
                    if not branhc:
                        del branchs[branch]
                if not branchs:
                    del keys[key]
            if not keys:
                del entitys[entity]
        if not entitys:
            del self.parents[parent]
    for branchkey, branches in list(self.branches.items()):
        if branch in branches:
            branhc = branches[branch]
            if turn in branhc:
                trun = branhc[turn]
                if tick in trun:
                    del trun[tick]
                trun.truncate(tick)
                if not trun:
                    del branhc[turn]
            branhc.truncate(turn)
            if not branhc:
                del branches[branch]
        if not branches:
            del self.branches[branchkey]
    for keykey, keys in list(self.keys.items()):
        for key, branchs in list(keys.items()):
            if branch in branchs:
                branhc = branchs[branch]
                if turn in branhc:
                    trun = branhc[turn]
                    if tick in trun:
                        del trun[tick]
                    trun.truncate(tick)
                    if not trun:
                        del branhc[turn]
                branhc.truncate(turn)
                if not branhc:
                    del branchs[branch]  # original read 'del branches[branch]', apparently a leftover from the loop above
            if not branchs:
                del keys[key]
        if not keys:
            del self.keys[keykey]
    sets = self.settings[branch]
    if turn in sets:
        setsturn = sets[turn]
        if tick in setsturn:
            del setsturn[tick]
        setsturn.truncate(tick)
        if not setsturn:
            del sets[turn]
    sets.truncate(turn)
    if not sets:
        del self.settings[branch]
    presets = self.presettings[branch]
    if turn in presets:
        presetsturn = presets[turn]
        if tick in presetsturn:
            del presetsturn[tick]
        presetsturn.truncate(tick)
        if not presetsturn:
            del presets[turn]
    presets.truncate(turn)
    if not presets:
        del self.presettings[branch]
    for entity, brnch in list(self.keycache):
        if brnch == branch:
            kc = self.keycache[entity, brnch]
            if turn in kc:
                kcturn = kc[turn]
                if tick in kcturn:
                    del kcturn[tick]
                kcturn.truncate(tick)
                if not kcturn:
                    del kc[turn]
            kc.truncate(turn)
            if not kc:
                del self.keycache[entity, brnch]
    self.shallowest = OrderedDict()
    self.send(self, branch=branch, turn=turn, tick=tick,
              action='remove')  # action name assumed; the string literal was lost in extraction
Delete data on or after this tick, on the assumption that the future has been invalidated.
7,291
def hmget(self, *args):
    if args and not any(arg in self._instancehash_fields for arg in args):
        raise ValueError("Only InstanceHashField can be used here.")
    return self._call_command('hmget', args)
This command on the model allows getting many InstanceHashField values with only one redis call. You must pass the names of the hash fields to retrieve as arguments.
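A hedged usage sketch; the model instance and field names are hypothetical, on a model whose 'firstname' and 'lastname' are InstanceHashFields:

firstname, lastname = person.hmget('firstname', 'lastname')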
7,292
def add(self, data):
    for host in data:
        for key in data[host]:
            if not data[host][key] == []:
                self.add_item(host, key, data[host][key])
Add a collection of items into the container :data: dict of items & values per hostname
7,293
def _validate_num_units(num_units, service_name, add_error):
    if num_units is None:
        return 0
    try:
        num_units = int(num_units)
    except (TypeError, ValueError):
        # original message text was lost in extraction; wording assumed
        add_error('invalid number of units for service {}'.format(service_name))
        return
    if num_units < 0:
        # original message text was lost in extraction; wording assumed
        add_error('negative number of units {} for service {}'.format(
            num_units, service_name))
        return
    return num_units
Check that the given num_units is valid. Use the given service name to describe possible errors. Use the given add_error callable to register validation errors. If no errors are encountered, return the number of units as an integer. Return None otherwise.
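A minimal usage sketch of the validator above; the service name is illustrative and errors.append stands in for the add_error callable:

errors = []
assert _validate_num_units("3", "mysql", errors.append) == 3
assert _validate_num_units(None, "mysql", errors.append) == 0
assert _validate_num_units("not-a-number", "mysql", errors.append) is None
assert len(errors) == 1  # one validation error was registered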
7,294
def color(self):
    color = idc.GetColor(self.ea, idc.CIC_ITEM)
    if color == 0xFFFFFFFF:
        # 0xFFFFFFFF is IDA's default/no-color sentinel
        return None
    return color
Line color in IDA View
7,295
def search(self, word, limit=30):
    search = Search(PrefixQuery("word", word), sort={"count": "desc"})
    for doc in self.connection.search(
            search, indexes=[self.index], count=limit):
        yield (doc["word"], doc["count"])
Search for a word within the wordgatherer collection. :param word: Word to search for. :param limit: Maximum number of results to return.
7,296
def put(self, key, value):
    key = self._service_key(key)
    # operation name assumed from the method name; the string literal was
    # lost in extraction
    self._service_ops['put'](key, value)
Stores the object `value` named by `key` in `service`. Args: key: Key naming `value`. value: the object to store.
7,297
def meanprecision(a):
    s = a.sum()
    m = a / s
    return (m, s)
Mean and precision of Dirichlet distribution.

Parameters
----------
a : array
    Parameters of Dirichlet distribution.

Returns
-------
mean : array
    Numbers in [0,1]: the means of the Dirichlet distribution.
precision : float
    Precision or concentration parameter of the Dirichlet distribution.
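A minimal numeric sketch, assuming numpy and the function above:

import numpy as np

a = np.array([2.0, 3.0, 5.0])
m, s = meanprecision(a)
# m -> array([0.2, 0.3, 0.5])  (means sum to 1)
# s -> 10.0                    (concentration = sum of the parameters)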
7,298
def t_RP(self, t):
    r'[])]'
    # the string literals were lost in extraction: ')' follows from the
    # regex above (which matches ']' or ')'), and the retyped token name
    # 'RBRACKET' is assumed for the bracket option
    if t.value != ')' and OPTIONS.bracket.value:
        t.type = 'RBRACKET'
    return t
r'[])]
7,299
def initialize_layers(self, layers=None):
    # NOTE: the string keys below ('name', 'incoming', 'incomings',
    # 'layer_wrapper', 'W', 'b') were stripped in extraction and are
    # restored from the Lasagne/nolearn conventions this code follows.
    if layers is not None:
        self.layers = layers
    self.layers_ = Layers()

    # If a Layer instance (or a list of them) was passed in, just collect
    # the already-instantiated layers by name.
    if isinstance(self.layers[0], Layer):
        for out_layer in self.layers:
            for i, layer in enumerate(get_all_layers(out_layer)):
                if layer not in self.layers_.values():
                    name = layer.name or self._layer_name(layer.__class__, i)
                    self.layers_[name] = layer
                    if self._get_params_for(name) != {}:
                        raise ValueError(
                            # message garbled in extraction; it read roughly:
                            "You can't use keyword params when passing a "
                            "Lasagne instance object as the 'layers' "
                            "parameter of 'NeuralNet'.")
        # early return reconstructed; it was lost in extraction
        self._output_layers = self.layers
        return self.layers

    # 'self.layers' is a list of '(Layer class, kwargs)' tuples, so we'll
    # have to actually instantiate the layers given these arguments.
    layer = None
    for i, layer_def in enumerate(self.layers):
        if isinstance(layer_def[1], dict):
            # the (Layer class, kwargs) case
            layer_factory, layer_kw = layer_def
            layer_kw = layer_kw.copy()
        else:
            # the (layer_name, Layer class) case
            layer_name, layer_factory = layer_def
            layer_kw = {'name': layer_name}

        if isinstance(layer_factory, str):
            layer_factory = locate(layer_factory)
            assert layer_factory is not None

        if 'name' not in layer_kw:
            layer_kw['name'] = self._layer_name(layer_factory, i)

        more_params = self._get_params_for(layer_kw['name'])
        layer_kw.update(more_params)

        if layer_kw['name'] in self.layers_:
            raise ValueError(
                "Two layers with name {}.".format(layer_kw['name']))

        try:
            is_input_layer = issubclass(layer_factory, InputLayer)
        except TypeError:
            is_input_layer = False
        if not is_input_layer:
            if 'incoming' in layer_kw:
                layer_kw['incoming'] = self.layers_[
                    layer_kw['incoming']]
            elif 'incomings' in layer_kw:
                layer_kw['incomings'] = [
                    self.layers_[name] for name in layer_kw['incomings']]
            else:
                layer_kw['incoming'] = layer

        # resolve string references to other layers
        for param in self.layer_reference_params:
            if param in layer_kw:
                val = layer_kw[param]
                if isinstance(val, basestring):
                    layer_kw[param] = self.layers_[val]

        for attr in ('W', 'b'):
            if isinstance(layer_kw.get(attr), str):
                name = layer_kw[attr]
                layer_kw[attr] = getattr(self.layers_[name], attr, None)

        try:
            layer_wrapper = layer_kw.pop('layer_wrapper', None)
            layer = layer_factory(**layer_kw)
        except TypeError as e:
            msg = ("Failed to instantiate {} with args {}.\n"
                   "Maybe parameter names have changed?".format(
                       layer_factory, layer_kw))
            chain_exception(TypeError(msg), e)
        self.layers_[layer_kw['name']] = layer
        if layer_wrapper is not None:
            layer = layer_wrapper(layer)
            self.layers_["LW_%s" % layer_kw['name']] = layer

    self._output_layers = [layer]
    return [layer]
Sets up the Lasagne layers :param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network :return: the output layer of the underlying lasagne network. :seealso: :ref:`layer-def`
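A short sketch of the two layer-definition styles the loop above accepts; the layer classes come from lasagne.layers and the shapes/unit counts are illustrative:

from lasagne.layers import InputLayer, DenseLayer

# (layer_name, Layer class) tuples:
layers = [
    ('input', InputLayer),
    ('hidden', DenseLayer),
    ('output', DenseLayer),
]

# or (Layer class, kwargs) tuples:
layers = [
    (InputLayer, {'shape': (None, 784)}),
    (DenseLayer, {'num_units': 128, 'name': 'hidden'}),
    (DenseLayer, {'num_units': 10, 'name': 'output'}),
]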