[Dataset viewer column stats: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).]
22,500
# NOTE: string literals were stripped from this record during extraction;
# they are reconstructed below from the upstream salt iptables module (best effort).
def insert(table='filter', chain=None, position=None, rule=None, family='ipv4'):
    if not chain:
        return 'Error: Chain needs to be specified'
    if not position:
        return 'Error: Position needs to be specified or use append (-A)'
    if not rule:
        return 'Error: Rule needs to be specified'

    if position < 0:
        rules = get_rules(family=family)
        size = len(rules[table][chain]['rules'])
        position = (size + position) + 1
        if position == 0:
            position = 1

    wait = '--wait' if _has_option('--wait', family) else ''
    returnCheck = check(table, chain, rule, family)
    if isinstance(returnCheck, bool) and returnCheck:
        return False
    cmd = '{0} {1} -t {2} -I {3} {4} {5}'.format(
        _iptables_cmd(family), wait, table, chain, position, rule)
    out = __salt__['cmd.run'](cmd)
    return out
Insert a rule into the specified table/chain, at the specified position. This function accepts a rule in a standard iptables command format, starting with the chain. Trying to force users to adapt to a new method of creating rules would be irritating at best, and we already have a parser that can handle it. If the position specified is a negative number, then the insert will be performed counting from the end of the list. For instance, a position of -1 will insert the rule as the second to last rule. To insert a rule in the last position, use the append function instead. CLI Examples: .. code-block:: bash salt '*' iptables.insert filter INPUT position=3 \\ rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' IPv6: salt '*' iptables.insert filter INPUT position=3 \\ rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\ family=ipv6
22,501
def dfs_iterative(graph, start, seen): seen[start] = True to_visit = [start] while to_visit: node = to_visit.pop() for neighbor in graph[node]: if not seen[neighbor]: seen[neighbor] = True to_visit.append(neighbor)
DFS, detect connected component, iterative implementation :param graph: directed graph in listlist or listdict format :param int start: node at which to start the graph exploration :param boolean-table seen: will be set to True for the connected component containing start. :complexity: `O(|V|+|E|)`
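A minimal usage sketch of dfs_iterative; the toy adjacency-list graph and the seen table are illustrative:

.. code-block:: python

    graph = [[1, 2], [0], [0, 3], [2], []]  # node 4 is isolated
    seen = [False] * len(graph)
    dfs_iterative(graph, 0, seen)
    print(seen)  # -> [True, True, True, True, False]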
22,502
def intinlist(lst): for item in lst: try: item = int(item) return True except ValueError: pass return False
test if int in list
22,503
def set_target(self, target):
    self.targetname = target
    # NOTE: the module-path and fromlist literals were stripped; reconstructed
    # from the upstream buildozer source (best effort).
    m = __import__('buildozer.targets.{0}'.format(target), fromlist=['buildozer'])
    self.target = m.get_target(self)
    self.check_build_layout()
    self.check_configuration_tokens()
Set the target to use (one of buildozer.targets, such as "android")
22,504
def top_i_answer(self, i): for j, a in enumerate(self.answers): if j == i - 1: return a
Get the answer at a given rank. :param int i: rank of the answer to fetch :return: the answer object; the directly accessible attributes are the same as those described for the answers method :rtype: Answer
22,505
def parse_date(my_date):
    # NOTE: the strptime format and message strings were stripped; the format
    # is taken from the docstring, the messages are best-effort reconstructions.
    if isinstance(my_date, datetime.datetime):
        result = my_date
    elif isinstance(my_date, str):
        result = datetime.datetime.strptime(my_date, '%Y-%m-%dT%H:%M:%SZ')
    else:
        raise ValueError('Unable to parse date %s of type %s' % (
            str(my_date), type(my_date)))
    assert result.tzinfo is None, 'Unexpected timezone in %s' % (result)
    return result
Parse a date into canonical format of datetime.datetime. :param my_date: Either datetime.datetime or string in '%Y-%m-%dT%H:%M:%SZ' format. :return: A datetime.datetime. PURPOSE: Parse a date and make sure it has no time zone.
22,506
def guess_type_tag(self, input_bytes, filename):
    # NOTE: the keys/values of this mapping and the other string literals were
    # stripped during extraction and cannot be recovered; every quoted value
    # below is an illustrative placeholder only.
    mime_to_type = {'application/pdf': 'pdf',
                    'application/zip': 'zip'}  # ... remaining entries lost ...
    with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
        mime_type = mag.id_buffer(input_bytes[:1024])
    if mime_type in mime_to_type:
        type_tag = mime_to_type[mime_type]
        if type_tag == 'unknown':  # placeholder: the stripped tag is lost
            print 'Disambiguating by file extension...'  # message lost
            ext = os.path.splitext(filename)[1][1:]
            if ext in ['ext1', 'ext2']:  # extension list lost
                type_tag = 'tag'  # placeholder: value lost
            else:
                print 'Could not determine type for %s' % filename  # message lost
                exit(1)
        return type_tag
    else:
        print 'Unknown mime type'  # message lost
        return None
Try to guess the type_tag for this sample
22,507
def get_var(var):
    # NOTE: the quote/comment delimiter literals were stripped; reconstructed
    # from the upstream salt makeconf module (best effort). A leaked docstring
    # fragment ("*LINGUAS") was also removed.
    makeconf = _get_makeconf()
    with salt.utils.files.fopen(makeconf) as fn_:
        conf_file = salt.utils.data.decode(fn_.readlines())
    for line in conf_file:
        if line.startswith(var):
            ret = line.split('=', 1)[1]
            if '"' in ret:
                ret = ret.split('"')[1]
            elif '#' in ret:
                ret = ret.split('#')[0]
            ret = ret.strip()
            return ret
    return None
Get the value of a variable in make.conf Return the value of the variable or None if the variable is not in make.conf CLI Example: .. code-block:: bash salt '*' makeconf.get_var 'LINGUAS'
22,508
def error(self):
    if self._error is None:
        try:
            init = getattr(self, "_" + self.__class__.__name__ + "__init", None)
            if init is not None and callable(init):
                init()
        except Exception:
            pass
    return self._error
gets the error
22,509
def percent_encode_non_ascii_headers(self, encoding='UTF-8'):
    # NOTE: the stripped literals (default encoding, the "''" charset
    # separator, and the ascii probe) are reconstructed per RFC 8187.
    def do_encode(m):
        return "*={0}''".format(encoding) + quote(to_native_str(m.group(1)))

    for index in range(len(self.headers) - 1, -1, -1):
        curr_name, curr_value = self.headers[index]
        try:
            curr_value.encode('ascii')
        except UnicodeError:
            new_value = self.ENCODE_HEADER_RX.sub(do_encode, curr_value)
            if new_value == curr_value:
                new_value = quote(curr_value)
            self.headers[index] = (curr_name, new_value)
Encode any headers that are not plain ascii as UTF-8 as per: https://tools.ietf.org/html/rfc8187#section-3.2.3 https://tools.ietf.org/html/rfc5987#section-3.2.2
22,510
def getatom(self, atomends=None): atomlist = [] if atomends is None: atomends = self.atomends while self.pos < len(self.field): if self.field[self.pos] in atomends: break else: atomlist.append(self.field[self.pos]) self.pos += 1 return EMPTYSTRING.join(atomlist)
Parse an RFC 2822 atom. Optional atomends specifies a different set of end token delimiters (the default is to use self.atomends). This is used e.g. in getphraselist() since phrase endings must not include the `.' (which is legal in phrases).
22,511
def get_nowait_from_queue(queue): data = [] for _ in range(queue.qsize()): try: data.append(queue.get_nowait()) except q.Empty: break return data
Collect all immediately available items from a queue
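A short usage sketch; `queue` is imported as `q` to match the exception handler above:

.. code-block:: python

    import queue as q

    work = q.Queue()
    for item in (1, 2, 3):
        work.put(item)
    print(get_nowait_from_queue(work))  # -> [1, 2, 3]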
22,512
def encipher(self, message):
    message = self.remove_punctuation(message)
    effective_ch = [0, 0, 0, 0, 0, 0, 0]
    ret = ''
    for j in range(len(message)):
        shift = 0
        effective_ch[0] = 0
        effective_ch[1] = self.wheel_1_settings[self.actual_key[0]]
        effective_ch[2] = self.wheel_2_settings[self.actual_key[1]]
        effective_ch[3] = self.wheel_3_settings[self.actual_key[2]]
        effective_ch[4] = self.wheel_4_settings[self.actual_key[3]]
        effective_ch[5] = self.wheel_5_settings[self.actual_key[4]]
        effective_ch[6] = self.wheel_6_settings[self.actual_key[5]]
        for i in range(0, 27):
            if effective_ch[self.lug_positions[i][0]] or effective_ch[self.lug_positions[i][1]]:
                shift += 1
        # NOTE: the key literal passed to subst() was stripped in extraction
        # and could not be recovered; 'A' below is a placeholder.
        ret += self.subst(message[j], key='A', offset=-shift)
        self.advance_key()
    return ret
Encipher string using M209 cipher according to initialised key. Punctuation and whitespace are removed from the input. Example (continuing from the example above):: ciphertext = m.encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
22,513
def circuit_to_latex_using_qcircuit( circuit: circuits.Circuit, qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str: diagram = circuit.to_text_diagram_drawer( qubit_namer=qcircuit_qubit_namer, qubit_order=qubit_order, get_circuit_diagram_info=get_qcircuit_diagram_info) return _render(diagram)
Returns a QCircuit-based latex diagram of the given circuit. Args: circuit: The circuit to represent in latex. qubit_order: Determines the order of qubit wires in the diagram. Returns: Latex code for the diagram.
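A hedged usage sketch, assuming the function is exposed as in Cirq's contrib package:

.. code-block:: python

    import cirq
    from cirq.contrib.qcircuit import circuit_to_latex_using_qcircuit

    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)])
    print(circuit_to_latex_using_qcircuit(circuit))  # emits \Qcircuit markup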
22,514
def translate(self, closed_regex=True):
    if closed_regex:
        return self.regex
    else:
        return translate(self.pattern, closed_regex=False, **self.flags)
u""" Returns a Python regular expression allowing to match :return:
22,515
def correct_pairs(p, pf, tag):
    from jcvi.assembly.preprocess import correct as cr

    # NOTE: the join separator literal was stripped; ',' is assumed.
    logging.debug("Work on {0} ({1})".format(pf, ','.join(p)))
    itag = tag[0]
    cm = ".".join((pf, itag))
    targets = (cm + ".1.corr.fastq", cm + ".2.corr.fastq",
               pf + ".PE-0.corr.fastq")
    if not need_update(p, targets):
        logging.debug("Corrected reads found: {0}. Skipped.".format(targets))
        return

    slink(p, pf, tag)
    cwd = os.getcwd()
    os.chdir(pf)
    cr(sorted(glob("*.fastq") + glob("*.fastq.gz")) + ["--nofragsdedup"])
    sh("mv {0}.1.corr.fastq ../{1}".format(itag, targets[0]))
    sh("mv {0}.2.corr.fastq ../{1}".format(itag, targets[1]))
    sh("mv frag_reads_corr.corr.fastq ../{0}".format(targets[2]))
    logging.debug("Correction finished: {0}".format(targets))
    os.chdir(cwd)
Take one pair of reads and correct to generate *.corr.fastq.
22,516
def show_version(a_device):
    # NOTE: the separator strings passed to print() were stripped in
    # extraction; '#' * 80 below is an assumed banner.
    remote_conn = ConnectHandler(**a_device)
    print()
    print('#' * 80)
    print(remote_conn.send_command("show version"))
    print('#' * 80)
    print()
Execute show version command using Netmiko.
22,517
# NOTE: the default curve name was stripped in extraction; 'p160' below is
# an assumption based on py-seccure's documented default.
def sign(s, passphrase, sig_format=SER_COMPACT, curve='p160'):
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    return privkey.sign(hashlib.sha512(s).digest(), sig_format)
Signs `s' with passphrase `passphrase'
22,518
def add_template_events(self, columns, vectors):
    # NOTE: the statement initializing `new_events` and the indexed field
    # name were lost in extraction; the reconstruction below is best effort
    # and the dtype/field names are assumptions.
    new_events = None
    for v in vectors:
        if v is not None:
            new_events = numpy.zeros(len(v), dtype=self.event_dtype)
            break
    assert new_events is not None
    new_events['template_id'] = self.template_index  # field name assumed
    for c, v in zip(columns, vectors):
        if v is not None:
            if isinstance(v, Array):
                new_events[c] = v.numpy()
            else:
                new_events[c] = v
    self.template_events = numpy.append(self.template_events, new_events)
Add a vector indexed
22,519
def flat_map(self, flatmap_fn): op = Operator( _generate_uuid(), OpType.FlatMap, "FlatMap", flatmap_fn, num_instances=self.env.config.parallelism) return self.__register(op)
Applies a flatmap operator to the stream. Attributes: flatmap_fn (function): The user-defined logic of the flatmap (e.g. split()).
22,520
def _reset_values(self, instance): self.value = None self.reference.value = None instance.__dict__.pop(self.field_name, None) instance.__dict__.pop(self.reference.field_name, None) self.reference.delete_cached_value(instance)
Reset all associated values and clean up dictionary items
22,521
def to_networkx(self):
    nx = _import_networkx()

    def convert(number):
        if issubclass(number.dtype.type, (np.integer, np.bool_)):
            return int(number)
        else:
            return float(number)

    def edges():
        for source, target, weight in zip(*self.get_edge_list()):
            # the docstring states edge weights are stored under "weight"
            yield int(source), int(target), {'weight': convert(weight)}

    def nodes():
        for vertex in range(self.n_vertices):
            signals = {name: convert(signal[vertex])
                       for name, signal in self.signals.items()}
            yield vertex, signals

    self._break_signals()
    graph = nx.DiGraph() if self.is_directed() else nx.Graph()
    graph.add_nodes_from(nodes())
    graph.add_edges_from(edges())
    graph.name = self.__class__.__name__
    return graph
r"""Export the graph to NetworkX. Edge weights are stored as an edge attribute, under the name "weight". Signals are stored as node attributes, under their name in the :attr:`signals` dictionary. `N`-dimensional signals are broken into `N` 1-dimensional signals. They will eventually be joined back together on import. Returns ------- graph : :class:`networkx.Graph` A NetworkX graph object. See Also -------- to_graphtool : export to graph-tool save : save to a file Examples -------- >>> import networkx as nx >>> from matplotlib import pyplot as plt >>> graph = graphs.Path(4, directed=True) >>> graph.set_signal(np.full(4, 2.3), 'signal') >>> graph = graph.to_networkx() >>> print(nx.info(graph)) Name: Path Type: DiGraph Number of nodes: 4 Number of edges: 3 Average in degree: 0.7500 Average out degree: 0.7500 >>> nx.is_directed(graph) True >>> graph.nodes() NodeView((0, 1, 2, 3)) >>> graph.edges() OutEdgeView([(0, 1), (1, 2), (2, 3)]) >>> graph.nodes()[2] {'signal': 2.3} >>> graph.edges()[(0, 1)] {'weight': 1.0} >>> # nx.draw(graph, with_labels=True) Another common goal is to use NetworkX to compute some properties to be be imported back in the PyGSP as signals. >>> import networkx as nx >>> from matplotlib import pyplot as plt >>> graph = graphs.Sensor(100, seed=42) >>> graph.set_signal(graph.coords, 'coords') >>> graph = graph.to_networkx() >>> betweenness = nx.betweenness_centrality(graph, weight='weight') >>> nx.set_node_attributes(graph, betweenness, 'betweenness') >>> graph = graphs.Graph.from_networkx(graph) >>> graph.compute_fourier_basis() >>> graph.set_coordinates(graph.signals['coords']) >>> fig, axes = plt.subplots(1, 2) >>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0]) >>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
22,522
def write(self, *messages): for message in messages: if not isinstance(message, Token): message = ensure_tuple(message, cls=self._input_type, length=self._input_length) if self._input_length is None: self._input_length = len(message) self.input.put(message)
Push a message list to this context's input queue. :param mixed value: message
22,523
def get_chunk_meta(self, meta_file): chunks = self.envs["CHUNKS"] if cij.nvme.get_meta(0, chunks * self.envs["CHUNK_META_SIZEOF"], meta_file): raise RuntimeError("cij.liblight.get_chunk_meta: fail") chunk_meta = cij.bin.Buffer(types=self.envs["CHUNK_META_STRUCT"], length=chunks) chunk_meta.read(meta_file) return chunk_meta
Get chunk meta table
22,524
def deposit_links_factory(pid):
    # NOTE: the string literals in this record were stripped; the endpoint
    # pattern, link keys and config key below are best-effort reconstructions
    # from the upstream invenio-deposit module.
    links = default_links_factory(pid)

    def _url(name, **kwargs):
        endpoint = 'invenio_deposit_rest.{0}_{1}'.format(
            current_records_rest.default_endpoint_prefixes[pid.pid_type],
            name,
        )
        return url_for(endpoint, pid_value=pid.pid_value, _external=True,
                       **kwargs)

    links['files'] = _url('files')
    ui_endpoint = current_app.config.get('DEPOSIT_UI_ENDPOINT')
    if ui_endpoint is not None:
        links['html'] = ui_endpoint.format(
            host=request.host,
            scheme=request.scheme,
            pid_value=pid.pid_value,
        )
    deposit_cls = Deposit
    if 'pid_value' in request.view_args:
        deposit_cls = request.view_args['pid_value'].data[1].__class__
    for action in extract_actions_from_class(deposit_cls):
        links[action] = _url('item', action=action)
    return links
Factory for record links generation. The dictionary is formed as: .. code-block:: python { 'files': '/url/to/files', 'publish': '/url/to/publish', 'edit': '/url/to/edit', 'discard': '/url/to/discard', ... } :param pid: The record PID object. :returns: A dictionary that contains all the links.
22,525
def instance(self, skip_exist_test=False): model = self.database._models[self.related_to] meth = model.lazy_connect if skip_exist_test else model return meth(self.proxy_get())
Returns the instance of the related object linked by the field.
22,526
def genesis_signing_lockset(genesis, privkey): v = VoteBlock(0, 0, genesis.hash) v.sign(privkey) ls = LockSet(num_eligible_votes=1) ls.add(v) assert ls.has_quorum return ls
in order to avoid a complicated bootstrapping, we define the genesis_signing_lockset as a lockset with one vote by any validator.
22,527
def reporter(self):
    # NOTE: many string literals (file names, delimiters, the report body)
    # were stripped from this record during extraction; placeholders below
    # mark what was lost.
    genedict = dict()
    notefile = os.path.join(self.targetpath, 'notes.txt')  # file name assumed
    with open(notefile, 'r') as notes:
        for line in notes:
            if line.startswith('#'):  # comment prefix assumed
                continue
            try:
                gene, description, _ = line.split(':')  # delimiter assumed
            except ValueError:
                try:
                    gene, description = line.split(':')
                except ValueError:
                    gene, description, _, _ = line.split(':')
            genedict[gene] = description.strip()  # replace() literals lost
    for sample in self.runmetadata.samples:
        try:
            if sample[self.analysistype].results:
                sample[self.analysistype].uniquegenes = dict()
                for name, identity in sample[self.analysistype].results.items():
                    # the two delimiter literals ('_' vs '-') are assumptions
                    sample[self.analysistype].delimiter = '_' if '_' in name else '-'
                    genename = name.split(sample[self.analysistype].delimiter)[0]
                    try:
                        bestidentity = sample[self.analysistype].uniquegenes[genename]
                        if float(identity) > float(bestidentity):
                            sample[self.analysistype].uniquegenes[genename] = float(identity)
                    except KeyError:
                        sample[self.analysistype].uniquegenes[genename] = float(identity)
        except AttributeError:
            raise
    make_path(self.reportpath)
    data = ''  # the code that builds the report body was lost in extraction
    with open(os.path.join(self.reportpath, self.analysistype + '.csv'), 'w') as report:
        for sample in self.runmetadata.samples:
            try:
                if sample[self.analysistype].results:
                    report.write(data)
            except AttributeError:
                pass  # the original trailing exception handling was lost
Creates a report of the results
22,528
def get_page(self, form):
    # NOTE: the cleaned_data keys were stripped; the names below mirror the
    # local variables and are assumptions.
    page_size = form.cleaned_data['page_size']
    start_index = form.cleaned_data['start_index']
    paginator = Paginator(self.object_list, page_size)
    num_page = (start_index // page_size) + 1
    return paginator.page(num_page)
Get the requested page
22,529
def summary(args): from jcvi.formats.base import SetFile from jcvi.formats.bed import BedSummary from jcvi.utils.table import tabulate p = OptionParser(summary.__doc__) p.add_option("--isoform", default=False, action="store_true", help="Find longest isoform of each id") p.add_option("--ids", help="Only include features from certain IDs") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args ids = opts.ids if ids: ids = SetFile(ids) logging.debug("Total ids loaded: {0}".format(len(ids))) if opts.isoform: pids = set() gff = Gff(gff_file) for g in gff: if g.type != "mRNA": continue if g.parent not in ids: continue if "longest" not in g.attributes: pids = set(x + ".1" for x in ids) break if g.attributes["longest"][0] == "0": continue pids.add(g.id) ids = pids logging.debug("After checking longest: {0}".format(len(ids))) gff = Gff(gff_file) for g in gff: if g.name in ids: ids.add(g.id) logging.debug("Total ids including aliases: {0}".format(len(ids))) gff = Gff(gff_file) beds = defaultdict(list) for g in gff: if ids and not (g.id in ids or g.name in ids or g.parent in ids): continue beds[g.type].append(g.bedline) table = {} for type, bb in sorted(beds.items()): bs = BedSummary(bb) table[(type, "Features")] = bs.nfeats table[(type, "Unique bases")] = bs.unique_bases table[(type, "Total bases")] = bs.total_bases print(tabulate(table), file=sys.stdout)
%prog summary gffile Print summary stats for features of different types.
22,530
def build_views(self): for view_str in self.view_list: logger.debug("Building %s" % view_str) if self.verbosity > 1: self.stdout.write("Building %s" % view_str) view = get_callable(view_str) self.get_view_instance(view).build_method()
Bake out specified buildable views.
22,531
def uri_from_fields(prefix, *fields):
    # NOTE: the separator/replacement literals were stripped; '_' and the
    # empty-string return value are assumptions consistent with the docstring.
    string = '_'.join(AlignmentHelper.alpha_numeric(f.strip().lower(), '_')
                      for f in fields)
    if len(string) == len(fields) - 1:
        return ''
    return prefix + string
Construct a URI out of the fields, concatenating them after removing offensive characters. When all the fields are empty, return empty
22,532
def resize(self, nrows, front=False): nrows_current = self.get_nrows() if nrows == nrows_current: return if nrows < nrows_current: rowdiff = nrows_current - nrows if front: start = 0 stop = rowdiff else: start = nrows stop = nrows_current self.delete_rows(slice(start, stop)) else: rowdiff = nrows - nrows_current if front: firstrow = 0 else: firstrow = nrows_current self._FITS.insert_rows(self._ext+1, firstrow, rowdiff) self._update_info()
Resize the table to the given size, removing or adding rows as necessary. Note if expanding the table at the end, it is more efficient to use the append function than resizing and then writing. New added rows are zeroed, except for 'i1', 'u2' and 'u4' data types which get -128, 32768, 2147483648 respectively parameters ---------- nrows: int new size of table front: bool, optional If True, add or remove rows from the front. Default is False
22,533
def close_stream(self): self.keep_listening = False self.stream.stop_stream() self.stream.close() self.pa.terminate()
Closes the stream. Performs cleanup.
22,534
def unban_chat_member(self, *args, **kwargs): return unban_chat_member(*args, **self._merge_overrides(**kwargs)).run()
See :func:`unban_chat_member`
22,535
def _MergeIdentical(self, a, b):
    # NOTE: the format placeholders in the message were stripped; '%s' is assumed.
    if a != b:
        raise MergeError("values must be identical ('%s' vs '%s')" %
                         (transitfeed.EncodeUnicode(a),
                          transitfeed.EncodeUnicode(b)))
    return b
Tries to merge two values. The values are required to be identical. Args: a: The first value. b: The second value. Returns: The trivially merged value. Raises: MergeError: The values were not identical.
22,536
def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes: encoders = [ self._registry.get_encoder(type_str) for type_str in types ] encoder = TupleEncoder(encoders=encoders) return encoder(args)
Encodes the python values in ``args`` as a sequence of binary values of the ABI types in ``types`` via the head-tail mechanism. :param types: An iterable of string representations of the ABI types that will be used for encoding e.g. ``('uint256', 'bytes[]', '(int,int)')`` :param args: An iterable of python values to be encoded. :returns: The head-tail encoded binary representation of the python values in ``args`` as values of the ABI types in ``types``.
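A hedged usage sketch of the head-tail encoding; older eth_abi releases expose an equivalent module-level encode_abi with this (types, args) signature, which is assumed here:

.. code-block:: python

    from eth_abi import encode_abi  # renamed to `encode` in newer releases

    encoded = encode_abi(['uint256', 'bool'], [12345, True])
    print(encoded.hex())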
22,537
def convert(fname, saveAs=True, showToo=False):
    # NOTE: the statement that loads `im` from fname was lost in extraction,
    # as was the mode literal for the final convert(); both are assumptions.
    im = np.array(Image.open(fname), dtype=float)  # assumed loader
    cutoffLow = np.percentile(im, .01)
    cutoffHigh = np.percentile(im, 99.99)
    im[np.where(im < cutoffLow)] = cutoffLow
    im[np.where(im > cutoffHigh)] = cutoffHigh
    im -= np.min(im)
    im /= np.max(im)
    im *= 255
    im = Image.fromarray(im)
    msg = "%s\n" % os.path.basename(fname)
    msg += "%s\n" % cm.epochToString(os.path.getmtime(fname))
    d = ImageDraw.Draw(im)
    fnt = ImageFont.truetype("arial.ttf", 20)
    d.text((6, 6), msg, font=fnt, fill=0)
    d.text((4, 4), msg, font=fnt, fill=255)
    if showToo:
        im.show()
    if saveAs is False:
        return
    if saveAs is True:
        saveAs = fname + ".png"
    im.convert('RGB').save(saveAs)
    return saveAs
Convert weird TIF files into web-friendly versions. Auto contrast is applied (saturating lower and upper 0.1%). make saveAs True to save as .TIF.png make saveAs False and it won't save at all make saveAs "someFile.jpg" to save it as a different path/format
22,538
def create_connection(cls, address, timeout=None, source_address=None): sock = socket.create_connection(address, timeout, source_address) return cls(sock)
Create a SlipSocket connection. This convenience method creates a connection to the the specified address using the :func:`socket.create_connection` function. The socket that is returned from that call is automatically wrapped in a :class:`SlipSocket` object. .. note:: The :meth:`create_connection` method does not magically turn the socket at the remote address into a SlipSocket. For the connection to work properly, the remote socket must already have been configured to use the SLIP protocol.
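A minimal connection sketch; the address is illustrative, and send_msg is sliplib's method for sending one SLIP-framed message:

.. code-block:: python

    slip_sock = SlipSocket.create_connection(('192.0.2.1', 7000))
    slip_sock.send_msg(b'\x01\x02\x03')  # framed and sent as one SLIP packet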
22,539
def cmd(name, options=''):
    # NOTE: this record lost several string literals and statements in
    # extraction; the reconstruction below follows the upstream pymux source
    # (best effort), and the helper/registry names are assumptions.
    if options:
        try:
            docopt.docopt('Usage:\n    %s %s' % (name, options), [])
        except SystemExit:
            pass

    def decorator(func):
        def command_wrapper(pymux, arguments):
            # Hack to make the 'send-keys' command accept leading dashes.
            if name == 'send-keys' and '--' not in arguments:
                for i, p in enumerate(arguments):
                    if not p.startswith('-'):
                        arguments.insert(i + 1, '--')
                        break
            try:
                if six.PY2:
                    arguments = [a.encode('utf-8') for a in arguments]
                received_options = docopt.docopt(
                    'Usage:\n    %s %s' % (name, options),
                    arguments, help=False)
            except SystemExit:
                raise CommandException('Usage: %s %s' % (name, options))
            func(pymux, received_options)

        COMMANDS_TO_HANDLERS[name] = command_wrapper
        COMMANDS_TO_OPTION_FLAGS[name] = _extract_option_flags(options)  # helper assumed
        return func
    return decorator
Decorator for all commands. Commands will receive (pymux, variables) as input. Commands can raise CommandException.
22,540
def manangeRecurringPaymentsProfileStatus(self, params, fail_silently=False):
    defaults = {"method": "ManageRecurringPaymentsProfileStatus"}
    required = ["profileid", "action"]
    nvp_obj = self._fetch(params, required, defaults)
    # NOTE: this literal was stripped; reconstructed from django-paypal.
    flag_info_test_string = ('Invalid profile status for cancel action; '
                             'profile should be active or suspended')
    if nvp_obj.flag and not (fail_silently and
                             nvp_obj.flag_info == flag_info_test_string):
        raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
    return nvp_obj
Requires `profileid` and `action` params. Action must be either "Cancel", "Suspend", or "Reactivate".
22,541
def translate(self): ctype = self.content_type() self.request.content_type = ctype if not self.is_multipart() and ctype: loadee = self.loader_for_type(ctype) if loadee: try: self.request.data = loadee(self.request.raw_post_data) self.request.POST = self.request.PUT = dict() except (TypeError, ValueError): raise MimerDataException else: self.request.data = None return self.request
Will look at the `Content-type` sent by the client, and maybe deserialize the contents into the format they sent. This will work for JSON, YAML, XML and Pickle. Since the data is not just key-value (and maybe just a list), the data will be placed on `request.data` instead, and the handler will have to read from there. It will also set `request.content_type` so the handler has an easy way to tell what's going on. `request.content_type` will always be None for form-encoded and/or multipart form data (what your browser sends.)
22,542
def show(self, id, detailed=None):
    # NOTE: the filter and URL format strings were stripped; the patterns
    # below follow newrelic-api conventions and are assumptions.
    filters = [
        'detailed={0}'.format(detailed) if detailed is not None else None,
    ]
    return self._get(
        url='{root}plugins/{id}.json'.format(
            root=self.URL,
            id=id
        ),
        headers=self.headers,
        params=self.build_param_string(filters) or None
    )
This API endpoint returns a single Key transaction, identified its ID. :type id: int :param id: Key transaction ID :type detailed: bool :param detailed: :rtype: dict :return: The JSON response of the API :: { "plugin": { "id": "integer", "name": "string", "guid": "string", "publisher": "string", "details": { "description": "integer", "is_public": "string", "created_at": "time", "updated_at": "time", "last_published_at": "time", "has_unpublished_changes": "boolean", "branding_image_url": "string", "upgraded_at": "time", "short_name": "string", "publisher_about_url": "string", "publisher_support_url": "string", "download_url": "string", "first_edited_at": "time", "last_edited_at": "time", "first_published_at": "time", "published_version": "string" }, "summary_metrics": [ { "id": "integer", "name": "string", "metric": "string", "value_function": "string", "thresholds": { "caution": "float", "critical": "float" }, "values": { "raw": "float", "formatted": "string" } } ] } }
22,543
def _start_new_episode(self): if self.has_interaction: self._flush() self.t = 0 self.has_interaction = False
Bookkeeping to do at the start of each new episode.
22,544
async def main(): client = Client(BMAS_ENDPOINT) response = await client(bma.node.summary) print(response) salt = getpass.getpass("Enter your passphrase (salt): ") password = getpass.getpass("Enter your password: ") key = SigningKey.from_credentials(salt, password) pubkey_from = key.pubkey pubkey_to = input("Enter certified pubkey: ") current_block = await client(bma.blockchain.current) identity = await get_identity_document(client, current_block, pubkey_to) certification = get_certification_document(current_block, identity, pubkey_from) certification.sign([key]) response = await client(bma.wot.certify, certification.signed_raw()) if response.status == 200: print(await response.text()) else: print("Error while publishing certification: {0}".format(await response.text())) await client.close()
Main code
22,545
def _check_image(self, X):
    # NOTE: the error-message literals were stripped and are reconstructed.
    # The second modulus check also repeated index 0 twice in the original,
    # which contradicts the docstring, so it is fixed to check both axes.
    if (len(X.shape) < 3) or (len(X.shape) > 4):
        raise ValueError('X must be a 3- or 4-dimensional array of images')
    self._samples = X.shape[0]
    self._image_size = X.shape[1:3]
    if (self._image_size[0] % self.receptive_field[0]) or \
            (self._image_size[1] % self.receptive_field[1]):
        raise ValueError(
            'Image size (%d, %d) must be a multiple of the receptive '
            'field (%d, %d)' % (self._image_size[0], self._image_size[1],
                                self.receptive_field[0],
                                self.receptive_field[1]))
Checks the image size and its compatibility with classifier's receptive field. At this moment it is required that image size = K * receptive_field. This will be relaxed in future with the introduction of padding.
22,546
def seeds(args): p = OptionParser(seeds.__doc__) p.set_outfile() opts, args, iopts = add_seeds_options(p, args) if len(args) != 1: sys.exit(not p.print_help()) pngfile, = args pf = opts.prefix or op.basename(pngfile).rsplit(".", 1)[0] sigma, kernel = opts.sigma, opts.kernel rows, cols = opts.rows, opts.cols labelrows, labelcols = opts.labelrows, opts.labelcols ff = opts.filter calib = opts.calibrate outdir = opts.outdir if outdir != : mkdir(outdir) if calib: calib = json.load(must_open(calib)) pixel_cm_ratio, tr = calib["PixelCMratio"], calib["RGBtransform"] tr = np.array(tr) resizefile, mainfile, labelfile, exif = \ convert_image(pngfile, pf, outdir=outdir, rotate=opts.rotate, rows=rows, cols=cols, labelrows=labelrows, labelcols=labelcols) oimg = load_image(resizefile) img = load_image(mainfile) fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, nrows=1, figsize=(iopts.w, iopts.h)) img_gray = rgb2gray(img) logging.debug("Running {0} edge detection ...".format(ff)) if ff == "canny": edges = canny(img_gray, sigma=opts.sigma) elif ff == "roberts": edges = roberts(img_gray) elif ff == "sobel": edges = sobel(img_gray) edges = clear_border(edges, buffer_size=opts.border) selem = disk(kernel) closed = closing(edges, selem) if kernel else edges filled = binary_fill_holes(closed) if opts.watershed: distance = distance_transform_edt(filled) local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False) coordinates = peak_local_max(distance, threshold_rel=.05) markers, nmarkers = label(local_maxi, return_num=True) logging.debug("Identified {0} watershed markers".format(nmarkers)) labels = watershed(closed, markers, mask=filled) else: labels = label(filled) w, h = img_gray.shape canvas_size = w * h min_size = int(round(canvas_size * opts.minsize / 100)) max_size = int(round(canvas_size * opts.maxsize / 100)) logging.debug("Find objects with pixels between {0} ({1}%) and {2} ({3}%)"\ .format(min_size, opts.minsize, max_size, opts.maxsize)) ax1.set_title() ax1.imshow(oimg) params = "{0}, $\sigma$={1}, $k$={2}".format(ff, sigma, kernel) if opts.watershed: params += ", watershed" ax2.set_title(.format(params)) closed = gray2rgb(closed) ax2_img = labels if opts.edges: ax2_img = closed elif opts.watershed: ax2.plot(coordinates[:, 1], coordinates[:, 0], ) ax2.imshow(ax2_img, cmap=iopts.cmap) ax3.set_title() ax3.imshow(img) filename = op.basename(pngfile) if labelfile: accession = extract_label(labelfile) else: accession = pf rp = regionprops(labels) rp = [x for x in rp if min_size <= x.area <= max_size] nb_labels = len(rp) logging.debug("A total of {0} objects identified.".format(nb_labels)) objects = [] for i, props in enumerate(rp): i += 1 if i > opts.count: break y0, x0 = props.centroid orientation = props.orientation major, minor = props.major_axis_length, props.minor_axis_length major_dx = cos(orientation) * major / 2 major_dy = sin(orientation) * major / 2 minor_dx = sin(orientation) * minor / 2 minor_dy = cos(orientation) * minor / 2 ax2.plot((x0 - major_dx, x0 + major_dx), (y0 + major_dy, y0 - major_dy), ) ax2.plot((x0 - minor_dx, x0 + minor_dx), (y0 - minor_dy, y0 + minor_dy), ) npixels = int(props.area) d = min(int(round(minor / 2 * .35)) + 1, 50) x0d, y0d = int(round(x0)), int(round(y0)) square = img[(y0d - d):(y0d + d), (x0d - d):(x0d + d)] pixels = [] for row in square: pixels.extend(row) logging.debug("Seed format(i, npixels, len(pixels), 100. 
* npixels / canvas_size)) rgb = pixel_stats(pixels) objects.append(Seed(filename, accession, i, rgb, props, exif)) minr, minc, maxr, maxc = props.bbox rect = Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, ec=, lw=1) ax3.add_patch(rect) mc, mr = (minc + maxc) / 2, (minr + maxr) / 2 ax3.text(mc, mr, "{0}".format(i), color=, ha="center", va="center", size=6) for ax in (ax2, ax3): ax.set_xlim(0, h) ax.set_ylim(w, 0) ax4.text(.1, .92, "File: {0}".format(latex(filename)), color=) ax4.text(.1, .86, "Label: {0}".format(latex(accession)), color=) yy = .8 fw = must_open(opts.outfile, "w") if not opts.noheader: print(Seed.header(calibrate=calib), file=fw) for o in objects: if calib: o.calibrate(pixel_cm_ratio, tr) print(o, file=fw) i = o.seedno if i > 7: continue ax4.text(.01, yy, str(i), va="center", bbox=dict(fc=, ec=)) ax4.text(.1, yy, o.pixeltag, va="center") yy -= .04 ax4.add_patch(Rectangle((.1, yy - .025), .12, .05, lw=0, fc=rgb_to_hex(o.rgb))) ax4.text(.27, yy, o.hashtag, va="center") yy -= .06 ax4.text(.1 , yy, "(A total of {0} objects displayed)".format(nb_labels), color="darkslategrey") normalize_axes(ax4) for ax in (ax1, ax2, ax3): xticklabels = [int(x) for x in ax.get_xticks()] yticklabels = [int(x) for x in ax.get_yticks()] ax.set_xticklabels(xticklabels, family=, size=8) ax.set_yticklabels(yticklabels, family=, size=8) image_name = op.join(outdir, pf + "." + iopts.format) savefig(image_name, dpi=iopts.dpi, iopts=iopts) return objects
%prog seeds [pngfile|jpgfile] Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image.
22,547
def source(self, format='xml', accessible=False):
    # NOTE: the endpoint literals were stripped; the paths below follow the
    # WebDriverAgent HTTP API and are assumptions.
    if accessible:
        return self.http.get('/wda/accessibleSource').value
    return self.http.get('source?format=' + format).value
Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json'
22,548
def append(self, *args): self.args.append(args) if self.started: self.started = False return self.length()
add arguments to the set
22,549
def pad_z(pts, value=0.0): pts = np.asarray(pts) if pts.shape[-1] < 3: if len(pts.shape) < 2: return np.asarray((pts[0], pts[1], value), dtype=pts.dtype) pad_col = np.full(len(pts), value, dtype=pts.dtype) pts = np.asarray((pts.T[0], pts.T[1], pad_col)).T return pts
Adds a Z component to `pts` if it is missing. The value defaults to `value` (0.0)
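A quick sketch of both input shapes handled by pad_z:

.. code-block:: python

    import numpy as np

    print(pad_z(np.array([[1.0, 2.0], [3.0, 4.0]])))
    # -> [[1. 2. 0.]
    #     [3. 4. 0.]]
    print(pad_z(np.array([5.0, 6.0]), value=1.0))  # -> [5. 6. 1.]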
22,550
def distance(self, other): from numpy.linalg import norm if isinstance(other, one): other = other.center return norm(self.center - asarray(other), ord=2)
Distance between the center of this region and another. Parameters ---------- other : one region, or array-like Either another region, or the center of another region.
22,551
def footprints_from_place(place, footprint_type=, retain_invalid=False): city = gdf_from_place(place) polygon = city[].iloc[0] return create_footprints_gdf(polygon, retain_invalid=retain_invalid, footprint_type=footprint_type)
Get footprints within the boundaries of some place. The query must be geocodable and OSM must have polygon boundaries for the geocode result. If OSM does not have a polygon for this place, you can instead get its footprints using the footprints_from_address function, which geocodes the place name to a point and gets the footprints within some distance of that point. Parameters ---------- place : string the query to geocode to get geojson boundary polygon footprint_type : string type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc. retain_invalid : bool if False discard any footprints with an invalid geometry Returns ------- GeoDataFrame
22,552
def _bp_static_url(blueprint):
    # NOTE: the format string and empty-string fallbacks were stripped;
    # '%s%s' and '' are assumptions.
    u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or ''))
    return u
builds the absolute url path for a blueprint's static folder
22,553
def extraction_data_statistics(path): with functions.DBContextManager(path) as session: extraction = session.query(models.Extraction).first() X, y = extraction.return_main_dataset() functions.verify_dataset(X, y) if extraction.test_dataset[] == : X, X_test, y, y_test = train_test_split( X, y, test_size=extraction.test_dataset[], random_state=extraction.test_dataset[], stratify=y ) elif extraction.test_dataset[] == : if not in extraction.test_dataset or not extraction.test_dataset[]: raise exceptions.UserError() extraction_code = extraction.test_dataset["source"] extraction_function = functions.\ import_object_from_string_code(extraction_code, "extract_test_dataset") X_test, y_test = extraction_function() else: X_test, y_test = None, None extraction_code = extraction.meta_feature_generation[] return_splits_iterable = functions.import_object_from_string_code( extraction_code, ) number_of_splits = 0 test_indices = [] try: for train_idx, test_idx in return_splits_iterable(X, y): number_of_splits += 1 test_indices.append(test_idx) except Exception as e: raise exceptions.UserError(, exception_message=str(e)) test_indices = np.concatenate(test_indices) X, y = X[test_indices], y[test_indices] extraction_code = extraction.stacked_ensemble_cv[] return_splits_iterable = functions.import_object_from_string_code( extraction_code, ) number_of_splits_stacked_cv = 0 try: for train_idx, test_idx in return_splits_iterable(X, y): number_of_splits_stacked_cv += 1 except Exception as e: raise exceptions.UserError(, exception_message=str(e)) data_stats = dict() data_stats[] = functions.verify_dataset(X, y) if X_test is not None: data_stats[] = functions.verify_dataset(X_test, y_test) else: data_stats[] = None data_stats[] = {: number_of_splits} data_stats[] = {: number_of_splits_stacked_cv} extraction.data_statistics = data_stats session.add(extraction) session.commit()
Generates data statistics for the given data extraction setup stored in Xcessiv notebook. This is in rqtasks.py but not as a job yet. Temporarily call this directly while I'm figuring out Javascript lel. Args: path (str, unicode): Path to xcessiv notebook
22,554
def _sending_task(self, backend): with self.backend_mutex: self.backends[backend] += 1 self.task_counter[backend] += 1 this_task = self.task_counter[backend] return this_task
Used internally to safely increment `backend`'s task count. Returns the overall count of tasks for `backend`.
22,555
def get_objs_from_record(self, record, key): uids = self.get_uids_from_record(record, key) objs = map(self.get_object_by_uid, uids) return dict(zip(uids, objs))
Returns a mapping of UID -> object
22,556
def _checkpoint(self, stage): if stage is None: return False try: is_checkpoint = stage.checkpoint except AttributeError: if hasattr(stage, "__call__"): stage = stage.__name__ else: if os.path.isabs(stage): check_fpath = stage else: check_fpath = checkpoint_filepath(stage, pm=self) return self._touch_checkpoint(check_fpath)
Decide whether to stop processing of a pipeline. This is the hook. A pipeline can report various "checkpoints" as sort of status markers that designate the logical processing phase that's just been completed. The initiation of a pipeline can preordain one of those as a "stopping point" that, when reached, should stop the pipeline's execution. :param pypiper.Stage | str stage: Pipeline processing stage/phase just completed. :return bool: Whether a checkpoint was created (i.e., whether it didn't already exist) :raise ValueError: If the stage is specified as an absolute filepath, and that path indicates a location that's not immediately within the main output folder, raise a ValueError.
22,557
def readline(self, size=None): if size is not None and size < 0: raise ValueError() if size is not None and size > self._MAXIMUM_READ_BUFFER_SIZE: raise ValueError() if not self._lines: if self._lines_buffer_offset >= self._file_object_size: return read_size = size if not read_size: read_size = self._MAXIMUM_READ_BUFFER_SIZE if self._lines_buffer_offset + read_size > self._file_object_size: read_size = self._file_object_size - self._lines_buffer_offset self._file_object.seek(self._lines_buffer_offset, os.SEEK_SET) read_buffer = self._file_object.read(read_size) self._lines_buffer_offset += len(read_buffer) self._lines = read_buffer.split(self._end_of_line) if self._lines_buffer: self._lines[0] = b.join([self._lines_buffer, self._lines[0]]) self._lines_buffer = b if read_buffer[self._end_of_line_length:] != self._end_of_line: self._lines_buffer = self._lines.pop() for index, line in enumerate(self._lines): self._lines[index] = b.join([line, self._end_of_line]) if (self._lines_buffer and self._lines_buffer_offset >= self._file_object_size): self._lines.append(self._lines_buffer) self._lines_buffer = b if not self._lines: line = self._lines_buffer self._lines_buffer = b elif not size or size >= len(self._lines[0]): line = self._lines.pop(0) else: line = self._lines[0] self._lines[0] = line[size:] line = line[:size] last_offset = self._current_offset self._current_offset += len(line) decoded_line = line.decode(self._encoding) if last_offset == 0 and decoded_line[0] == : decoded_line = decoded_line[1:] return decoded_line
Reads a single line of text. The functions reads one entire line from the file-like object. A trailing end-of-line indicator (newline by default) is kept in the string (but may be absent when a file ends with an incomplete line). An empty string is returned only when end-of-file is encountered immediately. Args: size (Optional[int]): maximum byte size to read. If present and non-negative, it is a maximum byte count (including the trailing end-of-line) and an incomplete line may be returned. Returns: str: line of text. Raises: UnicodeDecodeError: if a line cannot be decoded. ValueError: if the size is smaller than zero or exceeds the maximum (as defined by _MAXIMUM_READ_BUFFER_SIZE).
22,558
def default_privileges_revoke(name, object_name, object_type, defprivileges=None, prepend=, maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): * object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges) _validate_default_privileges(object_type, _defprivs, defprivileges) if not has_default_privileges(name, object_name, object_type, defprivileges, prepend=prepend, maintenance_db=maintenance_db, user=user, host=host, port=port, password=password, runas=runas): log.info( , object_name, object_type, defprivileges) return False _grants = .join(_defprivs) if object_type in [, ]: on_part = .format(prepend, object_name) else: on_part = object_name if object_type == : query = .format(object_name, name) else: query = .format( _grants, object_type.upper(), prepend, name) ret = _psql_prepare_and_run([, query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret[] == 0
.. versionadded:: 2019.0.0 Revoke default privileges on a postgres object CLI Example: .. code-block:: bash salt '*' postgres.default_privileges_revoke user_name table_name table \\ SELECT,UPDATE maintenance_db=db_name name Name of the role whose default privileges should be revoked object_name Name of the object on which the revoke is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to revoke, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of
22,559
def path_to(self, p): if os.path.isabs(p): return p return os.sep.join([self._original_dir, p])
Returns the absolute path to a given relative path.
22,560
def record_drop_duplicate_fields(record): out = {} position = 0 tags = sorted(record.keys()) for tag in tags: fields = record[tag] out[tag] = [] current_fields = set() for full_field in fields: field = (tuple(full_field[0]),) + full_field[1:4] if field not in current_fields: current_fields.add(field) position += 1 out[tag].append(full_field[:4] + (position,)) return out
Return a record where all the duplicate fields have been removed. Fields are considered identical considering also the order of their subfields.
22,561
def plot_colormap_components(cmap): from ._helpers import set_ax_labels plt.figure(figsize=[8, 4]) gs = grd.GridSpec(3, 1, height_ratios=[1, 10, 1], hspace=0.05) ax = plt.subplot(gs[0]) gradient = np.linspace(0, 1, 256) gradient = np.vstack((gradient, gradient)) ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.) ax.set_title(cmap.name, fontsize=20) ax.set_axis_off() ax = plt.subplot(gs[1]) x = np.arange(cmap.N) colors = cmap(x) r = colors[:, 0] g = colors[:, 1] b = colors[:, 2] RGB_weight = [0.299, 0.587, 0.114] k = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight)) r.clip(0, 1, out=r) g.clip(0, 1, out=g) b.clip(0, 1, out=b) xi = np.linspace(0, 1, x.size) plt.plot(xi, r, "r", linewidth=5, alpha=0.6) plt.plot(xi, g, "g", linewidth=5, alpha=0.6) plt.plot(xi, b, "b", linewidth=5, alpha=0.6) plt.plot(xi, k, "k", linewidth=5, alpha=0.6) ax.set_xlim(0, 1) ax.set_ylim(-0.1, 1.1) set_ax_labels(ax=ax, xlabel=None, xticks=False, ylabel="intensity") cmap = grayify_cmap(cmap) ax = plt.subplot(gs[2]) gradient = np.linspace(0, 1, 256) gradient = np.vstack((gradient, gradient)) ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.) ax.set_axis_off()
Plot the components of a given colormap.
22,562
def _search(mapping, filename): result = mapping.get(filename) if result is not None: return result name, ext = os.path.splitext(filename) result = mapping.get(ext) if result is not None: for pattern, result2 in result: if fnmatch(filename, pattern): return result2 return None
Search a Loader data structure for a filename.
22,563
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."): if not items: return items else: shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]] if len(items) > max_items: shortened.append(indicator) return joiner.join(shortened)
Abbreviate a list, truncating each element and adding an indicator at the end if the whole list was truncated. Set item_max_len to None or 0 not to truncate items.
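A short usage sketch (abbreviate_str is the helper the function relies on):

.. code-block:: python

    items = ["alpha", "beta", "gamma", "delta"]
    print(abbreviate_list(items, max_items=3))  # -> 'alpha, beta, gamma, ...'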
22,564
def sudoers(self, enable):
    f_sudoers = "/isan/vdc_1/virtual-instance/guestshell+/rootfs/etc/sudoers"
    # NOTE: the sed expressions were stripped in extraction and cannot be
    # recovered; the patterns below are placeholders only.
    if enable is True:
        sed_cmd = r" 's/^Defaults requiretty/# Defaults requiretty/' "  # placeholder
    elif enable is False:
        sed_cmd = r" 's/^# Defaults requiretty/Defaults requiretty/' "  # placeholder
    else:
        raise RuntimeError('enable must be True or False')  # message assumed
    self.guestshell("run bash sudo sed -i" + sed_cmd + f_sudoers)
This method is used to enable/disable bash sudo commands running through the guestshell virtual service. By default sudo access is prevented due to the setting in the 'sudoers' file. Therefore the setting must be disabled in the file to enable sudo commands. This method assumes that the "bash-shell" feature is enabled. @@@ TO-DO: have a mech to check &| control bash-shell feature support :param enable: True - enables sudo commands False - disables sudo commands :return: returns the response of the sed command needed to make the file change
22,565
async def list_pools() -> None:
    logger = logging.getLogger(__name__)
    logger.debug("list_pools: >>> ")
    if not hasattr(list_pools, "cb"):
        logger.debug("list_pools: Creating callback")
        list_pools.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    # NOTE: the C-function name literal was stripped; 'indy_list_pools'
    # follows the libindy naming convention and is an assumption.
    res = await do_call('indy_list_pools', list_pools.cb)
    res = json.loads(res.decode())
    logger.debug("list_pools: <<< res: %r", res)
    return res
Lists names of created pool ledgers :return: Error code
22,566
def rewrite(self, source_bucket, source_object, destination_bucket,
            destination_object=None):
    # NOTE: the log and exception message literals were stripped; the
    # messages below are best-effort reconstructions of the Airflow GCS hook.
    destination_object = destination_object or source_object
    if (source_bucket == destination_bucket and
            source_object == destination_object):
        raise ValueError(
            'Source and destination cannot be the same: bucket=%s, object=%s'
            % (source_bucket, source_object))
    if not source_bucket or not source_object:
        raise ValueError('source_bucket and source_object cannot be empty.')
    client = self.get_conn()
    source_bucket = client.get_bucket(bucket_name=source_bucket)
    source_object = source_bucket.blob(blob_name=source_object)
    destination_bucket = client.get_bucket(bucket_name=destination_bucket)
    token, bytes_rewritten, total_bytes = destination_bucket.blob(
        blob_name=destination_object).rewrite(source=source_object)
    self.log.info('Total size of the object is %s bytes. %s bytes rewritten.',
                  total_bytes, bytes_rewritten)
    while token is not None:
        token, bytes_rewritten, total_bytes = destination_bucket.blob(
            blob_name=destination_object).rewrite(
                source=source_object, token=token)
        self.log.info('Total size of the object is %s bytes. %s bytes rewritten.',
                      total_bytes, bytes_rewritten)
    self.log.info('Object %s in bucket %s rewritten to object %s in bucket %s',
                  source_object.name, source_bucket.name,
                  destination_object, destination_bucket.name)
Has the same functionality as copy, except that will work on files over 5 TB, as well as when copying between locations and/or storage classes. destination_object can be omitted, in which case source_object is used. :param source_bucket: The bucket of the object to copy from. :type source_bucket: str :param source_object: The object to copy. :type source_object: str :param destination_bucket: The destination of the object to copied to. :type destination_bucket: str :param destination_object: The (renamed) path of the object if given. Can be omitted; then the same name is used. :type destination_object: str
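A hedged usage sketch, assuming an instantiated Airflow GCS hook; bucket and object names are illustrative:

.. code-block:: python

    hook = GoogleCloudStorageHook()
    hook.rewrite(source_bucket='src-bucket',
                 source_object='data/huge-file.bin',
                 destination_bucket='dst-bucket')  # destination object name reused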
22,567
def get_html(self): here = path.abspath(path.dirname(__file__)) env = Environment(loader=FileSystemLoader(path.join(here, "res/"))) suggest = env.get_template("suggest.htm.j2") return suggest.render( logo=path.join(here, "res/logo.png"), user_login=self.user, repos=self.repos, )
Method to convert the repository list to a search results page.
22,568
def compile_file(source, globals_=None): if isinstance(source, gast.AST): source = quoting.to_source(source) tempdir = tempfile.mkdtemp() uuid = str(uuid4().hex[:4]) tmpname = os.path.join(tempdir, % uuid) with open(tmpname, ) as f: f.write(source) module_name = % uuid if six.PY3: spec = util.spec_from_file_location(module_name, tmpname) m = util.module_from_spec(spec) spec.loader.exec_module(m) else: m = imp.load_source(module_name, tmpname) if globals_: m.__dict__.update(globals_) return m
Compile by saving to file and importing that. Compiling the AST/source code this way ensures that the source code is readable by e.g. `pdb` or `inspect`. Args: source: The code to compile, either as a string or as an AST. globals_: A dictionary of variables that should be available as globals in the compiled module. They will be monkey patched after importing the module. Returns: A module object containing the compiled source code.
22,569
def check(text):
    err = "spelling.athletes"
    # NOTE: part of the message literal was garbled; reconstructed to match
    # proselint's usual phrasing.
    msg = "Misspelling of athlete's name. '{}' is the preferred form."
    misspellings = [
        ["Dwyane Wade", ["Dwayne Wade"]],
        ["Miikka Kiprusoff", ["Mikka Kiprusoff"]],
        ["Mark Buehrle", ["Mark Buerhle"]],
        ["Skylar Diggins", ["Skyler Diggins"]],
        ["Agnieszka Radwanska", ["Agnieska Radwanska"]],
        ["J.J. Redick", ["J.J. Reddick"]],
        ["Manny Pacquiao", ["Manny Packquaio"]],
        ["Antawn Jamison", ["Antwan Jamison"]],
        ["Cal Ripken", ["Cal Ripkin"]],
        ["Jhonny Peralta", ["Johnny Peralta"]],
        ["Monta Ellis", ["Monte Ellis"]],
        ["Alex Rodriguez", ["Alex Rodriquez"]],
        ["Mark Teixeira", ["Mark Texeira"]],
        ["Brett Favre", ["Brett Farve"]],
        ["Torii Hunter", ["Tori Hunter"]],
        ["Stephen Curry", ["Stephon Curry"]],
        ["Mike Krzyzewski", ["Mike Kryzewski"]],
    ]
    return preferred_forms_check(text, misspellings, err, msg)
Suggest the preferred forms.
22,570
def get_dict_registry_services(registry, template_files, warn_missing_files=True): with open(registry) as fr: parsed_registry = json.load(fr) services = {} for type, type_services in parsed_registry.iteritems(): for name, service in type_services.iteritems(): if name in services: logger.warning("Template name appears twice, ignoring later items: `%s`", name) continue template_file = get_matching_service_template_file(name, template_files) if not template_file: if warn_missing_files: logger.warning("No template file for `%s` (%s) `%s`", type, service[], name) continue services[name] = { : type, : template_file, : service[] } return services
Return a dict mapping service name to a dict containing the service's type ('fixtures', 'platform_services', 'application_services', 'internal_services'), the template file's absolute path, and a list of environments to which the service is intended to deploy. Service names that appear twice in the output list will emit a warning and ignore the latter records. Services which have no template file will not appear in the returned dict. If the `warn_missing_files` boolean is True these files will emit a warning.
22,571
def _unpack_truisms(self, c):
    # NOTE: the attribute-name prefix was stripped; '_unpack_truisms_' is
    # assumed from the method's own naming convention.
    try:
        op = getattr(self, '_unpack_truisms_' + c.op)
    except AttributeError:
        return set()
    return op(c)
Given a constraint, _unpack_truisms() returns a set of constraints that must be True for this constraint to be True.
22,572
def get_instance(self, payload):
    return UserChannelInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        user_sid=self._solution['user_sid'],
    )
Build an instance of UserChannelInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance :rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
22,573
def editpermissions_anonymous_user_view(self, request, forum_id=None):
    # NOTE: the stripped literals (context keys, title parts, filter kwargs)
    # are reconstructed from the upstream django-machina admin views and are
    # best-effort assumptions.
    forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None
    context = self.get_forum_perms_base_context(request, forum)
    context['forum'] = forum
    context['title'] = '{} - {}'.format(_('Forum permissions'), _('Anonymous user'))
    context['form'] = self._get_permissions_form(
        request, UserForumPermission,
        {'forum': forum, 'anonymous_user': True},
    )
    return render(request, self.editpermissions_anonymous_user_view_template_name,
                  context)
Allows to edit anonymous user permissions for the considered forum. The view displays a form to define which permissions are granted for the anonymous user for the considered forum.
22,574
def _get_gather_offset(self, size): gather_size = np.zeros(size).astype(int) gather_offset = np.zeros(size).astype(int) num_local_subjs = np.zeros(size).astype(int) subject_map = {} for idx, s in enumerate(np.arange(self.n_subj)): cur_rank = idx % size gather_size[cur_rank] += self.prior_size subject_map[idx] = (cur_rank, num_local_subjs[cur_rank]) num_local_subjs[cur_rank] += 1 for idx in np.arange(size - 1) + 1: gather_offset[idx] = gather_offset[idx - 1] + gather_size[idx - 1] tuple_size = tuple(gather_size) tuple_offset = tuple(gather_offset) return tuple_size, tuple_offset, subject_map
Calculate the offset for gather result from this process Parameters ---------- size : int The total number of process. Returns ------- tuple_size : tuple_int Number of elements to send from each process (one integer for each process) tuple_offset : tuple_int Number of elements away from the first element in the array at which to begin the new, segmented array for a process (one integer for each process) subject_map : dictionary Mapping between global subject id to local id
22,575
def member_del(self, cluster_id, member_id): cluster = self._storage[cluster_id] result = cluster.member_remove(member_id) self._storage[cluster_id] = cluster return result
Remove a member from the cluster.
22,576
def step_a_file_named_filename_with(context, filename): step_a_file_named_filename_and_encoding_with(context, filename, "UTF-8") if filename.endswith(".feature"): command_util.ensure_context_attribute_exists(context, "features", []) context.features.append(filename)
Creates a textual file with the content provided as docstring.
22,577
def get_feedback_from_submission(self, submission, only_feedback=False, show_everything=False, translation=gettext.NullTranslations()): if only_feedback: submission = {"text": submission.get("text", None), "problems": dict(submission.get("problems", {}))} if "text" in submission: submission["text"] = ParsableText(submission["text"], submission["response_type"], show_everything, translation).parse() if "problems" in submission: for problem in submission["problems"]: if isinstance(submission["problems"][problem], str): submission["problems"][problem] = (submission.get(, ), ParsableText(submission["problems"][problem], submission["response_type"], show_everything, translation).parse()) else: submission["problems"][problem] = (submission["problems"][problem][0], ParsableText(submission["problems"][problem][1], submission["response_type"], show_everything, translation).parse()) return submission
Get the feedback of a submission. If only_feedback is False, returns the full submission with the parsed feedback in place. Else, returns only the feedback fields ("text" and "problems"). If show_everything is True, feedback normally hidden is shown.
22,578
def rename_bika_setup():
    logger.info("Renaming Bika Setup...")
    bika_setup = api.get_bika_setup()
    bika_setup.setTitle("Setup")
    bika_setup.reindexObject()
    setup = api.get_portal().portal_setup
    # NOTE: the profile and step-name literals were stripped; the values
    # below are placeholders typical of senaite/bika upgrade steps.
    setup.runImportStepFromProfile('profile-bika.lims:default', 'typeinfo')
Rename Bika Setup to just Setup to avoid naming confusions for new users
22,579
def post(self, document):
    if type(document) is dict:
        document = [document]
    # NOTE: the method/uri literals were stripped in extraction;
    # 'post' and 'documents' below are placeholders only.
    return self.make_request(method='post', uri='documents', data=document)
Send to API a document or a list of document. :param document: a document or a list of document. :type document: dict or list :return: Message with location of job :rtype: dict :raises ValidationError: if API returns status 400 :raises Unauthorized: if API returns status 401 :raises Forbidden: if API returns status 403 :raises NotFound: if API returns status 404 :raises ApiError: if API returns other status
22,580
def insertLink(page, lnk, mark = True): CheckParent(page) annot = getLinkText(page, lnk) if annot == "": raise ValueError("link kind not supported") page._addAnnot_FromString([annot]) return
Insert a new link for the current page.
22,581
def main(self, argv=None):
    parser = optparse.OptionParser(usage=USAGE % self.__class__.__name__)
    newopt = parser.add_option
    # NOTE: the option strings were stripped; '-i/--interact' is assumed
    # from the opts.interact usage below.
    newopt('-i', '--interact', action='store_true', default=False,
           help='interact with the script after running it')
    opts, args = parser.parse_args(argv)
    if len(args) != 1:
        print >> sys.stderr, "You must supply exactly one file to run."
        sys.exit(1)
    self.run_file(args[0], opts.interact)
Run as a command-line script.
22,582
def valid_kdf(self, kdf): if kdf.input_length is None: return True if self.data_key_length > kdf.input_length(self): raise InvalidAlgorithmError( "Invalid Algorithm definition: data_key_len must not be greater than kdf_input_len" ) return True
Determine whether a KDFSuite can be used with this EncryptionSuite. :param kdf: KDFSuite to evaluate :type kdf: aws_encryption_sdk.identifiers.KDFSuite :rtype: bool
22,583
async def inspect(self, *, node_id: str) -> Mapping[str, Any]: response = await self.docker._query_json( "nodes/{node_id}".format(node_id=node_id), method="GET" ) return response
Inspect a node Args: node_id: The ID or name of the node
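A usage sketch assuming an aiodocker-style client; the client API and the node name are assumptions and may differ between library versions.

import asyncio
import aiodocker

async def show_node(node_id: str):
    docker = aiodocker.Docker()
    try:
        info = await docker.nodes.inspect(node_id=node_id)
        print(info.get("Description", {}).get("Hostname"))
    finally:
        await docker.close()

asyncio.run(show_node("swarm-manager-1"))  # hypothetical node name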
22,584
def system_config_dir():
    # NOTE: the path literals were lost in extraction and are reconstructed
    # from the docstring below.
    if LINUX or SUNOS:
        path = '/etc'
    elif BSD or MACOS:
        path = '/usr/local/etc'
    else:  # Windows
        path = os.environ.get('APPDATA')
    if path is None:
        path = ''
    else:
        path = os.path.join(path, 'glances')
    return path
r"""Return the system-wide config dir (full path). - Linux, SunOS: /etc/glances - *BSD, macOS: /usr/local/etc/glances - Windows: %APPDATA%\glances
22,585
def get_properties(properties, identifier, namespace='cid', searchtype=None,
                   as_dataframe=False, **kwargs):
    # NOTE: the string literals below were lost in extraction; they are
    # reconstructed from PubChem PUG REST conventions ('property/...',
    # 'compound' domain, 'PropertyTable'/'Properties' keys, 'CID' index).
    if isinstance(properties, text_types):
        properties = properties.split(',')
    properties = ','.join([PROPERTY_MAP.get(p, p) for p in properties])
    properties = 'property/%s' % properties
    results = get_json(identifier, namespace, 'compound', properties,
                       searchtype=searchtype, **kwargs)
    results = results['PropertyTable']['Properties'] if results else []
    if as_dataframe:
        import pandas as pd
        return pd.DataFrame.from_records(results, index='CID')
    return results
Retrieve the specified properties from PubChem. :param identifier: The compound, substance or assay identifier to use as a search query. :param namespace: (optional) The identifier type. :param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity. :param as_dataframe: (optional) Automatically extract the properties into a pandas :class:`~pandas.DataFrame`.
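A usage sketch assuming this is PubChemPy's module-level helper; the call performs a live request against the PubChem PUG REST service.

import pubchempy as pcp

props = pcp.get_properties('MolecularWeight,IsomericSMILES', 'aspirin', 'name')
print(props)  # e.g. [{'CID': 2244, 'MolecularWeight': ..., 'IsomericSMILES': ...}]

# The same query as a pandas DataFrame indexed by CID:
df = pcp.get_properties('MolecularWeight', 'aspirin', 'name', as_dataframe=True)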
22,586
def buildErrorResponse(self, request, error=None):
    if error is not None:
        cls, e, tb = error
    else:
        cls, e, tb = sys.exc_info()
    return remoting.Response(build_fault(cls, e, tb, self.gateway.debug),
                             status=remoting.STATUS_ERROR)
Builds an error response. @param request: The AMF request @type request: L{Request<pyamf.remoting.Request>} @return: The AMF response @rtype: L{Response<pyamf.remoting.Response>}
22,587
def max_interval_intersec(S):
    B = ([(left, +1) for left, right in S] +
         [(right, -1) for left, right in S])
    B.sort()
    c = 0
    best = (c, None)
    for x, d in B:
        c += d
        if best[0] < c:
            best = (c, x)
    return best
determine a value that is contained in the largest number of given intervals :param S: list of half open intervals :complexity: O(n log n), where n = len(S)
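A self-contained example of the sweep: each left endpoint adds +1, each right endpoint adds -1, and the running count peaks where the most intervals overlap.

S = [(1, 4), (2, 6), (3, 5), (7, 8)]
# (1,4), (2,6) and (3,5) all contain x = 3, so the counter peaks at 3 there.
print(max_interval_intersec(S))  # -> (3, 3)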
22,588
def add_sun_flare(img, flare_center_x, flare_center_y, src_radius, src_color, circles):
    non_rgb_warning(img)
    input_dtype = img.dtype
    needs_float = False
    if input_dtype == np.float32:
        # NOTE: the dtype literal was lost in extraction; uint8 is assumed.
        img = from_float(img, dtype=np.dtype('uint8'))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        # NOTE: the message text was lost in extraction.
        raise ValueError('Unexpected dtype {} in add_sun_flare'.format(input_dtype))
    overlay = img.copy()
    output = img.copy()
    for (alpha, (x, y), rad3, (r_color, g_color, b_color)) in circles:
        cv2.circle(overlay, (x, y), rad3, (r_color, g_color, b_color), -1)
        cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
    point = (int(flare_center_x), int(flare_center_y))
    overlay = output.copy()
    num_times = src_radius // 10
    alpha = np.linspace(0.0, 1, num=num_times)
    rad = np.linspace(1, src_radius, num=num_times)
    for i in range(num_times):
        cv2.circle(overlay, point, int(rad[i]), src_color, -1)
        alp = alpha[num_times - i - 1] ** 3
        cv2.addWeighted(overlay, alp, output, 1 - alp, 0, output)
    image_rgb = output
    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)
    return image_rgb
Add sun flare. From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library Args: img (np.array): flare_center_x (float): flare_center_y (float): src_radius: src_color (int, int, int): circles (list): Returns:
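A usage sketch on a synthetic image, assuming the function and its helpers (non_rgb_warning, from_float, to_float) are importable; the circle values are illustrative.

import numpy as np

img = np.zeros((256, 256, 3), dtype=np.uint8)
# circles entries follow the loop above: (alpha, (x, y), radius, (r, g, b))
circles = [(0.05, (140, 120), 20, (255, 255, 255))]
flared = add_sun_flare(img, flare_center_x=128, flare_center_y=100,
                       src_radius=60, src_color=(255, 255, 255),
                       circles=circles)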
22,589
def post(self, url, entity):
    try:
        if entity is None or entity == "":
            jsonData = ""
        else:
            jsonData = entity.to_json()
    except Exception:
        jsonData = jsonpickle.pickler.encode(entity)
    # NOTE: the header names and the 401 message were lost in extraction;
    # "Authorization: Bearer <token>" and an x-falkonry-source header
    # are assumed here.
    response = requests.post(
        self.host + url,
        jsonData,
        headers={
            "Content-Type": "application/json",
            "Authorization": "Bearer " + self.token,
            "x-falkonry-source": self.sourceHeader,
        },
        verify=False,
    )
    if response.status_code in (201, 409):
        try:
            return json.loads(response._content.decode('utf-8'))
        except Exception:
            return json.loads(response.content)
    elif response.status_code == 401:
        raise Exception(json.dumps({'message': 'Unauthorized Access'}))
    else:
        raise Exception(response.content)
Make a POST request to the Falkonry API server. :param url: string :param entity: Instantiated class object
22,590
def getSpecialPrice(self, product, store_view=None, identifierType=None):
    # NOTE: the API method name was lost in extraction;
    # 'catalog_product.getSpecialPrice' is assumed from the Magento XML-RPC API.
    return self.call(
        'catalog_product.getSpecialPrice',
        [product, store_view, identifierType]
    )
Get product special price data :param product: ID or SKU of product :param store_view: ID or Code of Store view :param identifierType: Defines whether the product or SKU value is passed in the "product" parameter. :return: Dictionary
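A usage sketch: 'api' stands for an instantiated client exposing this method, and the SKU is illustrative.

price_info = api.getSpecialPrice('WS08-M-Black', store_view='default',
                                 identifierType='sku')
print(price_info.get('special_price'))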
22,591
def parse_scwrl_out(scwrl_std_out, scwrl_pdb):
    # NOTE: the regexes and the temperature-factor literal were lost in
    # extraction; the values below assume SCWRL's reported graph energy
    # line and a blank " 0.00" B-factor column on coordinate records.
    score = re.findall(
        r'total minimal energy of the graph = ([-0-9.]+)', scwrl_std_out)[0]
    split_scwrl = scwrl_pdb.splitlines()
    fixed_scwrl = []
    for line in split_scwrl:
        if len(line) < 80:
            line += ' ' * (80 - len(line))
        if re.search(r'^(ATOM|HETATM)', line):
            front = line[:61]
            temp_factor = ' 0.00'
            back = line[66:]
            fixed_scwrl.append(''.join([front, temp_factor, back]))
        else:
            fixed_scwrl.append(line)
    fixed_scwrl_str = '\n'.join(fixed_scwrl) + '\n'
    return fixed_scwrl_str, float(score)
Parses SCWRL output and returns PDB and SCWRL score. Parameters ---------- scwrl_std_out : str Std out from SCWRL. scwrl_pdb : str String of packed SCWRL PDB. Returns ------- fixed_scwrl_str : str String of packed SCWRL PDB, with correct PDB format. score : float SCWRL Score
22,592
def move(self, d_xyz, inplace=False):
    state = Polygon.verify
    Polygon.verify = False
    if len(d_xyz) == 2:
        d_xyz = (d_xyz[0], d_xyz[1], 0)
    xyz = np.array(d_xyz)
    map_ = self.get_map()[1] + xyz
    space = self.map2pyny(map_)
    Polygon.verify = state
    if inplace:
        self.add_spaces(space)
        return None
    else:
        return space
Translate the whole Space in x, y and z coordinates. :param d_xyz: displacement in x, y(, and z). :type d_xyz: tuple (len=2 or 3) :param inplace: If True, the moved ``pyny.Space`` is copied and added to the current ``pyny.Space``. If False, it returns the new ``pyny.Space``. :type inplace: bool :returns: None, ``pyny.Space``
22,593
def enter(clsQname):
    def wrapper(routeHandler):
        @functools.wraps(routeHandler)
        def inner(self, request, *a, **kw):
            # Lazily instantiate the delegated Klein app's resource once,
            # memoizing it on the handler itself.
            if getattr(inner, '_subKlein', None) is None:
                cls = namedAny(clsQname)
                inner._subKlein = cls().app.resource()
            return routeHandler(self, request, inner._subKlein, *a, **kw)
        inner._subKleinQname = clsQname
        return inner
    return wrapper
Delegate a rule to another class which instantiates a Klein app. This also memoizes the resource instance on the handler function itself.
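A usage sketch with Klein; 'myproject.SubApp' is a hypothetical qualified name, and the wrapped handler receives the memoized sub-app resource as its third argument.

from klein import Klein

class SubApp(object):
    app = Klein()

    @app.route('/status')
    def status(self, request):
        return b'sub-app status'

class Root(object):
    app = Klein()

    @app.route('/sub/', branch=True)
    @enter('myproject.SubApp')
    def sub(self, request, subKlein):
        return subKlein  # serve the delegated resource under /sub/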
22,594
def delete_record(self, record):
    try:
        self.session.delete(record)
        self.session.commit()
    except Exception as e:
        self.session.rollback()
        raise ProgrammingError(e)
    finally:
        self.session.close()
Permanently removes record from table.
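A usage sketch: 'db' stands for an instance of the owning class with a configured SQLAlchemy session, and User is an illustrative mapped class.

user = db.session.query(User).filter_by(name='alice').one()
db.delete_record(user)  # commits on success; rolls back and raises on error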
22,595
def get_linked(self):
    linked_devices = {}
    self.logger.info("\nget_linked")

    # NOTE: the hub command codes and dict keys below were lost in
    # extraction; '0269'/'026A' (get first/next ALL-Link record), the
    # im_code '57' (ALL-Link record response) and the id/group/category
    # keys follow the Insteon hub protocol and are best-effort guesses.
    def process_buffer():
        msgs = self.buffer_status.get('msgs', [])
        for entry in msgs:
            im_code = entry.get('im_code', '')
            if im_code != '57':
                continue
            device_id = (entry.get('id_high', '') + entry.get('id_mid', '')
                         + entry.get('id_low', ''))
            group = entry.get('group', '')
            if device_id not in linked_devices:
                dev_info = self.id_request(device_id)
                dev_cat = dev_info.get('dev_cat', '')
                dev_sub_cat = dev_info.get('dev_sub_cat', '')
                dev_cat_record = self.get_device_category(dev_cat)
                if dev_cat_record and 'name' in dev_cat_record:
                    dev_cat_name = dev_cat_record['name']
                    dev_cat_type = dev_cat_record['type']
                else:
                    dev_cat_name = 'unknown'
                    dev_cat_type = 'unknown'
                linked_dev_model = self.get_device_model(dev_cat, dev_sub_cat)
                if 'name' in linked_dev_model:
                    dev_model_name = linked_dev_model['name']
                else:
                    dev_model_name = 'unknown'
                if 'sku' in linked_dev_model:
                    dev_sku = linked_dev_model['sku']
                else:
                    dev_sku = 'unknown'
                self.logger.info("get_linked: Got device: %s group %s "
                                 "cat type %s cat name %s dev model name %s",
                                 device_id, group, dev_cat_type,
                                 dev_cat_name, dev_model_name)
                linked_devices[device_id] = {
                    'cat_name': dev_cat_name,
                    'cat_type': dev_cat_type,
                    'model_name': dev_model_name,
                    'cat': dev_cat,
                    'sub_cat': dev_sub_cat,
                    'sku': dev_sku,
                    'group': [],
                }
            linked_devices[device_id]['group'].append(group)

    # Request the first ALL-Link record, then step through the rest until
    # the hub reports no more records.
    self.direct_command_hub('0269')
    sleep(1)
    self.get_buffer_status()
    process_buffer()
    while self.buffer_status['success']:
        self.direct_command_hub('026A')
        sleep(1)
        self.get_buffer_status()
        process_buffer()

    self.logger.info("get_linked: Final device list: %s",
                     pprint.pformat(linked_devices))
    return linked_devices
Get a list of currently linked devices from the hub
22,596
def reveal(input_image_file):
    from base64 import b64decode
    from zlib import decompress

    img = tools.open_image(input_image_file)
    try:
        if img.format in ["JPEG", "TIFF"]:
            if "exif" in img.info:
                exif_dict = piexif.load(img.info.get("exif", b""))
                description_key = piexif.ImageIFD.ImageDescription
                encoded_message = exif_dict["0th"][description_key]
            else:
                encoded_message = b""
        else:
            raise ValueError("Given file is neither JPEG nor TIFF.")
    finally:
        img.close()
    return b64decode(decompress(encoded_message))
Find a message in an image.
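A usage sketch; the file name is illustrative and assumes the message was embedded by the matching hide() function.

secret = reveal('image_with_hidden_message.jpg')
print(secret.decode('utf-8'))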
22,597
def check_child_friendly(self, name):
    name = name.split()[0]
    if name in self.container_modules:
        return
    root = os.path.dirname(os.path.realpath(__file__))
    module_path = os.path.join(root, "modules")
    try:
        info = imp.find_module(name, [module_path])
    except ImportError:
        return
    if not info:
        return
    (file, pathname, description) = info
    try:
        py_mod = imp.load_module(name, file, pathname, description)
    except Exception:
        return
    try:
        container = py_mod.Py3status.Meta.container
    except AttributeError:
        container = False
    del py_mod
    if container:
        self.container_modules.append(name)
    else:
        self.error("Module `{}` cannot contain others".format(name))
Check if a module is a container and so can have children
22,598
def get_plot(self, units='THz', ymin=None, ymax=None, width=None, height=None,
             dpi=None, plt=None, fonts=None, dos=None, dos_aspect=3,
             color=None, style=None, no_base_style=False):
    # NOTE: several string literals were lost in extraction; the default
    # units ('THz'), the fallback colour ('C0'), the gridspec keys, the
    # data keys ('distances'/'frequencies') and the line style ('-') are
    # reconstructed from the docstring and matplotlib conventions.
    if color is None:
        color = 'C0'
    if dos is not None:
        plt = pretty_subplot(1, 2, width=width, height=height, sharex=False,
                             sharey=True, dpi=dpi, plt=plt,
                             gridspec_kw={'width_ratios': [dos_aspect, 1],
                                          'wspace': 0})
        ax = plt.gcf().axes[0]
    else:
        plt = pretty_plot(width, height, dpi=dpi, plt=plt)
        ax = plt.gca()
    data = self.bs_plot_data()
    dists = data['distances']
    freqs = data['frequencies']
    for nd, nb in itertools.product(range(len(data['distances'])),
                                    range(self._nb_bands)):
        f = freqs[nd][nb]
        ax.plot(dists[nd], f, ls='-', c=color, zorder=1)
    self._maketicks(ax, units=units)
    self._makeplot(ax, plt.gcf(), data, width=width, height=height,
                   ymin=ymin, ymax=ymax, dos=dos, color=color)
    plt.tight_layout()
    plt.subplots_adjust(wspace=0)
    return plt
Get a :obj:`matplotlib.pyplot` object of the phonon band structure. Args: units (:obj:`str`, optional): Units of phonon frequency. Accepted (case-insensitive) values are Thz, cm-1, eV, meV. ymin (:obj:`float`, optional): The minimum energy on the y-axis. ymax (:obj:`float`, optional): The maximum energy on the y-axis. width (:obj:`float`, optional): The width of the plot. height (:obj:`float`, optional): The height of the plot. dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for the image. fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a a single font, specified as a :obj:`str`, or several fonts, specified as a :obj:`list` of :obj:`str`. plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot` object to use for plotting. dos (:obj:`np.ndarray`): 2D Numpy array of total DOS data dos_aspect (float): Width division for vertical DOS color (:obj:`str` or :obj:`tuple`, optional): Line/fill colour in any matplotlib-accepted format style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib style specifications, to be composed on top of Sumo base style. no_base_style (:obj:`bool`, optional): Prevent use of sumo base style. This can make alternative styles behave more predictably. Returns: :obj:`matplotlib.pyplot`: The phonon band structure plot.
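A usage sketch: 'plotter' stands for an instance of the phonon band-structure plotter class this method belongs to, already built from calculation data.

plt = plotter.get_plot(units='THz', ymin=0, width=6, height=6)
plt.savefig('phonon_bands.pdf')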
22,599
def _initialize_cfg(self):
    self.kb.functions = FunctionManager(self.kb)
    self._jobs_to_analyze_per_function = defaultdict(set)
    self._completed_functions = set()
Re-create the DiGraph