Dataset columns:
Unnamed: 0 — int64, values 0 to 389k
code — string, lengths 26 to 79.6k
docstring — string, lengths 1 to 46.9k
377,600
def _build(self, input_sequence, state):
    input_shape = input_sequence.get_shape()
    if input_shape[0] is None:
        raise ValueError("Time dimension of input (dim 0) must be statically "
                         "known.")
    seq_length = int(input_shape[0])
    forward_state, backward_state = state
    output_sequence_f = []
    output_sequence_b = []
    with tf.name_scope("forward_rnn"):
        core_state = forward_state
        for i in six.moves.range(seq_length):
            core_output, core_state = self._forward_core(
                input_sequence[i, :], core_state)
            output_sequence_f.append((core_output, core_state))
        output_sequence_f = nest.map_structure(
            lambda *vals: tf.stack(vals), *output_sequence_f)
    with tf.name_scope("backward_rnn"):
        core_state = backward_state
        for i in six.moves.range(seq_length - 1, -1, -1):
            core_output, core_state = self._backward_core(
                input_sequence[i, :], core_state)
            output_sequence_b.append((core_output, core_state))
        output_sequence_b = nest.map_structure(
            lambda *vals: tf.stack(vals), *output_sequence_b)
    return {
        "outputs": {
            "forward": output_sequence_f[0],
            "backward": output_sequence_b[0]
        },
        "state": {
            "forward": output_sequence_f[1],
            "backward": output_sequence_b[1]
        }
    }
Connects the BidirectionalRNN module into the graph. Args: input_sequence: tensor (time, batch, [feature_1, ...]). It must be time_major. state: tuple of states for the forward and backward cores. Returns: A dict with forward/backward states and output sequences: "outputs": {"forward": ..., "backward": ...}, "state": {"forward": ..., "backward": ...} Raises: ValueError: in case the time dimension is not statically known.
377,601
def max_cardinality_heuristic(G):
    adj = {v: set(G[v]) for v in G}
    num_nodes = len(adj)
    order = [0] * num_nodes
    upper_bound = 0
    labelled_neighbors = {v: 0 for v in adj}
    for i in range(num_nodes):
        v = max(labelled_neighbors, key=lambda u: labelled_neighbors[u] + random())
        del labelled_neighbors[v]
        for u in adj[v]:
            if u in labelled_neighbors:
                labelled_neighbors[u] += 1
        order[-(i + 1)] = v
    for v in order:
        dv = len(adj[v])
        if dv > upper_bound:
            upper_bound = dv
        _elim_adj(adj, v)
    return upper_bound, order
Computes an upper bound on the treewidth of graph G based on the max-cardinality heuristic for the elimination ordering. Parameters ---------- G : NetworkX graph The graph on which to compute an upper bound for the treewidth. inplace : bool If True, G will be made an empty graph in the process of running the function, otherwise the function uses a copy of G. Returns ------- treewidth_upper_bound : int An upper bound on the treewidth of the graph G. order : list An elimination order that induces the treewidth. Examples -------- This example computes an upper bound for the treewidth of the :math:`K_4` complete graph. >>> import dwave_networkx as dnx >>> import networkx as nx >>> K_4 = nx.complete_graph(4) >>> dnx.max_cardinality_heuristic(K_4) (3, [3, 1, 0, 2]) References ---------- Based on the algorithm presented in [GD]_
377,602
def getActiveSegment(self, c, i, timeStep):
    nSegments = len(self.cells[c][i])
    bestActivation = self.activationThreshold
    which = -1
    for j, s in enumerate(self.cells[c][i]):
        activity = self.getSegmentActivityLevel(s, self.activeState[timeStep],
                                                connectedSynapsesOnly=True)
        if activity >= bestActivation:
            bestActivation = activity
            which = j
    if which != -1:
        return self.cells[c][i][which]
    else:
        return None
For a given cell, return the segment with the strongest _connected_ activation, i.e. sum up the activations of the connected synapses of the segments only. That is, a segment is active only if it has enough connected synapses.
377,603
def basemz(df):
    # The trace-name string was stripped during extraction; 'basemz' is a guess.
    d = np.array(df.columns)[df.values.argmax(axis=1)]
    return Trace(d, df.index, name='basemz')
The mz of the most abundant ion.
377,604
def shelter_get(self, **kwargs):
    root = self._do_api_call("shelter.get", kwargs)
    shelter = root.find("shelter")
    record = {}
    # The original contained a duplicated nested loop over the same fields;
    # a single pass builds the same record.
    for field in shelter:
        record[field.tag] = field.text
    return record
shelter.get wrapper. Given a shelter ID, retrieve its details in dict form. :rtype: dict :returns: The shelter's details.
377,605
def value(self):
    # The JSON key and the split separator were stripped during extraction;
    # 'value' and '/' below are plausible reconstructions.
    if 'value' in self._json_data and self._json_data['value']:
        return "[Attachment: {}]".format(self._json_data['value'].split('/')[-1])
    else:
        return None
Retrieve the data value of this attachment. Will show the filename of the attachment if there is an attachment available otherwise None Use save_as in order to download as a file. Example ------- >>> file_attachment_property = project.part('Bike').property('file_attachment') >>> if file_attachment_property.value: ... file_attachment_property.save_as('file.ext') ... else: ... print('file attachment not set, its value is None')
377,606
def __calculate_adjacency_lists(graph):
    adj = {}
    for node in graph.get_all_node_ids():
        neighbors = graph.neighbors(node)
        adj[node] = neighbors
    return adj
Builds an adjacency list representation for the graph, since we can't guarantee that the internal representation of the graph is stored that way.
377,607
def t_doublequote_end(self, t):
    r'"'
    t.value = t.lexer.string_value
    # The token type string was stripped during extraction; 'STRING' is a guess.
    t.type = 'STRING'
    t.lexer.string_value = None
    t.lexer.pop_state()
    return t
r'"
377,608
def infer_getattr(node, context=None):
    obj, attr = _infer_getattr_args(node, context)
    if (obj is util.Uninferable
            or attr is util.Uninferable
            or not hasattr(obj, "igetattr")):
        return util.Uninferable
    try:
        return next(obj.igetattr(attr, context=context))
    except (StopIteration, InferenceError, AttributeInferenceError):
        if len(node.args) == 3:
            try:
                return next(node.args[2].infer(context=context))
            except InferenceError:
                raise UseInferenceDefault
        raise UseInferenceDefault
Understand getattr calls If one of the arguments is an Uninferable object, then the result will be an Uninferable object. Otherwise, the normal attribute lookup will be done.
377,609
def _send(self, msg, buffers=None):
    if self.comm is not None and self.comm.kernel is not None:
        self.comm.send(data=msg, buffers=buffers)
Sends a message to the model in the front-end.
377,610
def display_initialize(self):
    # The message strings were stripped during extraction; the texts below are
    # guesses matching the docstring ('please wait' message, narrow-build warning).
    echo(self.term.home + self.term.clear)
    echo(self.term.move_y(self.term.height // 2))
    echo(self.term.center('please wait ...').rstrip())
    flushout()
    if LIMIT_UCS == 0x10000:
        echo('\n\n')
        echo(self.term.blink_red(self.term.center(
            'narrow Python build: limited to {n} codepoints!'.format(n=LIMIT_UCS)).rstrip()))
        echo('\n\n')
        flushout()
Display 'please wait' message, and narrow build warning.
377,611
async def get_entity_by_id(self, get_entity_by_id_request):
    response = hangouts_pb2.GetEntityByIdResponse()
    # The endpoint string was stripped during extraction;
    # 'contacts/getentitybyid' is an assumption.
    await self._pb_request('contacts/getentitybyid', get_entity_by_id_request, response)
    return response
Return one or more user entities. Searching by phone number only finds entities when their phone number is in your contacts (and not always even then), and can't be used to find Google Voice contacts.
377,612
def run(main=None, argv=None, **flags):
    # '__main__' below is confirmed by the docstring; the docstring-split
    # separator, the usage format string, and the flag.__dict__ key were stripped
    # during extraction and the values used here are guesses.
    import sys as _sys
    import inspect
    main = main or _sys.modules['__main__'].main
    if main.__doc__:
        docstring = main.__doc__.split('\n')[0]
        _parser.usage = '{}'.format(docstring)
    try:
        a = inspect.getfullargspec(main)
    except AttributeError:
        a = inspect.getargspec(main)
    if a.defaults:
        kwargs = dict(zip(reversed(a.args), reversed(a.defaults)))
        add_flag(**kwargs)
    else:
        kwargs = dict()
    if a.defaults is None:
        nargs = len(a.args)
    else:
        nargs = len(a.args) - len(a.defaults)
    posargs = a.args[:nargs]
    flag.add_args(posargs)
    add_flag(**flags)
    args = argv[1:] if argv else None
    unparsed, kw = flag._parse_flags_kw(args=args)
    d = flag.__dict__['__flags']
    args = [d[k] for k in posargs]
    args += unparsed
    kwargs.update({k: d[k] for k in kwargs.keys()})
    kwargs.update(kw)
    _sys.exit(main(*args, **kwargs))
:param main: main or sys.modules['__main__'].main :param argv: argument list used in argument parse :param flags: flags to define with defaults :return:
377,613
def channels_leave(self, room_id, **kwargs):
    # The API path string was stripped during extraction; 'channels.leave'
    # matches the docstring's description of this wrapper.
    return self.__call_api_post('channels.leave', roomId=room_id, kwargs=kwargs)
Causes the callee to be removed from the channel.
377,614
def _array_type_std_res(self, counts, total, colsum, rowsum):
    if self.mr_dim_ind == 0:
        total = total[:, np.newaxis]
        rowsum = rowsum[:, np.newaxis]
    expected_counts = rowsum * colsum / total
    variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3
    return (counts - expected_counts) / np.sqrt(variance)
Return ndarray containing standard residuals for array values. The shape of the return value is the same as that of *counts*. Array variables require special processing because of the underlying math. Essentially, it boils down to the fact that the variable dimensions are mutually independent, and standard residuals are calculated for each of them separately, and then stacked together in the resulting array.
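Since the docstring leans on the underlying math, here is a minimal self-contained numpy sketch of the same standardized-residual formula applied to a plain contingency table (all names and values invented for illustration):

import numpy as np

counts = np.array([[20., 30.], [25., 25.]])  # observed contingency table
total = counts.sum()
rowsum = counts.sum(axis=1, keepdims=True)
colsum = counts.sum(axis=0, keepdims=True)
expected = rowsum * colsum / total
variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3
std_res = (counts - expected) / np.sqrt(variance)  # same formula as above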
377,615
def gradient(self):
    functional = self

    class GroupL1Gradient(Operator):
        def __init__(self):
            super(GroupL1Gradient, self).__init__(
                functional.domain, functional.domain, linear=False)

        def _call(self, x, out):
            pwnorm_x = functional.pointwise_norm(x)
            pwnorm_x.ufuncs.sign(out=pwnorm_x)
            functional.pointwise_norm.derivative(x).adjoint(pwnorm_x, out=out)
            return out

    return GroupL1Gradient()
r"""Gradient operator of the functional. The functional is not differentiable in ``x=0``. However, when evaluating the gradient operator in this point it will return 0. Notes ----- The gradient is given by .. math:: \left[ \nabla \| \|f\|_1 \|_1 \right]_i = \frac{f_i}{|f_i|} .. math:: \left[ \nabla \| \|f\|_2 \|_1 \right]_i = \frac{f_i}{\|f\|_2} else: .. math:: \left[ \nabla || ||f||_p ||_1 \right]_i = \frac{| f_i |^{p-2} f_i}{||f||_p^{p-1}}
377,616
def _val_to_store_info(self, val):
    if isinstance(val, str):
        return val, 0
    elif isinstance(val, int):
        return "%d" % val, Client._FLAG_INTEGER
    elif isinstance(val, long):  # noqa: F821 -- Python 2 code
        return "%d" % val, Client._FLAG_LONG
    return pickle.dumps(val, protocol=pickle.HIGHEST_PROTOCOL), Client._FLAG_PICKLE
Transform val to a storable representation, returning a tuple of the new value itself and its flags.
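A minimal sketch of the flag-based round trip this helper implements, with illustrative flag values rather than the library's real constants:

import pickle

_FLAG_INTEGER, _FLAG_PICKLE = 1, 2  # illustrative values, not the client's own

def val_to_store(val):
    if isinstance(val, str):
        return val, 0
    if isinstance(val, int):
        return "%d" % val, _FLAG_INTEGER
    return pickle.dumps(val, protocol=pickle.HIGHEST_PROTOCOL), _FLAG_PICKLE

def store_to_val(data, flags):
    if flags == 0:
        return data
    if flags == _FLAG_INTEGER:
        return int(data)
    return pickle.loads(data)

assert store_to_val(*val_to_store(42)) == 42
assert store_to_val(*val_to_store({"a": 1})) == {"a": 1}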
377,617
def full_name(self):
    if self._full_name is None:
        fn = self.name.replace(".", "\\.")
        parent = self._parent
        if parent is not None:
            fn = parent.full_name + "." + fn
        self._full_name = fn
    return self._full_name
Obtains the full name of the actor. :return: the full name :rtype: str
377,618
def do_execute(self):
    if self.storagehandler is None:
        return "No storage handler available!"
    expr = str(self.resolve_option("expression")).replace(
        "{X}", str(self.storagehandler.storage[str(self.resolve_option("storage_name"))]))
    expr = self.storagehandler.expand(expr)
    self.storagehandler.storage[self.resolve_option("storage_name")] = eval(expr)
    self._output.append(self.input)
    return None
The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str
377,619
def get_contract_factory(self, name: ContractName) -> Contract:
    validate_contract_name(name)
    if "contract_types" not in self.manifest:
        raise InsufficientAssetsError(
            "This package does not contain any contract type data.")
    try:
        contract_data = self.manifest["contract_types"][name]
    except KeyError:
        raise InsufficientAssetsError(
            "This package does not contain any package data to generate "
            f"a contract factory for contract type: {name}. Available contract types include: "
            f"{list(self.manifest['contract_types'].keys())}.")
    validate_minimal_contract_factory_data(contract_data)
    contract_kwargs = generate_contract_factory_kwargs(contract_data)
    contract_factory = self.w3.eth.contract(**contract_kwargs)
    return contract_factory
Return the contract factory for a given contract type, generated from the data available in ``Package.manifest``. Contract factories are accessible from the package class. .. code:: python Owned = OwnedPackage.get_contract_factory('owned') In cases where a contract uses a library, the contract factory will have unlinked bytecode. The ``ethpm`` package ships with its own subclass of ``web3.contract.Contract``, ``ethpm.contract.LinkableContract`` with a few extra methods and properties related to bytecode linking. .. code:: python >>> math = owned_package.contract_factories.math >>> math.needs_bytecode_linking True >>> linked_math = math.link_bytecode({'MathLib': '0x1234...'}) >>> linked_math.needs_bytecode_linking False
377,620
def Run(self):
    try:
        self.stats = {}
        self.BeginProcessing()
        processed_count = 0
        for client_info_batch in _IterateAllClients(
                recency_window=self.recency_window):
            for client_info in client_info_batch:
                self.ProcessClientFullInfo(client_info)
            processed_count += len(client_info_batch)
            self.Log("Processed %d clients.", processed_count)
            self.HeartBeat()
        self.FinishProcessing()
        for fd in itervalues(self.stats):
            fd.Close()
        logging.info("%s: processed %d clients.", self.__class__.__name__,
                     processed_count)
    except Exception as e:
        logging.exception("Error while calculating stats: %s", e)
        raise
Retrieve all the clients for the AbstractClientStatsCollectors.
377,621
def _dict_seq_locus(list_c, loci_obj, seq_obj):
    seqs = defaultdict(set)
    for c in list_c.values():
        for l in c.loci2seq:
            [seqs[s].add(c.id) for s in c.loci2seq[l]]
    common = [s for s in seqs if len(seqs[s]) > 1]
    seqs_in_c = defaultdict(float)
    for c in list_c.values():
        for l in c.loci2seq:
            for s in c.loci2seq[l]:
                if s in common:
                    pos = seq_obj[s].pos[l]
                    cov = 1.0 * loci_obj[l].coverage[pos]
                    if seqs_in_c[(s, c.id)] < cov:
                        seqs_in_c[(s, c.id)] = cov
    seqs_in_c = _transform(seqs_in_c)
    return seqs_in_c
Return a dict mapping each sequence to the clusters it appears in: sequences = [cluster1, cluster2, ...]
377,622
def init_pop(self):
    # Several string literals in this function were stripped during extraction;
    # the ones restored below ('SVC', 'SVR', 'ml', 'coef_',
    # 'feature_importances_', 'x') are guesses and may not match the original.
    pop = Pop(self.population_size)
    seed_with_raw_features = False
    if self.seed_with_ml:
        if self.ml_type == 'SVC' or self.ml_type == 'SVR':
            seed_with_raw_features = True
        elif (hasattr(self.pipeline.named_steps['ml'], 'coef_') or
              hasattr(self.pipeline.named_steps['ml'], 'feature_importances_')):
            coef = (self.pipeline.named_steps['ml'].coef_
                    if hasattr(self.pipeline.named_steps['ml'], 'coef_')
                    else self.pipeline.named_steps['ml'].feature_importances_)
            if len(coef.shape) > 1:
                coef = [np.mean(abs(c)) for c in coef.transpose()]
            coef = [c for c in coef if c != 0]
            locs = np.arange(len(coef))
            locs = locs[np.argsort(np.abs(coef))[::-1]]
            for i, p in enumerate(pop.individuals):
                if i < len(locs):
                    p.stack = [node('x', loc=locs[i])]
                else:
                    self.make_program(p.stack, self.func_set, self.term_set,
                                      self.random_state.randint(
                                          self.min_depth, self.max_depth + 1),
                                      self.otype)
                    p.stack = list(reversed(p.stack))
        else:
            seed_with_raw_features = True
        if seed_with_raw_features:
            for i, p in enumerate(pop.individuals):
                if i < self.n_features:
                    p.stack = [node('x',
                                    loc=self.random_state.randint(self.n_features))]
                else:
                    self.make_program(p.stack, self.func_set, self.term_set,
                                      self.random_state.randint(
                                          self.min_depth, self.max_depth + 1),
                                      self.otype)
                    p.stack = list(reversed(p.stack))
        if self.verbosity > 2:
            print("seeded initial population:",
                  self.stacks_2_eqns(pop.individuals))
    else:
        for I in pop.individuals:
            depth = self.random_state.randint(self.min_depth, self.max_depth_init)
            self.make_program(I.stack, self.func_set, self.term_set, depth,
                              self.otype)
            I.stack = list(reversed(I.stack))
    return pop
initializes population of features as GP stacks.
377,623
def setMAC(self, xEUI):
    # Python 2 code. Several string literals were stripped during extraction;
    # the strings restored below are guesses.
    print '%s call setMAC' % self.port
    address64 = ''
    try:
        if not xEUI:
            address64 = self.mac
        if not isinstance(xEUI, str):
            address64 = self.__convertLongToString(xEUI)
            if len(address64) < 16:
                address64 = address64.zfill(16)
            print address64
        else:
            address64 = xEUI
        cmd = WPANCTL_CMD + 'setprop NCP:MACAddress %s' % address64
        if self.__sendCommand(cmd)[0] != 'Fail':
            self.mac = address64
            return True
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('setMAC() error: ' + str(e))
set the extended addresss of Thread device Args: xEUI: extended address in hex format Returns: True: successful to set the extended address False: fail to set the extended address
377,624
def _new_song(self):
    # The expression choosing the new song index was lost in extraction; only the
    # comparison against the previous index survives. Capturing the old index in
    # 's' first is a plausible reconstruction.
    s = self.song
    self.song = 0
    self.dif_song = s != self.song
    self.pos = 0
Used internally to get a metasong index.
377,625
def xml(self, url, method='GET', params=None, data=None):
    # The default method string was stripped during extraction; 'GET' is a guess.
    r = self.req(url, method, params, data)
    return self.to_xml(r.content, base_url=r.url)
Make the request and return the response parsed as XML. :type url: str :param url: API URL :type method: str :param method: HTTP method :type params: dict :param params: query parameters :type data: dict :param data: request body :rtype: html.HtmlElement :return:
377,626
def complete_xml_element(self, xmlnode, doc):
    ns = xmlnode.ns()
    if self.instructions is not None:
        xmlnode.newTextChild(ns, "instructions", to_utf8(self.instructions))
    if self.form:
        self.form.as_xml(xmlnode, doc)
    if self.remove:
        xmlnode.newChild(ns, "remove", None)
    else:
        if self.registered:
            xmlnode.newChild(ns, "registered", None)
        for field in legacy_fields:
            value = getattr(self, field)
            if value is not None:
                xmlnode.newTextChild(ns, field, to_utf8(value))
Complete the XML node with `self` content. :Parameters: - `xmlnode`: XML node with the element being built. It has already right name and namespace, but no attributes or content. - `doc`: document to which the element belongs. :Types: - `xmlnode`: `libxml2.xmlNode` - `doc`: `libxml2.xmlDoc`
377,627
def __get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject,
                                              desired_type: Type[Any],
                                              children_on_fs: Dict[str, PersistedObject],
                                              logger: Logger) -> Dict[str, Any]:
    constructor_args_types_and_opt = get_constructor_attributes_types(desired_type)
    children_plan = dict()
    for attribute_name, att_desc in sorted(constructor_args_types_and_opt.items()):
        attribute_is_mandatory = att_desc[1]
        attribute_type = att_desc[0]
        if attribute_name in children_on_fs.keys():
            child_on_fs = children_on_fs[attribute_name]
            t, parser_found = self.parser_finder.build_parser_for_fileobject_and_desiredtype(
                child_on_fs, attribute_type, logger=logger)
            children_plan[attribute_name] = parser_found.create_parsing_plan(
                t, child_on_fs, logger=logger, _main_call=False)
        else:
            if attribute_is_mandatory:
                raise MissingMandatoryAttributeFiles.create(obj_on_fs, desired_type,
                                                            attribute_name)
            else:
                # The logging call wrapping this format string was stripped during
                # extraction; logger.debug with a generic message is a reconstruction.
                logger.debug('Optional attribute {att} of {typ} not found in {loc}.'.format(
                    loc=obj_on_fs.get_pretty_location(blank_parent_part=False,
                                                      append_file_ext=False),
                    typ=get_pretty_type_str(desired_type),
                    att=attribute_name))
    return children_plan
Simply inspects the required type to find the names and types of its constructor arguments. Then relies on the inner ParserFinder to parse each of them. :param obj_on_fs: :param desired_type: :param children_on_fs: :param logger: :return:
377,628
def anomalyGetLabels(self, start, end): return self._getAnomalyClassifier().getSelf().getLabels(start, end)
Get labels from the anomaly classifier within this model. :param start: (int) index to start getting labels :param end: (int) index to end getting labels
377,629
def get_aux_files(basename):
    base = os.path.splitext(basename)[0]
    files = {"bkg": base + "_bkg.fits",
             "rms": base + "_rms.fits",
             "mask": base + ".mim",
             "cat": base + "_comp.fits",
             "psf": base + "_psf.fits"}
    for k in files.keys():
        if not os.path.exists(files[k]):
            files[k] = None
    return files
Look for and return all the aux files that are associated with this filename. Will look for: background (_bkg.fits) rms (_rms.fits) mask (.mim) catalogue (_comp.fits) psf map (_psf.fits) Will return filenames if they exist, or None where they do not. Parameters ---------- basename : str The name/path of the input image. Returns ------- aux : dict Dict of filenames or None with keys (bkg, rms, mask, cat, psf)
377,630
def cmd(str, print_ret=False, usr_pwd=None, run=True):
    # Several format strings and split separators were stripped during
    # extraction; the ones restored below are guesses.
    if usr_pwd:
        str = 'echo {} | sudo -S -u {} {}'.format(usr_pwd[1], usr_pwd[0], str)
    print('executing: {}'.format(str))
    if run:
        err, ret = commands.getstatusoutput(str)
    else:
        err = None
        ret = None
    if err:
        print('error: {}'.format(ret))
        raise Exception(ret)
    if ret and print_ret:
        lines = ret.split('\n')
        for line in lines:
            print('{}'.format(line))
    return ret
Executes a command and throws an exception on error. in: str - command print_ret - print command return usr_pwd - execute command as another user (user_name, password) run - really execute command? out: returns the command output
377,631
def _wait(self, objects, attr, value, wait_interval=None, wait_time=None):
    objects = list(objects)
    if not objects:
        return
    if wait_interval is None:
        wait_interval = self.wait_interval
    if wait_time < 0:
        end_time = None
    else:
        if wait_time is None:
            wait_time = self.wait_time
        if wait_time is None or wait_time < 0:
            end_time = None
        else:
            end_time = time() + wait_time
    while end_time is None or time() < end_time:
        loop_start = time()
        next_objs = []
        for o in objects:
            obj = o.fetch()
            if getattr(obj, attr, None) == value:
                yield obj
            else:
                next_objs.append(obj)
        objects = next_objs
        if not objects:
            break
        loop_end = time()
        time_left = wait_interval - (loop_end - loop_start)
        if end_time is not None:
            time_left = min(time_left, end_time - loop_end)
        if time_left > 0:
            try:
                sleep(time_left)
            except KeyboardInterrupt:
                for o in objects:
                    yield o
                return
    if objects:
        raise WaitTimeoutError(objects, attr, value, wait_interval, wait_time)
r""" Calls the ``fetch`` method of each object in ``objects`` periodically until the ``attr`` attribute of each one equals ``value``, yielding the final state of each object as soon as it satisfies the condition. If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any remaining in-progress objects) is raised. If a `KeyboardInterrupt` is caught, any remaining objects are returned immediately without waiting for completion. .. versionchanged:: 0.2.0 Raises `WaitTimeoutError` on timeout :param iterable objects: an iterable of `Resource`\ s with ``fetch`` methods :param string attr: the attribute to watch :param value: the value of ``attr`` to wait for :param number wait_interval: how many seconds to sleep between requests; defaults to :attr:`wait_interval` if not specified or `None` :param number wait_time: the total number of seconds after which the method will raise an error if any objects have not yet completed, or a negative number to wait indefinitely; defaults to :attr:`wait_time` if not specified or `None` :rtype: generator :raises DOAPIError: if the API endpoint replies with an error :raises WaitTimeoutError: if ``wait_time`` is exceeded
377,632
def next(self):
    try:
        return self.results.pop(0)
    except IndexError:
        if self.next_uri is None:
            raise StopIteration()
        else:
            if not self.next_uri:
                self.results = self.list_method(marker=self.marker,
                                                limit=self.limit,
                                                prefix=self.prefix)
            else:
                args = self.extra_args
                self.results = self._list_method(self.next_uri, *args)
            if self.results:
                last_res = self.results[-1]
                self.marker = getattr(last_res, self.marker_att)
            try:
                return self.results.pop(0)
            except IndexError:
                raise StopIteration()
Return the next available item. If there are no more items in the local 'results' list, check if there is a 'next_uri' value. If so, use that to get the next page of results from the API, and return the first item from that query.
377,633
def remove_xattr(self, path, xattr_name, **kwargs):
    # The kwarg key and the operation string were stripped during extraction;
    # 'xattr.name' and 'REMOVEXATTR' follow the WebHDFS API and are assumptions.
    kwargs['xattr.name'] = xattr_name
    response = self._put(path, 'REMOVEXATTR', **kwargs)
    assert not response.content
Remove an xattr of a file or directory.
377,634
def add_val(self, val):
    if not isinstance(val, dict):
        raise ValueError(dict)
    self.read()
    self.config.update(val)
    self.save()
Add a value in the form of a dict.
377,635
def _can_parse(self, content_type):
    # The 'Accept' header key and the ',' split separator were stripped during
    # extraction; both are restored from the docstring.
    content_type, content_subtype, content_param = utils.parse_media_type(content_type)
    for accepted in self.headers.get('Accept', self.DEFAULT_CONTENT_TYPE).split(','):
        type, subtype, param = utils.parse_media_type(accepted)
        matched = (type == content_type) \
            and (subtype == content_subtype) \
            and (param == content_param or not (param and content_param))
        if matched:
            return True
    return False
Whether this navigator can parse the given content-type. Checks that the content_type matches one of the types specified in the 'Accept' header of the request, if supplied. If not supplied, matches against the default
377,636
def get_value_with_source(self, layer=None):
    if layer:
        return self._values[layer]
    for layer in reversed(self._layers):
        if layer in self._values:
            return self._values[layer]
    raise KeyError(layer)
Returns a tuple of the value's source and the value at the specified layer. If no layer is specified then the outer layer is used. Parameters ---------- layer : str Name of the layer to use. If None then the outermost where the value exists will be used. Raises ------ KeyError If the value is not set for the specified layer
377,637
def info(self, *args, **kwargs):
    self.lock()
    try:
        return logger.info(*args, **kwargs)
    finally:
        self.unlock()
Logs the line if the current thread owns the underlying lock; otherwise blocks until the lock can be acquired.
377,638
def group_by(resources, key):
    # The split separator was stripped during extraction; '.' matches the
    # docstring's "dotted form".
    resource_map = {}
    parts = key.split('.')
    for r in resources:
        v = r
        for k in parts:
            v = v.get(k)
            if not isinstance(v, dict):
                break
        resource_map.setdefault(v, []).append(r)
    return resource_map
Return a mapping of key value to resources with the corresponding value. Key may be specified as dotted form for nested dictionary lookup
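A small usage sketch, assuming the '.'-separator reconstruction above (data invented for illustration):

resources = [
    {"id": "i-1", "tags": {"env": "prod"}},
    {"id": "i-2", "tags": {"env": "dev"}},
    {"id": "i-3", "tags": {"env": "prod"}},
]
by_env = group_by(resources, "tags.env")
# {'prod': [<i-1>, <i-3>], 'dev': [<i-2>]}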
377,639
def build(self):
    # Two literals were stripped during extraction: the handedness string and the
    # atom key; 'l' and 'CA' below are guesses.
    helical_helix = Polypeptide()
    primitive_coords = self.curve_primitive.coordinates
    helices = [Helix.from_start_and_end(start=primitive_coords[i],
                                        end=primitive_coords[i + 1],
                                        helix_type=self.minor_helix_type,
                                        aa=1)
               for i in range(len(primitive_coords) - 1)]
    residues_per_turn = self.minor_residues_per_turn(
        minor_repeat=self.minor_repeat)
    if residues_per_turn == 0:
        residues_per_turn = _helix_parameters[self.minor_helix_type][0]
    if self.minor_handedness == 'l':
        residues_per_turn *= -1
    if self.orientation != -1:
        initial_angle = dihedral(numpy.array([0, 0, 0]), primitive_coords[0],
                                 primitive_coords[1], helices[0][0]['CA'])
    else:
        initial_angle = dihedral(
            numpy.array([0, 0, primitive_coords[0][2]]),
            primitive_coords[0],
            numpy.array([primitive_coords[0][0], primitive_coords[0][1],
                         primitive_coords[1][2]]),
            helices[0][0]['CA'])
    addition_angle = self.phi_c_alpha - initial_angle
    for i, h in enumerate(helices):
        angle = (i * (360.0 / residues_per_turn)) + addition_angle
        h.rotate(angle=angle, axis=h.axis.unit_tangent, point=h.helix_start)
        helical_helix.extend(h)
    helical_helix.relabel_all()
    self._monomers = helical_helix._monomers[:]
    for monomer in self._monomers:
        monomer.ampal_parent = self
    return
Builds the `HelicalHelix`.
377,640
def all_subslices(itr):
    # The assertion message and the attribute name were stripped during
    # extraction; '__len__' is inferred from the use of len(itr) below.
    assert iterable(itr), 'not an iterable: {}'.format(itr)
    if not hasattr(itr, '__len__'):
        itr = deque(itr)
    len_itr = len(itr)
    for start, _ in enumerate(itr):
        d = deque()
        for i in islice(itr, start, len_itr):
            d.append(i)
            yield tuple(d)
generates every possible slice that can be generated from an iterable
377,641
def fill_x509_data(self, x509_data):
    # The element-name strings were stripped during extraction; the 'ds:...'
    # names below follow the XMLDSig schema and are assumptions.
    x509_issuer_serial = x509_data.find('ds:X509IssuerSerial',
                                        namespaces=constants.NS_MAP)
    if x509_issuer_serial is not None:
        self.fill_x509_issuer_name(x509_issuer_serial)
    x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
    if x509_crl is not None and self.crl is not None:
        x509_data.text = base64.b64encode(
            self.crl.public_bytes(serialization.Encoding.DER))
    x509_subject = x509_data.find('ds:X509SubjectName',
                                  namespaces=constants.NS_MAP)
    if x509_subject is not None:
        x509_subject.text = get_rdns_name(self.x509.subject.rdns)
    x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
    if x509_ski is not None:
        x509_ski.text = base64.b64encode(
            self.x509.extensions.get_extension_for_oid(
                ExtensionOID.SUBJECT_KEY_IDENTIFIER).value.digest)
    x509_certificate = x509_data.find('ds:X509Certificate',
                                      namespaces=constants.NS_MAP)
    if x509_certificate is not None:
        s = base64.b64encode(
            self.x509.public_bytes(encoding=serialization.Encoding.DER))
        x509_certificate.text = b64_print(s)
Fills the X509Data Node :param x509_data: X509Data Node :type x509_data: lxml.etree.Element :return: None
377,642
def decode(input, fallback_encoding, errors='replace'):
    # The default errors string was stripped during extraction; 'replace' is
    # webencodings' documented default and an assumption here.
    fallback_encoding = _get_encoding(fallback_encoding)
    bom_encoding, input = _detect_bom(input)
    encoding = bom_encoding or fallback_encoding
    return encoding.codec_info.decode(input, errors)[0], encoding
Decode a single string. :param input: A byte string :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does not have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: An ``(output, encoding)`` tuple of a Unicode string and an :obj:`Encoding`.
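A usage sketch, under the assumption that this is webencodings' public decode():

import webencodings

# A UTF-8 BOM overrides the latin1 fallback and is stripped from the output.
text, enc = webencodings.decode(b'\xef\xbb\xbfcaf\xc3\xa9', 'latin1')
# text == 'café', enc.name == 'utf-8'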
377,643
def _process_event(self, event):
    # The event-key strings were stripped during extraction; 'user' and
    # 'channel' are plausible reconstructions.
    if event.get('user'):
        event.user = self.lookup_user(event.get('user'))
    if event.get('channel'):
        event.channel = self.lookup_channel(event.get('channel'))
    if self.user.id in event.mentions:
        event.mentions_me = True
    event.mentions = [self.lookup_user(uid) for uid in event.mentions]
    return event
Extend event object with User and Channel objects
377,644
def merge_wavelengths(waveset1, waveset2, threshold=1e-12):
    if waveset1 is None and waveset2 is None:
        out_wavelengths = None
    elif waveset1 is not None and waveset2 is None:
        out_wavelengths = waveset1
    elif waveset1 is None and waveset2 is not None:
        out_wavelengths = waveset2
    else:
        out_wavelengths = np.union1d(waveset1, waveset2)
        delta = out_wavelengths[1:] - out_wavelengths[:-1]
        i_good = np.where(delta > threshold)
        if len(i_good[0]) < delta.size:
            out_wavelengths = np.append(
                out_wavelengths[i_good], out_wavelengths[-1])
    return out_wavelengths
Return the union of the two sets of wavelengths using :func:`numpy.union1d`. The merged wavelengths may sometimes contain numbers which are nearly equal but differ at levels as small as 1e-14. Having values this close together can cause problems down the line. So, here we test whether any such small differences are present, with a small difference defined as less than ``threshold``. If a small difference is present, the lower of the too-close pair is removed. Parameters ---------- waveset1, waveset2 : array-like or `None` Wavelength values, assumed to be in the same unit already. Also see :func:`~synphot.models.get_waveset`. threshold : float, optional Merged wavelength values are considered "too close together" when the difference is smaller than this number. The default is 1e-12. Returns ------- out_wavelengths : array-like or `None` Merged wavelengths. `None` if undefined.
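A short sketch of the near-duplicate pruning described above (values invented):

import numpy as np

w1 = np.array([1000.0, 2000.0, 3000.0])
w2 = np.array([1500.0, 2000.0 + 1e-14, 3500.0])
merged = merge_wavelengths(w1, w2)
# 2000.0 and 2000.0 + 1e-14 collapse to a single value because their
# difference is below the default 1e-12 threshold.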
377,645
def update(self):
    result = self.api.github_api(*self._apicall_parameters())
    if result is None:
        self._next_update = datetime.now() + timedelta(seconds=self.BACKOFF)
        self._cached_result = self._apiresult_error()
    else:
        self._next_update = None
        self._cached_result = self._apiresult_postprocess(result)
    if not self._first_lookup:
        del self._data
    else:
        self._first_lookup = False
    return result is not None
Connect to GitHub API endpoint specified by `_apicall_parameters()`, postprocess the result using `_apiresult_postprocess()` and trigger a cache update if the API call was successful. If an error occurs, cache the empty result generated by `_apiresult_error()`. Additionally, set up retrying after a certain time. Return `True` if the API call was successful, `False` otherwise. Call this method directly if you want to invalidate the current cache. Otherwise, just call `data()`, which will automatically call `update()` if required.
377,646
def click(self, focus=None, sleep_interval=None):
    # The stripped literals ('anchor', 'click') are restored from the docstring,
    # which names 'anchor' as the default focus.
    focus = focus or self._focus or 'anchor'
    pos_in_percentage = self.get_position(focus)
    self.poco.pre_action('click', self, pos_in_percentage)
    ret = self.poco.click(pos_in_percentage)
    if sleep_interval:
        time.sleep(sleep_interval)
    else:
        self.poco.wait_stable()
    self.poco.post_action('click', self, pos_in_percentage)
    return ret
Perform the click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the default one. It is also possible to click another point offset by providing ``focus`` argument. See ``CoordinateSystem`` for more details. Args: focus (2-:obj:`tuple`/2-:obj:`list`/:obj:`str`): an offset point (x, y) from the top left corner of the UI element(s), values must be in range of 0~1. This argument can be also specified by 'anchor' or 'center'. 'Center' means to click the center of bounding box of UI element. sleep_interval: number of seconds to wait after this action. Default is None which is the default sleep interval. This value can be configured by Poco initialization. See configuration at poco :py:class:`initialization <poco.pocofw.Poco>` for more details. Raises: PocoNoSuchNodeException: raised when the UI element does not exist
377,647
def register_message_handler(self, callback, *custom_filters, commands=None,
                             regexp=None, content_types=None, state=None,
                             run_task=None, **kwargs):
    filters_set = self.filters_factory.resolve(self.message_handlers,
                                               *custom_filters,
                                               commands=commands,
                                               regexp=regexp,
                                               content_types=content_types,
                                               state=state,
                                               **kwargs)
    self.message_handlers.register(self._wrap_async_task(callback, run_task),
                                   filters_set)
Register handler for message .. code-block:: python3 # This handler works only if state is None (by default). dp.register_message_handler(cmd_start, commands=['start', 'about']) dp.register_message_handler(entry_point, commands=['setup']) # This handler works only if current state is "first_step" dp.register_message_handler(step_handler_1, state="first_step") # If you want to handle all states by one handler, use `state="*"`. dp.register_message_handler(cancel_handler, commands=['cancel'], state="*") dp.register_message_handler(cancel_handler, lambda msg: msg.text.lower() == 'cancel', state="*") :param callback: :param commands: list of commands :param regexp: REGEXP :param content_types: List of content types. :param custom_filters: list of custom filters :param kwargs: :param state: :return: decorated function
377,648
def get_class(kls):
    # The '.' separator literals were stripped during extraction; they are
    # restored from the docstring's dotted-path examples.
    parts = kls.split('.')
    try:
        module = '.'.join(parts[:-1])
        m = __import__(module)
    except ImportError:
        module = '.'.join(parts[:-2])
        m = __import__(module)
    t = None
    starter = None
    for i in range(1, len(parts)):
        comp = parts[i]
        starter = parts[i:]
        m = getattr(m, comp)
        if isinstance(m, class_types):
            t = type
            starter = None if len(parts[i:]) == 1 else '.'.join(parts[i + 1:])
            break
        if isinstance(m, types.FunctionType):
            t = types.FunctionType
            starter = None
            break
    return t, m, starter
:param kls - string of fully identified starter function or starter method path for instance: - workers.abstract_worker.AbstractWorker.start - workers.example_script_worker.main :return tuple (type, object, starter) for instance: - (FunctionType, <function_main>, None) - (type, <Class_...>, 'start')
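A usage sketch reusing the docstring's own example paths (the modules are hypothetical):

import types

t, obj, starter = get_class('workers.example_script_worker.main')
# -> (types.FunctionType, <function main>, None)

t, obj, starter = get_class('workers.abstract_worker.AbstractWorker.start')
# -> (type, <class AbstractWorker>, 'start')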
377,649
def getCocktailSum(e0, e1, eCocktail, uCocktail):
    # The logging format strings in this function were stripped during
    # extraction; the generic messages below are placeholders, not the originals.
    mask = (eCocktail >= e0) & (eCocktail <= e1)
    if np.any(mask):
        idx = getMaskIndices(mask)
        eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
        not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
        uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
        logging.debug('cocktail sum: {}'.format(uCocktailSum))
        if not_coinc_low:
            eCl_bw = eCl - eCocktail[idx[0] - 1]
            corr_low = (eCl - e0) / eCl_bw
            abs_corr_low = float(corr_low) * uCocktail[idx[0] - 1]
            uCocktailSum += abs_corr_low
            logging.debug(('low edge %g/%g/%g/%g/%g: ' % (
                e0, eCl, eCl - e0, eCl_bw, corr_low
            )) + '{} -> {}'.format(abs_corr_low, uCocktailSum))
        if not_coinc_upp:
            if idx[1] + 1 < len(eCocktail):
                eCu_bw = eCocktail[idx[1] + 1] - eCu
                corr_upp = (e1 - eCu) / eCu_bw
                abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
            else:
                abs_corr_upp = eCu_bw = corr_upp = 0
            uCocktailSum += abs_corr_upp
            logging.debug(('upper edge %g/%g/%g/%g/%g: ' % (
                e1, eCu, e1 - eCu, eCu_bw, corr_upp
            )) + '{} -> {}'.format(abs_corr_upp, uCocktailSum))
    else:
        mask = (eCocktail >= e0)
        idx = getMaskIndices(mask)
        if idx[0] == idx[1] and idx[0] == len(eCocktail) - 1:
            corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0] - 1])
            uCocktailSum = float(corr) * uCocktail[idx[0] - 1]
        else:
            corr = (e1 - e0) / (eCocktail[idx[0] + 1] - eCocktail[idx[0]])
            uCocktailSum = float(corr) * uCocktail[idx[0]]
        logging.debug('extrapolated cocktail sum: {}'.format(uCocktailSum))
    return uCocktailSum
get the cocktail sum for a given data bin range
377,650
def services(self):
    # The JSON key strings were stripped during extraction; 'services',
    # 'serviceName' and 'type' follow the ArcGIS REST API and are assumptions.
    self._services = []
    params = {"f": "json"}
    json_dict = self._get(url=self._currentURL,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    if "services" in json_dict.keys():
        for s in json_dict["services"]:
            uURL = self._currentURL + "/%s.%s" % (s["serviceName"], s["type"])
            self._services.append(
                AGSService(url=uURL,
                           securityHandler=self._securityHandler,
                           proxy_url=self._proxy_url,
                           proxy_port=self._proxy_port))
    return self._services
returns the services in the current folder
377,651
def find_donor_catchments(self, include_subject_catchment='auto'):
    # The stripped default value is restored as 'auto' based on the docstring.
    if self.gauged_cachments:
        self.donor_catchments = self.gauged_cachments.most_similar_catchments(
            subject_catchment=self.catchment,
            similarity_dist_function=lambda c1, c2: self._similarity_distance(c1, c2),
            include_subject_catchment=include_subject_catchment)
    else:
        self.donor_catchments = []
Find list of suitable donor catchments, ranked by hydrological similarity distance measure. This method is implicitly called when calling the :meth:`.growth_curve` method unless the attribute :attr:`.donor_catchments` is set manually. The results are stored in :attr:`.donor_catchments`. The (list of) :class:`floodestimation.entities.Catchment` will have an additional attribute :attr:`similarity_dist`. :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext2000 < 0.03 - `force`: always include subject catchment - `exclude`: do not include the subject catchment :type include_subject_catchment: str
377,652
def check_bcr_catchup(self):
    # This function was heavily garbled by extraction: the conditional and the
    # peer loop wrapping the first half are missing. The structure below keeps
    # every surviving statement; the inferred scaffolding is a guess.
    logger.debug(f"Checking if BlockRequests has caught up {len(BC.Default().BlockRequests)}")
    if len(BC.Default().BlockRequests) > 0:
        for peer in self.Peers:
            peer_bcr_len = len(peer.myblockrequests)
            if peer_bcr_len == 0:
                peer.start_outstanding_data_request[HEARTBEAT_BLOCKS] = 0
            print(f"{peer.prefix} request count: {peer_bcr_len}")
            if peer_bcr_len == 1:
                next_hash = BC.Default().GetHeaderHash(self.CurrentBlockheight + 1)
                print(f"{peer.prefix} {peer.myblockrequests} {next_hash}")
    else:
        self.stop_check_bcr_loop()
        self.check_bcr_loop = None
        logger.debug("BlockRequests have caught up...resuming sync")
        for peer in self.Peers:
            peer.ProtocolReady()
            time.sleep(2)
we're exceeding data request speed vs receive + process
377,653
def _sleep(self, seconds):
    for _ in range(int(seconds)):
        if not self.force_stop:
            sleep(1)
Sleep between requests, but don't force asynchronous code to wait :param seconds: The number of seconds to sleep :return: None
377,654
def background_at_centroid(self):
    from scipy.ndimage import map_coordinates
    if self._background is not None:
        if (self._is_completely_masked or
                np.any(~np.isfinite(self.centroid))):
            return np.nan * self._background_unit
        else:
            # The mode string was stripped during extraction; 'nearest' is a guess.
            value = map_coordinates(self._background,
                                    [[self.ycentroid.value],
                                     [self.xcentroid.value]],
                                    order=1, mode='nearest')[0]
            return value * self._background_unit
    else:
        return None
The value of the ``background`` at the position of the source centroid. The background value at fractional position values are determined using bilinear interpolation.
377,655
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
    # Large parts of this function were destroyed by extraction: several string
    # literals were fused together and whole statements (the RequestTimeTooSkewed
    # retry, the missing-object and ExpiredToken handling, and most of the
    # download() body) are missing. The skeleton below keeps what survived and
    # marks the gaps; the restored literals are guesses.
    assert url.endswith('.lzo'), 'Expect an lzop-compressed file'

    def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
        def standard_detail_message(prefix=''):
            return (prefix + ' There have been {n} attempts to fetch wal '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=url))
        typ, value, tb = exc_tup
        del exc_tup
        if issubclass(typ, socket.error):
            socketmsg = value[1] if isinstance(value, tuple) else value
            logger.info(
                msg='Retrying fetch because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'.".format(socketmsg)))
        # Lost in extraction: handling for RequestTimeTooSkewed ("Retrying fetch
        # because of a Request Skew time"), unexpected exceptions ("The exception
        # type is {etype} and its value is {evalue} and its traceback is
        # {etraceback}"), objects that "could no longer [be] locate[d] ... while
        # performing wal restore" (normal during timeline detection), and
        # ExpiredToken errors (normal when using STS credentials).

    def download():
        # Reconstruction: only the success logging of the original body survived.
        logger.info(
            msg='completed download and decompression',
            detail='Downloaded and decompressed "{url}" to "{path}"'.format(
                url=url, path=path))
        return True

    if do_retry:
        download = retry(
            retry_with_count(log_wal_fetch_failures_on_error))(download)
    return download()
Get and decompress a S3 URL This streams the content directly to lzop; the compressed version is never stored on disk.
377,656
def render(self, progress, width=None, status=None):
    results = [widget.render(progress, width=self._widget_lengths[i], status=status)
               for i, widget in enumerate(self._widgets)]
    if self._file_mode:
        res = ""
        for i, result in enumerate(results):
            res += result.rendered
            if result.length < self._widget_lengths[i] and progress < 1:
                break
            res += " " if i < len(results) - 1 else ""
        rendered_str = res[len(self._rendered):]
        self._rendered = res
    else:
        rendered_str = " ".join(r.rendered for r in results)
    if self._to_render:
        rendered_str = self._to_render + rendered_str
        self._to_render = None
    next_progress = min(r.next_progress for r in results)
    next_time = min(r.next_time for r in results)
    return RenderResult(rendered_str, next_progress=next_progress,
                        next_time=next_time)
Render the widget.
377,657
def p_andnode_expression(self, t):
    # The term-name string was stripped during extraction; 'vertex' is a guess.
    self.accu.add(Term('vertex', ["and(\"" + t[2] + "\")"]))
    t[0] = "and(\"" + t[2] + "\")"
andnode_expression : LB identlist RB
377,658
def extract_number_oscillations(self, index, amplitude_threshold): return pyclustering.utils.extract_number_oscillations(self.__amplitude, index, amplitude_threshold);
! @brief Extracts number of oscillations of specified oscillator. @param[in] index (uint): Index of oscillator whose dynamic is considered. @param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example, when oscillator amplitude is greater than threshold then oscillation is incremented. @return (uint) Number of oscillations of specified oscillator.
377,659
def get_authoryear_from_entry(entry, paren=False):
    # The name-join separator, the person-role keys and the citation templates
    # were stripped during extraction; the values below emulate natbib-style
    # citations and are assumptions.
    def _format_last(person):
        return ' '.join([n.strip() for n in person.last_names])

    if len(entry.persons['author']) > 0:
        persons = entry.persons['author']
    elif len(entry.persons['editor']) > 0:
        persons = entry.persons['editor']
    else:
        raise AuthorYearError
    try:
        year = entry.fields['year']
    except KeyError:
        raise AuthorYearError
    if paren and len(persons) == 1:
        template = '{author} ({year})'
        return template.format(author=_format_last(persons[0]), year=year)
    elif not paren and len(persons) == 1:
        template = '{author} {year}'
        return template.format(author=_format_last(persons[0]), year=year)
    elif paren and len(persons) == 2:
        template = '{author1} and {author2} ({year})'
        return template.format(author1=_format_last(persons[0]),
                               author2=_format_last(persons[1]), year=year)
    elif not paren and len(persons) == 2:
        template = '{author1} and {author2} {year}'
        return template.format(author1=_format_last(persons[0]),
                               author2=_format_last(persons[1]), year=year)
    elif not paren and len(persons) > 2:
        template = '{author} et al {year}'
        return template.format(author=_format_last(persons[0]), year=year)
    elif paren and len(persons) > 2:
        template = '{author} et al ({year})'
        return template.format(author=_format_last(persons[0]), year=year)
Get and format author-year text from a pybtex entry to emulate natbib citations. Parameters ---------- entry : `pybtex.database.Entry` A pybtex bibliography entry. parens : `bool`, optional Whether to add parentheses around the year. Default is `False`. Returns ------- authoryear : `str` The author-year citation text.
377,660
def is_activated(self, images, augmenter, parents, default):
    if self.activator is None:
        return default
    else:
        return self.activator(images, augmenter, parents, default)
Returns whether an augmenter may be executed. Returns ------- bool If True, the augmenter may be executed. If False, it may not be executed.
377,661
def OPTIONS(self, *args, **kwargs): return self._handle_api(self.API_OPTIONS, args, kwargs)
OPTIONS request
377,662
def get_remove_security_group_commands(self, sg_id, profile): return self._get_interface_commands(sg_id, profile, delete=True)
Commands for removing ACL from interface
377,663
def _get_hash(self, file_obj):
    size = 0
    hash_builder = self.hash_builder()
    for piece in self._get_file_iterator(file_obj):
        hash_builder.update(piece)
        size += len(piece)
    file_obj.seek(0)
    return "%s_%x" % (hash_builder.hexdigest(), size)
Compute hash for the `file_obj`. Args: file_obj (obj): File-like object with ``.write()`` and ``.seek()``. Returns: str: Hexdigest of the hash, followed by the file size in hex.
377,664
def _EntriesGenerator(self):
    # The attribute-name strings were stripped during extraction; 'table_name'
    # and 'column_name' are restored from the keyword usage below.
    table_name = getattr(self.path_spec, 'table_name', None)
    column_name = getattr(self.path_spec, 'column_name', None)
    if table_name and column_name:
        if self._number_of_entries is None:
            path_spec = sqlite_blob_path_spec.SQLiteBlobPathSpec(
                table_name=table_name, column_name=column_name, row_index=0,
                parent=self.path_spec.parent)
            sub_file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
            # The original referenced an undefined 'file_entry' here; testing
            # sub_file_entry is the evident intent.
            if not sub_file_entry:
                self._number_of_entries = 0
            else:
                self._number_of_entries = sub_file_entry.GetNumberOfRows()
        for row_index in range(0, self._number_of_entries):
            yield sqlite_blob_path_spec.SQLiteBlobPathSpec(
                table_name=table_name, column_name=column_name,
                row_index=row_index, parent=self.path_spec.parent)
Retrieves directory entries. Since a directory can contain a vast number of entries using a generator is more memory efficient. Yields: SQLiteBlobPathSpec: a path specification. Raises: AccessError: if the access to list the directory was denied. BackEndError: if the directory could not be listed.
377,665
def autoargs(include=None, exclude=None, f=DECORATED ): return autoargs_decorate(f, include=include, exclude=exclude)
Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing the function. In other words: ``` @autoargs def myfunc(a): print('hello') ``` will create the equivalent of ``` def myfunc(a): self.a = a print('hello') ``` Initial code from http://stackoverflow.com/questions/3652851/what-is-the-best-way-to-do-automatic-attribute-assignment-in-python-and-is-it-a#answer-3653049 :param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be included by default :param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None :return:
377,666
def deploy(remote, assets_to_s3):
    header("Deploying...")
    if assets_to_s3:
        for mod in get_deploy_assets2s3_list(CWD):
            _assets2s3(mod)
    remote_name = remote or "ALL"
    print("Pushing application's content to remote: %s " % remote_name)
    hosts = get_deploy_hosts_list(CWD, remote or None)
    git_push_to_master(cwd=CWD, hosts=hosts, name=remote_name)
    print("Done!")
To DEPLOY your application
377,667
def process_cli(log_level, mets, page_id, tasks):
    log = getLogger()
    run_tasks(mets, log_level, page_id, tasks)
    log.info("Finished")
Process a series of tasks
377,668
def setPololuProtocol(self):
    self._compact = False
    self._log and self._log.debug("Pololu protocol has been set.")
Set the pololu protocol.
377,669
def getManagers(self):
    manager_ids = []
    manager_list = []
    for department in self.getDepartments():
        manager = department.getManager()
        if manager is None:
            continue
        manager_id = manager.getId()
        if manager_id not in manager_ids:
            manager_ids.append(manager_id)
            manager_list.append(manager)
    return manager_list
Return all managers of responsible departments
377,670
def _normalize(self, flags):
    norm = None
    if isinstance(flags, MessageFlags):
        norm = flags.bytes
    elif isinstance(flags, bytearray):
        norm = binascii.hexlify(flags)
    elif isinstance(flags, int):
        norm = bytes([flags])
    elif isinstance(flags, bytes):
        norm = binascii.hexlify(flags)
    elif isinstance(flags, str):
        flags = flags[0:2]
        norm = binascii.hexlify(binascii.unhexlify(flags.lower()))
    elif flags is None:
        norm = None
    else:
        # The warning format string was stripped during extraction; the message
        # below is a reconstruction.
        _LOGGER.warning('Unknown flags type %s: %s', type(flags), flags)
    return norm
Take any format of flags and turn it into a hex string.
377,671
def get(key, default=-1):
    if isinstance(key, int):
        return Routing(key)
    if key not in Routing._member_map_:
        extend_enum(Routing, key, default)
    return Routing[key]
Backport support for original codes.
377,672
def process_result_value(self, value, dialect):
    if value is None:
        return None
    p = value.split("|")
    if len(p) == 0:
        return None
    return SourceLocation(*map(int, p))
SQLAlchemy uses this to convert a string into a SourceLocation object. We separate the fields by a |
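For illustration, the matching bind direction would serialize with the same separator. This is a sketch (SQLAlchemy TypeDecorators name this hook process_bind_param, but the body below is assumed, including SourceLocation being iterable over integer fields):

def process_bind_param(self, value, dialect):
    # Join the SourceLocation fields with '|' so the pair round-trips.
    if value is None:
        return None
    return "|".join(str(int(f)) for f in value)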
377,673
def validate(name, value, enforce_not_none=True, equals=None, instance_of=None,
             subclass_of=None, is_in=None, subset_of=None, contains=None,
             superset_of=None, min_value=None, min_strict=False, max_value=None,
             max_strict=False, length=None, min_len=None, min_len_strict=False,
             max_len=None, max_len_strict=False, custom=None, error_type=None,
             help_msg=None, **kw_context_args):
    # The two legacy-keyword strings below ('var_types', 'allowed_values') were
    # stripped during extraction and are restored from the docstring, which uses
    # them as the old names of instance_of and is_in.
    instance_of = instance_of or (kw_context_args.pop('var_types')
                                  if 'var_types' in kw_context_args else None)
    is_in = is_in or (kw_context_args.pop('allowed_values')
                      if 'allowed_values' in kw_context_args else None)
    try:
        if value is None:
            if enforce_not_none:
                raise ValueIsNone(wrong_value=value)
        else:
            if equals is not None:
                if value != equals:
                    raise NotEqual(wrong_value=value, ref_value=equals)
            if instance_of is not None:
                assert_instance_of(value, instance_of)
            if subclass_of is not None:
                assert_subclass_of(value, subclass_of)
            if is_in is not None:
                if value not in is_in:
                    raise NotInAllowedValues(wrong_value=value, allowed_values=is_in)
            if contains is not None:
                if contains not in value:
                    raise DoesNotContainValue(wrong_value=value, ref_value=contains)
            if subset_of is not None:
                missing = value - subset_of
                if len(missing) != 0:
                    raise NotSubset(wrong_value=value, reference_set=subset_of,
                                    unsupported=missing)
            if superset_of is not None:
                missing = superset_of - value
                if len(missing) != 0:
                    raise NotSuperset(wrong_value=value, reference_set=superset_of,
                                      missing=missing)
            if min_value is not None:
                if min_strict:
                    if not value > min_value:
                        raise TooSmall(wrong_value=value, min_value=min_value, strict=True)
                else:
                    if not value >= min_value:
                        raise TooSmall(wrong_value=value, min_value=min_value, strict=False)
            if max_value is not None:
                if max_strict:
                    if not value < max_value:
                        raise TooBig(wrong_value=value, max_value=max_value, strict=True)
                else:
                    if not value <= max_value:
                        raise TooBig(wrong_value=value, max_value=max_value, strict=False)
            if length is not None:
                if len(value) != length:
                    raise WrongLength(wrong_value=value, ref_length=length)
            if min_len is not None:
                if min_len_strict:
                    if not len(value) > min_len:
                        raise TooShort(wrong_value=value, min_length=min_len, strict=True)
                else:
                    if not len(value) >= min_len:
                        raise TooShort(wrong_value=value, min_length=min_len, strict=False)
            if max_len is not None:
                if max_len_strict:
                    if not len(value) < max_len:
                        raise TooLong(wrong_value=value, max_length=max_len, strict=True)
                else:
                    if not len(value) <= max_len:
                        raise TooLong(wrong_value=value, max_length=max_len, strict=False)
    except Exception as e:
        err = _QUICK_VALIDATOR._create_validation_error(
            name, value, validation_outcome=e, error_type=error_type,
            help_msg=help_msg, **kw_context_args)
        raise_(err)
    if custom is not None:
        assert_valid(name, value, custom, error_type=error_type,
                     help_msg=help_msg, **kw_context_args)
    else:
        if error_type is None and help_msg is None and len(kw_context_args) > 0:
            raise ValueError("Keyword context arguments have been provided but help_msg "
                             "and error_type are not: {}".format(kw_context_args))
A validation function for quick inline validation of `value`, with minimal capabilities: * None handling: reject None (enforce_not_none=True, default), or accept None silently (enforce_not_none=False) * Type validation: `value` should be an instance of any of `var_types` if provided * Value validation: * if `allowed_values` is provided, `value` should be in that set * if `min_value` (resp. `max_value`) is provided, `value` should be greater than it. Comparison is not strict by default and can be set to strict by setting `min_strict`, resp. `max_strict`, to `True` * if `min_len` (resp. `max_len`) is provided, `len(value)` should be greater than it. Comparison is not strict by default and can be set to strict by setting `min_len_strict`, resp. `max_len_strict`, to `True` :param name: the applicative name of the checked value, that will be used in error messages :param value: the value to check :param enforce_not_none: boolean, default True. Whether to enforce that `value` is not None. :param equals: an optional value to enforce. :param instance_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one match is enough to succeed. If None, type will not be enforced :param subclass_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one match is enough to succeed. If None, type will not be enforced :param is_in: an optional set of allowed values. :param subset_of: an optional superset for the variable :param contains: an optional value that the variable should contain (value in variable == True) :param superset_of: an optional subset for the variable :param min_value: an optional minimum value :param min_strict: if True, only values strictly greater than `min_value` will be accepted :param max_value: an optional maximum value :param max_strict: if True, only values strictly lesser than `max_value` will be accepted :param length: an optional strict length :param min_len: an optional minimum length :param min_len_strict: if True, only values with length strictly greater than `min_len` will be accepted :param max_len: an optional maximum length :param max_len_strict: if True, only values with length strictly lesser than `max_len` will be accepted :param custom: a custom base validation function or list of base validation functions to use. This is the same syntax than for valid8 decorators. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically. :param error_type: a subclass of `ValidationError` to raise in case of validation failure. By default a `ValidationError` will be raised with the provided `help_msg` :param help_msg: an optional help message to be used in the raised error in case of validation failure. :param kw_context_args: optional contextual information to store in the exception, and that may be also used to format the help message :return: nothing in case of success. Otherwise, raises a ValidationError
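A brief usage sketch of the function documented above (this appears to be valid8's validate; the exact failure subclasses raised are as named in the code):

validate('age', 21, instance_of=int, min_value=0)     # passes silently
validate('name', 'bob', instance_of=str, min_len=2)   # passes silently
validate('age', -1, instance_of=int, min_value=0)     # raises a ValidationError (TooSmall)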
377,674
def obj_to_md(self, file_path=None, title_columns=False, quote_numbers=True): return self.obj_to_mark_down(file_path=file_path, title_columns=title_columns, quote_numbers=quote_numbers)
This will return a str of a mark down tables. :param title_columns: bool if True will title all headers :param file_path: str of the path to the file to write to :param quote_numbers: bool if True will quote numbers that are strings :return: str
377,675
def unwind(self, values, backend, **kwargs):
    if not hasattr(self, "_unwind_value"):
        self._unwind_value = self._unwind(values, backend, **kwargs)
    return self._unwind_value
Unwind expression by applying *values* to the abstract nodes. The ``kwargs`` dictionary can contain data which can be used to override values
377,676
def remove_intra(M, contigs):
    N = np.copy(M)
    n = len(N)
    assert n == len(contigs)
    for (i, j) in itertools.product(range(n), range(n)):
        if contigs[i] == contigs[j]:
            N[i, j] = 0
    return N
Remove intrachromosomal contacts Given a contact map and a list attributing each position to a given chromosome, set all contacts within each chromosome or contig to zero. Useful to perform calculations on interchromosomal contacts only. Parameters ---------- M : array_like The initial contact map contigs : list or array_like A 1D array whose value at index i reflect the contig label of the row i in the matrix M. The length of the array must be equal to the (identical) shape value of the matrix. Returns ------- N : numpy.ndarray The output contact map with no intrachromosomal contacts
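A small worked example (values invented):

import numpy as np

M = np.array([[5, 2, 1],
              [2, 4, 3],
              [1, 3, 6]])
contigs = [0, 0, 1]  # rows 0 and 1 belong to the same contig
N = remove_intra(M, contigs)
# N == [[0, 0, 1],
#       [0, 0, 3],
#       [1, 3, 0]]   -- only interchromosomal contacts remain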
377,677
def flags(self, index): return Qt.ItemFlags(QAbstractTableModel.flags(self, index) | Qt.ItemIsEditable)
Set flags
377,678
def _ParseHeader(self, parser_mediator, structure):
    # Several string literals were stripped during extraction; 'BEGIN'/'END'
    # (XChat writes '**** BEGIN/END LOGGING AT ...' headers) and the message
    # texts below are guesses.
    _, month, day, hours, minutes, seconds, year = structure.date_time
    month = timelib.MONTH_DICT.get(month.lower(), 0)
    time_elements_tuple = (year, month, day, hours, minutes, seconds)
    try:
        date_time = dfdatetime_time_elements.TimeElements(
            time_elements_tuple=time_elements_tuple)
        date_time.is_local_time = True
    except ValueError:
        parser_mediator.ProduceExtractionWarning(
            'invalid date time value: {0!s}'.format(structure.date_time))
        return
    self._last_month = month
    event_data = XChatLogEventData()
    if structure.log_action[0] == 'BEGIN':
        self._xchat_year = year
        event_data.text = 'XChat start logging'
    elif structure.log_action[0] == 'END':
        self._xchat_year = None
        event_data.text = 'XChat end logging'
    else:
        logger.debug('Unknown log action: {0:s}.'.format(
            ' '.join(structure.log_action)))
        return
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED,
        time_zone=parser_mediator.timezone)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a log header. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
377,679
def _disable_prometheus_process_collector(self) -> None:
    logger.info("Removing prometheus process collector")
    try:
        core.REGISTRY.unregister(PROCESS_COLLECTOR)
    except KeyError:
        logger.debug("PROCESS_COLLECTOR already removed from prometheus")
There is a bug in SDC's Docker implementation and in the intolerant
prometheus_client code, due to which its process_collector will fail.
See https://github.com/prometheus/client_python/issues/80
377,680
def check(self, radl):
    SIMPLE_FEATURES = {
        "name": (str, lambda x, _: bool(x.value)),
        "path": (str, lambda x, _: bool(x.value)),
        "version": (str, is_version),
        "preinstalled": (str, ["YES", "NO"])
    }
    self.check_simple(SIMPLE_FEATURES, radl)
Check the features in this application.
377,681
def reset(self):
    self.resetRNG()
    # Draw each agent's initial style: s = 1 with probability p_init.
    sNow = np.zeros(self.pop_size)
    Shk = self.RNG.rand(self.pop_size)
    sNow[Shk < self.p_init] = 1
    self.sNow = sNow
Resets this agent type to prepare it for a new simulation run. This includes resetting the random number generator and initializing the style of each agent of this type.
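The reset draws one uniform shock per agent and assigns style 1 wherever the draw falls below `p_init`, i.e. an independent Bernoulli(p_init) draw per agent. The same idiom in isolation:

import numpy as np

rng = np.random.RandomState(0)  # fixed seed, chosen for illustration
p_init, pop_size = 0.3, 10
sNow = np.zeros(pop_size)
sNow[rng.rand(pop_size) < p_init] = 1  # each agent is 1 with probability p_init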
377,682
def hdf5_col(self, chain=-1):
    return self.db._tables[chain].colinstances[self.name]
Return a pytables column object.

:Parameters:
chain : integer
    The index of the chain.

.. note:: This method is specific to the ``hdf5`` backend.
377,683
def deepcopy(self, x=None, y=None):
    x = self.x if x is None else x
    y = self.y if y is None else y
    return Keypoint(x=x, y=y)
Create a deep copy of the Keypoint object.

Parameters
----------
x : None or number, optional
    Coordinate of the keypoint on the x axis. If ``None``, the instance's
    value will be copied.
y : None or number, optional
    Coordinate of the keypoint on the y axis. If ``None``, the instance's
    value will be copied.

Returns
-------
imgaug.Keypoint
    Deep copy.
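A usage sketch (the top-level `imgaug` import path is an assumption; adjust to the installed version):

from imgaug import Keypoint  # import path is an assumption

kp = Keypoint(x=10.5, y=20.0)
kp2 = kp.deepcopy(y=0)  # copy with the y coordinate overridden
assert (kp2.x, kp2.y) == (10.5, 0)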
377,684
def get_context_data(self, **kwargs):
    strain = super(StrainDetail, self).get_object()
    context = super(StrainDetail, self).get_context_data(**kwargs)
    # NOTE: the context keys and order_by fields below were lost in
    # extraction; the names used here are plausible placeholders.
    context['breeding_list'] = Breeding.objects.filter(Strain=strain)
    context['animal_list'] = Animal.objects.filter(
        Strain=strain).order_by('Background', 'MouseID')
    context['cage_list'] = Animal.objects.filter(
        Strain=strain).values("Cage").distinct()
    context['strain_list_all'] = False
    return context
This adds strain_list_all (which filters for all alive
:class:`~mousedb.animal.models.Animal` objects and active cages) and cages
(which filters for the number of current cages) into the context.
377,685
def _adapt_response(self, response):
    errors, meta = super(ServerError, self)._adapt_response(response)
    return errors[0], meta
Convert various error responses to standardized ErrorDetails.
377,686
def validateOneNamespace(self, doc, elem, prefix, ns, value):
    if doc is None:
        doc__o = None
    else:
        doc__o = doc._o
    if elem is None:
        elem__o = None
    else:
        elem__o = elem._o
    if ns is None:
        ns__o = None
    else:
        ns__o = ns._o
    ret = libxml2mod.xmlValidateOneNamespace(
        self._o, doc__o, elem__o, prefix, ns__o, value)
    return ret
Try to validate a single namespace declaration for an element. Basically it
does the following checks as described by the XML-1.0 recommendation:

- [ VC: Attribute Value Type ]
- [ VC: Fixed Attribute Default ]
- [ VC: Entity Name ]
- [ VC: Name Token ]
- [ VC: ID ]
- [ VC: IDREF ]
- [ VC: Entity Name ]
- [ VC: Notation Attributes ]

The ID/IDREF uniqueness and matching are done separately.
377,687
def _get_service_keys(self, service_name):
    guid = self.get_instance_guid(service_name)
    uri = "/v2/service_instances/%s/service_keys" % (guid)
    return self.api.get(uri)
Return the service keys for the given service.
377,688
def tee(process, filter):
    lines = []
    while True:
        line = process.stdout.readline()
        if line:
            if sys.version_info[0] >= 3:
                line = decode(line)
            stripped_line = line.rstrip()
            if filter(stripped_line):
                sys.stdout.write(line)
            lines.append(stripped_line)
        elif process.poll() is not None:
            process.stdout.close()
            break
    return lines
Read lines from process.stdout and echo them to sys.stdout. Returns a list of lines read. Lines are not newline terminated. The 'filter' is a callable which is invoked for every line, receiving the line as argument. If the filter returns True, the line is echoed to sys.stdout.
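A usage sketch with `subprocess` (assumes `tee` and its module-local `decode` helper are importable; the command is illustrative):

import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, '-c', 'print("one"); print("two")'],
    stdout=subprocess.PIPE)
# Echo every line; returning False from the predicate suppresses the echo.
lines = tee(proc, lambda line: True)
assert lines == ['one', 'two']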
377,689
def _to_dict(self):
    _dict = {}
    # Attribute-name string literals were lost in extraction; they are
    # restored here from the attribute accesses on each line.
    if hasattr(self, 'score') and self.score is not None:
        _dict['score'] = self.score
    if hasattr(self, 'sentence') and self.sentence is not None:
        _dict['sentence'] = self.sentence
    if hasattr(self, 'type') and self.type is not None:
        _dict['type'] = self.type
    if hasattr(self, 'arguments') and self.arguments is not None:
        _dict['arguments'] = [x._to_dict() for x in self.arguments]
    return _dict
Return a json dictionary representing this model.
377,690
def get_url_path(self, language=None):
    if self.is_first_root():
        try:
            # NOTE: the URL-pattern names in this method were lost in
            # extraction; 'pages-root' and 'pages-details-by-path' are
            # plausible reconstructions for django-page-cms.
            return reverse('pages-root')
        except Exception:
            pass
    url = self.get_complete_slug(language)
    if not language:
        language = settings.PAGE_DEFAULT_LANGUAGE
    if settings.PAGE_USE_LANGUAGE_PREFIX:
        return reverse('pages-details-by-path', args=[language, url])
    else:
        return reverse('pages-details-by-path', args=[url])
Return the URL's path component. Add the language prefix if
``PAGE_USE_LANGUAGE_PREFIX`` setting is set to ``True``.

:param language: the wanted url language.
377,691
def prep_directory(self, target_dir):
    dirname = path.dirname(target_dir)
    if dirname:
        dirname = path.join(settings.BUILD_DIR, dirname)
        if not self.fs.exists(dirname):
            logger.debug("Creating directory at {}{}".format(self.fs_name, dirname))
            self.fs.makedirs(dirname)
Prepares a new directory to store the file at the provided path, if needed.
377,692
def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
    env = inliner.document.settings.env
    # NOTE: the string literals in this function were lost in extraction;
    # 'py'/'obj' and the node keys are plausible reconstructions based on
    # sphinx's autosummary extension.
    r = env.get_domain('py').role('obj')(
        'obj', rawtext, etext, lineno, inliner, options, content)
    pnode = r[0][0]

    prefixes = get_import_prefixes_from_env(env)
    try:
        name, obj, parent = import_by_name(pnode['reftarget'], prefixes)
    except ImportError:
        content = pnode[0]
        r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
                                 classes=content['classes'])
    return r
Smart linking role. Expands to ':obj:`text`' if `text` is an object that can be imported; otherwise expands to '*text*'.
377,693
import operator as op
from functools import reduce


def ncr(n, r):
    r = min(r, n - r)
    numer = reduce(op.mul, range(n, n - r, -1), 1)
    denom = reduce(op.mul, range(1, r + 1), 1)
    return numer // denom
Calculate n choose r.

:param n: n
:type n: int
:param r: r
:type r: int
:return: n choose r as int
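A quick sanity check:

assert ncr(5, 2) == 10   # 5! / (2! * 3!) = 120 / 12
assert ncr(10, 0) == 1
assert ncr(6, 6) == 1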
377,694
def is_valid(self):
    try:
        request = self.get_oauth_request()
        client = self.get_client(request)
        # verify_request() raises if the request is invalid; params is unused.
        params = self._server.verify_request(request, client, None)
    except Exception as e:
        raise e
    return client
Returns a Client object if this is a valid OAuth request.
377,695
def generate_insufficient_overlap_message(
        e, exposure_geoextent, exposure_layer, hazard_geoextent,
        hazard_layer, viewport_geoextent):
    # NOTE: the translated message literals in this function were lost in
    # extraction; the strings below are illustrative placeholders.
    description = tr(
        'There was insufficient overlap between the input layers and / or '
        'the layers and the viewable area. Please select two overlapping '
        'layers and zoom or pan to them.')
    message = m.Message(description)
    text = m.Paragraph(tr('The following inputs were found:'))
    message.add(text)
    analysis_inputs = m.BulletedList()
    analysis_inputs.add(tr('Hazard: %s') % (hazard_layer.source()))
    analysis_inputs.add(tr('Exposure: %s') % (exposure_layer.source()))
    analysis_inputs.add(
        tr('Viewable area geo extent: %s') % (viewport_geoextent))
    analysis_inputs.add(
        tr('Hazard geo extent: %s') % (hazard_geoextent))
    analysis_inputs.add(
        tr('Exposure geo extent: %s') % (exposure_geoextent))
    analysis_inputs.add(
        tr('Details: %s') % (e))
    message.add(analysis_inputs)
    return message
Generate insufficient overlap message.

:param e: An exception.
:type e: Exception

:param exposure_geoextent: Extent of the exposure layer in the form
    [xmin, ymin, xmax, ymax] in EPSG:4326.
:type exposure_geoextent: list

:param exposure_layer: Exposure layer.
:type exposure_layer: QgsMapLayer

:param hazard_geoextent: Extent of the hazard layer in the form
    [xmin, ymin, xmax, ymax] in EPSG:4326.
:type hazard_geoextent: list

:param hazard_layer: Hazard layer instance.
:type hazard_layer: QgsMapLayer

:param viewport_geoextent: Viewport extents as a list
    [xmin, ymin, xmax, ymax] in EPSG:4326.
:type viewport_geoextent: list

:return: An InaSAFE message object.
:rtype: safe.messaging.Message
377,696
from itertools import islice


def batch(self, num):
    # NOTE: the popped parameter name was lost in extraction; 'limit' is a
    # placeholder for the original key.
    self._params.pop('limit', None)
    it = iter(self)
    while True:
        chunk = list(islice(it, num))
        if not chunk:
            return
        yield chunk
Iterator returning results in batches.

When making more general queries that might have larger results, specify a
batch size that should be returned with each iteration.

:param int num: number of results per iteration
:return: iterator holding list of results
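A usage sketch (`results` and `process` are hypothetical stand-ins for a query object exposing `batch()` and a consumer function):

# Hypothetical query object; each chunk is a list of at most 100 results.
for chunk in results.batch(100):
    for record in chunk:
        process(record)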
377,697
def write_memory(self, addr, data, transfer_size=32):
    assert transfer_size in (8, 16, 32)
    if transfer_size == 32:
        self._link.write_mem32(
            addr, conversion.u32le_list_to_byte_list([data]), self._apsel)
    elif transfer_size == 16:
        self._link.write_mem16(
            addr, conversion.u16le_list_to_byte_list([data]), self._apsel)
    elif transfer_size == 8:
        self._link.write_mem8(addr, [data], self._apsel)
! @brief Write a single memory location. By default the transfer size is a word.
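A usage sketch (addresses and values are illustrative; `ap` is a hypothetical instance of the class above):

ap.write_memory(0x20000000, 0x12345678)                # 32-bit word (default)
ap.write_memory(0x20000004, 0xBEEF, transfer_size=16)  # 16-bit halfword
ap.write_memory(0x20000006, 0xAB, transfer_size=8)     # single byte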
377,698
def hardware_flexport_id(self, **kwargs):
    config = ET.Element("config")
    hardware = ET.SubElement(
        config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
    flexport = ET.SubElement(hardware, "flexport")
    id = ET.SubElement(flexport, "id")
    # The popped keys were lost in extraction; 'id' matches the element name
    # and 'callback' matches the convention of these generated methods.
    id.text = kwargs.pop('id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
377,699
def download(self, path):
    service_get_resp = requests.get(
        self.location, cookies={"session": self.session})
    payload = service_get_resp.json()
    download_get_resp = requests.get(payload["content"])
    with open(path, "wb") as config_file:
        config_file.write(download_get_resp.content)
Downloads a config resource to the given path.