Dataset schema (from the dataset viewer): "Unnamed: 0" — int64, values 0 to 389k; "code" — string, lengths 26 to 79.6k; "docstring" — string, lengths 1 to 46.9k.
27,600
def histogram1d(data, bins=None, *args, **kwargs):
    import dask
    if not hasattr(data, "dask"):
        data = dask.array.from_array(data, chunks=int(data.shape[0] / options["chunk_split"]))
    if not kwargs.get("adaptive", True):
        raise RuntimeError("Only adaptive histograms supported for dask (currently).")
    kwargs["adaptive"] = True

    def block_hist(array):
        return original_h1(array, bins, *args, **kwargs)

    return _run_dask(
        name="dask_adaptive1d",
        data=data,
        compute=kwargs.pop("compute", True),
        method=kwargs.pop("dask_method", "threaded"),
        func=block_hist)
Facade function to create one-dimensional histogram using dask. Parameters ---------- data: dask.DaskArray or array-like See also -------- physt.histogram
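A minimal usage sketch (hedged: assumes the facade above is importable and that a plain dask array is accepted, as the hasattr check suggests):

import dask.array as da
import numpy as np

data = da.from_array(np.random.normal(size=1_000_000), chunks=100_000)
hist = histogram1d(data, adaptive=True, dask_method="threaded")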
27,601
def _get_preferred_host(self, result: ResolveResult) -> Tuple[str, str]:
    host_1 = result.first_ipv4.ip_address if result.first_ipv4 else None
    host_2 = result.first_ipv6.ip_address if result.first_ipv6 else None
    if not host_2:
        return host_1, None
    elif not host_1:
        return host_2, None
    preferred_host = self._happy_eyeballs_table.get_preferred(host_1, host_2)
    if preferred_host:
        return preferred_host, None
    else:
        return host_1, host_2
Get preferred host from DNS results.
27,602
def increment(self, key, amount=1):
    # The two dict-key literals were lost in extraction; restored following
    # the Parse REST API atomic-increment convention ("__op"/"amount").
    payload = {
        key: {
            "__op": "Increment",
            "amount": amount
        }
    }
    self.__class__.PUT(self._absolute_url, **payload)
    self.__dict__[key] += amount
Increment one value in the object. Note that this happens immediately: it does not wait for save() to be called
27,603
def do_connect(self):
    assert self.socket is None
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    self.connect(self._connect_address)
Initiate the connection.
27,604
def guess_line_ending(string):
    # Line-ending literals were lost in extraction; restored from the
    # function's purpose (CRLF vs. LF counting). The assert message is a
    # reconstruction.
    assert isinstance(string, str), "expected str, got {}".format(type(string))
    crlf_count = string.count("\r\n")
    lf_count = string.count("\n")
    if crlf_count >= lf_count:
        return "\r\n"
    else:
        return "\n"
Return the most likely line delimiter from the string.
27,605
def tag_info_chart (self): ucsc = ["chr" + str(i) for i in range(1,23)].append([ "chrX", "chrY", "chrM"]) ensembl = list(range(1,23)).append([ "X", "Y", "MT"]) pconfig = { : , : , : , : } sample1 = next(iter(self.tagdir_data[])) chrFormat = next(iter(self.tagdir_data[][sample1])) if ("chr" in chrFormat): chrs = ucsc else: chrs = ensembl return bargraph.plot(self.tagdir_data[], chrs, pconfig)
Make the taginfo.txt plot
27,606
def validate_config_value(value, possible_values):
    if value not in possible_values:
        # The original message string was lost in extraction; reconstructed.
        raise Exception('%s is not a valid value. Possible values: %s'
                        % (value, ', '.join(e for e in possible_values)))
Validate a config value to make sure it is one of the possible values. Args: value: the config value to validate. possible_values: the possible values the value can be Raises: Exception if the value is not one of possible values.
27,607
def update_meta_data_for_transition_waypoints(graphical_editor_view, transition_v, last_waypoint_list, publish=True):
    from rafcon.gui.mygaphas.items.connection import TransitionView
    assert isinstance(transition_v, TransitionView)
    transition_m = transition_v.model
    waypoint_list = get_relative_positions_of_waypoints(transition_v)
    if waypoint_list != last_waypoint_list:
        # 'waypoints' key restored from the emit() call below; the signal
        # name follows RAFCON's meta_data_changed convention.
        transition_m.set_meta_data_editor('waypoints', waypoint_list)
        if publish:
            graphical_editor_view.emit('meta_data_changed', transition_m, "waypoints", False)
This method updates the relative position meta data of the transition's waypoints if they changed :param graphical_editor_view: Graphical Editor the change occurred in :param transition_v: Transition that changed :param last_waypoint_list: List of waypoints before the change :param bool publish: Whether to publish the changes using the meta signal
27,608
def classmarkChange(MobileStationClassmark3_presence=0):
    a = TpPd(pd=0x6)
    b = MessageType(mesType=0x16)
    c = MobileStationClassmark2()
    packet = a / b / c
    if MobileStationClassmark3_presence == 1:  # was 'is 1', which relies on CPython int caching
        e = MobileStationClassmark3(ieiMSC3=0x20)
        packet = packet / e
    return packet
CLASSMARK CHANGE Section 9.1.11
27,609
def add_query(self, query, join_with=AND):
    if not isinstance(query, DomainCondition):
        query = DomainCondition.from_tuple(query)
    if len(self.query):
        self.query.append(join_with)
    self.query.append(query)
Join a new query to existing queries on the stack. Args: query (tuple or list or DomainCondition): The condition for the query. If a ``DomainCondition`` object is not provided, the input should conform to the interface defined in :func:`~.domain.DomainCondition.from_tuple`. join_with (str): The join string to apply, if other queries are already on the stack.
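Hypothetical usage (the DomainQuery container name is an assumption; only add_query and AND come from the snippet above):

stack = DomainQuery()
stack.add_query(("age", ">", 30))                        # converted via from_tuple
stack.add_query(("name", "like", "Smith"), join_with=AND)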
27,610
def author_name_contains_fullnames(author_name):
    def _is_initial(name_part):
        # '.' literal restored: initials like 'J.' are marked by a dot
        return len(name_part) == 1 or u'.' in name_part

    parsed_name = ParsedName(author_name)
    if len(parsed_name) == 1:
        return False
    elif any([_is_initial(name_part) for name_part in parsed_name]):
        return False
    return True
Recognizes whether the name contains full name parts rather than initials or only a last name. Returns: bool: True if the name has only full name parts, e.g. 'Ellis John'; False otherwise. So, for example, False is returned for 'Ellis, J.' or 'Ellis'.
27,611
def purge_results(self, client_id, msg):
    # String literals were lost in extraction; restored following the
    # IPython parallel Hub conventions (msg content keys, '$ne'/'$in'
    # record filters, 'purge_reply' message type).
    content = msg['content']
    self.log.info("Dropping records with %s", content)
    msg_ids = content.get('msg_ids', [])
    reply = dict(status='ok')
    if msg_ids == 'all':
        try:
            self.db.drop_matching_records(dict(completed={'$ne': None}))
        except Exception:
            reply = error.wrap_exception()
    else:
        pending = filter(lambda m: m in self.pending, msg_ids)
        if pending:
            try:
                raise IndexError("msg pending: %r" % pending[0])
            except:
                reply = error.wrap_exception()
        else:
            try:
                self.db.drop_matching_records(dict(msg_id={'$in': msg_ids}))
            except Exception:
                reply = error.wrap_exception()

    if reply['status'] == 'ok':
        eids = content.get('engine_ids', [])
        for eid in eids:
            if eid not in self.engines:
                try:
                    raise IndexError("No such engine: %i" % eid)
                except:
                    reply = error.wrap_exception()
                break
            uid = self.engines[eid].queue
            try:
                self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne': None}))
            except Exception:
                reply = error.wrap_exception()
            break

    self.session.send(self.query, 'purge_reply', content=reply, ident=client_id)
Purge results from memory. This method is more valuable before we move to a DB based message storage mechanism.
27,612
def handle_apply(args):
    # docopt argument keys restored from the usage string in the docstring
    python_version = args['--python-version']
    if python_version is None:
        python_version = "{}.{}".format(sys.version_info.major, sys.version_info.minor)

    apply_mutation(
        Path(args['<module-path>']),
        cosmic_ray.plugins.get_operator(args['<operator>'])(python_version),
        int(args['<occurrence>']))
    return ExitCode.OK
usage: {program} apply <module-path> <operator> <occurrence> Apply the specified mutation to the files on disk. This is primarily a debugging tool. options: --python-version=VERSION Python major.minor version (e.g. 3.6) of the code being mutated.
27,613
def _apply_line_rules(self, markdown_string): all_violations = [] lines = markdown_string.split("\n") line_rules = self.line_rules line_nr = 1 ignoring = False for line in lines: if ignoring: if line.strip() == : ignoring = False else: if line.strip() == : ignoring = True continue for rule in line_rules: violation = rule.validate(line) if violation: violation.line_nr = line_nr all_violations.append(violation) line_nr += 1 return all_violations
Iterates over the lines in a given markdown string and applies all the enabled line rules to each line
27,614
def add_camera_make_model(self, make, model):
    # IFD key restored: piexif stores Make/Model in the '0th' IFD
    self._ef['0th'][piexif.ImageIFD.Make] = make
    self._ef['0th'][piexif.ImageIFD.Model] = model
Add camera make and model.
27,615
async def send(self, config, entry):
    if self.endpoint:
        LOGGER.debug("%s -> %s", entry.url, self.url)
        try:
            await self.endpoint.send(config, entry.url, self.url)
        except Exception as err:
            LOGGER.warning("Ping %s: got %s: %s", self.url, err.__class__.__name__, err)
Send a webmention to this target from the specified entry
27,616
def serialize(self, image):
    pic = mutagen.flac.Picture()
    pic.data = image.data
    pic.type = image.type_index
    pic.mime = image.mime_type
    pic.desc = image.desc or u''
    return pic
Turn a Image into a mutagen.flac.Picture.
27,617
def show(cls, report_name, data):
    conn = Qubole.agent()
    return conn.get(cls.element_path(report_name), data)
Shows a report by issuing a GET request to the /reports/report_name endpoint. Args: `report_name`: the name of the report to show `data`: the parameters for the report
27,618
def get_user_sets(client_id, user_id): data = api_call(, .format(user_id), client_id=client_id) return [WordSet.from_dict(wordset) for wordset in data]
Find all user sets.
27,619
def pr_auc(fg_vals, bg_vals):
    y_true, y_score = values_to_labels(fg_vals, bg_vals)
    return average_precision_score(y_true, y_score)
Computes the Precision-Recall Area Under Curve (PR AUC) Parameters ---------- fg_vals : array_like list of values for positive set bg_vals : array_like list of values for negative set Returns ------- score : float PR AUC score
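A tiny worked example, assuming values_to_labels simply labels the positive set 1 and the negative set 0:

fg_vals = [0.9, 0.8, 0.6]         # scores for the positive set
bg_vals = [0.4, 0.3]              # scores for the negative set
score = pr_auc(fg_vals, bg_vals)  # 1.0: every positive outranks every negative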
27,620
def parse(self, data_model, crit): tables = pd.DataFrame(data_model) data_model = {} for table_name in tables.columns: data_model[table_name] = pd.DataFrame(tables[table_name][]).T data_model[table_name] = data_model[table_name].where((pd.notnull(data_model[table_name])), None) zipped = list(zip(crit.keys(), crit.values())) crit_map = pd.DataFrame(zipped) crit_map.index = crit_map[0] crit_map.drop(0, axis=, inplace=True) crit_map.rename({1: }, axis=, inplace=True) crit_map.index.rename("", inplace=True) for table_name in [, , , , , , , , ]: crit_map.loc[table_name] = np.nan return data_model, crit_map
Take the relevant pieces of the data model json and parse into data model and criteria map. Parameters ---------- data_model : data model piece of json (nested dicts) crit : criteria map piece of json (nested dicts) Returns ---------- data_model : dictionary of DataFrames crit_map : DataFrame
27,621
def accepts(**schemas):
    validate = parse(schemas).validate

    @decorator
    def validating(func, *args, **kwargs):
        validate(inspect.getcallargs(func, *args, **kwargs), adapt=False)
        return func(*args, **kwargs)

    return validating
Create a decorator for validating function parameters. Example:: @accepts(a="number", body={"+field_ids": [int], "is_ok": bool}) def f(a, body): print (a, body["field_ids"], body.get("is_ok")) :param schemas: The schema for validating a given parameter.
27,622
def traverse(self, edge):
    query = self.statement
    rel, dst = edge.rel, edge.dst
    statement, params = (
        SQL.compound_fwd_query(query, rel) if dst is None
        else SQL.compound_inv_query(query, rel, dst)
    )
    return self.derived(statement, params, replace=True)
Traverse the graph, selecting the destination nodes for a particular relation that the selected nodes are a source of, i.e. select the friends of my friends. You can traverse indefinitely. :param edge: The edge query. If the edge's destination node is specified then the source nodes will be selected.
27,623
def get_line_number_next_to_cursor_with_string_within(self, s):
    line_number, _ = self.get_cursor_position()
    text_buffer = self.text_view.get_buffer()
    line_iter = text_buffer.get_iter_at_line(line_number)

    before_line_number = None
    while line_iter.backward_line():
        if s in self.get_text_of_line(line_iter):
            before_line_number = line_iter.get_line()
            break

    after_line_number = None
    while line_iter.forward_line():
        if s in self.get_text_of_line(line_iter):
            after_line_number = line_iter.get_line()
            break

    if after_line_number is not None and before_line_number is None:
        return after_line_number, after_line_number - line_number
    elif before_line_number is not None and after_line_number is None:
        return before_line_number, line_number - before_line_number
    elif after_line_number is not None and before_line_number is not None:
        after_distance = after_line_number - line_number
        before_distance = line_number - before_line_number
        if after_distance < before_distance:
            return after_line_number, after_distance
        else:
            return before_line_number, before_distance
    else:
        return None, None
Find the closest occurrence of a string with respect to the cursor position in the text view
27,624
def get_servo_status(self):
    data = []
    data.append(0x09)
    data.append(self.servoid)
    data.append(RAM_READ_REQ)
    data.append(STATUS_ERROR_RAM)
    data.append(BYTE1)
    send_data(data)
    rxdata = []
    try:
        rxdata = SERPORT.read(12)
        return ord(rxdata[9]) & 0xFF
    except:
        raise HerkulexError("could not communicate with motors")
Get the error status of servo This function gets the error status (if any) of the servo Args: none Returns: int: an integer corresponding to the servo status * refer datasheet
27,625
def create(raw_properties=[]):
    assert (is_iterable_typed(raw_properties, property.Property)
            or is_iterable_typed(raw_properties, basestring))
    if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
        x = raw_properties
    else:
        x = [property.create_from_string(ps) for ps in raw_properties]

    x = sorted(set(x), key=lambda p: (p.feature.name, p.value, p.condition))
    key = tuple(p.id for p in x)
    if key not in __cache:
        __cache[key] = PropertySet(x)
    return __cache[key]
Creates a new 'PropertySet' instance for the given raw properties, or returns an already existing one.
27,626
def get_objective_lookup_session(self, proxy):
    if not self.supports_objective_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.ObjectiveLookupSession(proxy=proxy, runtime=self._runtime)
    except AttributeError:
        # reconstructed from the documented OperationFailed contract
        raise OperationFailed()
    return session
Gets the ``OsidSession`` associated with the objective lookup service. :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: an ``ObjectiveLookupSession`` :rtype: ``osid.learning.ObjectiveLookupSession`` :raise: ``NullArgument`` -- ``proxy`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_objective_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_lookup()`` is ``true``.*
27,627
def add_default_values_of_scoped_variables_to_scoped_data(self):
    for key, scoped_var in self.scoped_variables.items():
        self.scoped_data[str(scoped_var.data_port_id) + self.state_id] = \
            ScopedData(scoped_var.name, scoped_var.default_value, scoped_var.data_type,
                       self.state_id, ScopedVariable, parent=self)
Add the scoped variables default values to the scoped_data dictionary
27,628
def run_only_once(self, keyword): lock_name = % keyword try: self.acquire_lock(lock_name) passed = self.get_parallel_value_for_key(lock_name) if passed != : if passed == : raise AssertionError() return BuiltIn().run_keyword(keyword) self.set_parallel_value_for_key(lock_name, ) except: self.set_parallel_value_for_key(lock_name, ) raise finally: self.release_lock(lock_name)
Runs a keyword only once in one of the parallel processes. Because the keyword is called in only one process and its return value could be anything, "Run Only Once" cannot return the actual return value. If the keyword fails, "Run Only Once" fails. Other processes executing "Run Only Once" wait for the actual command to finish before passing through this keyword. NOTE! This is a potential "shoot yourself in the knee" keyword. Note especially that namespace changes are only visible in the process that actually executed the keyword, which can lead to odd situations if used inside other keywords. Also, at this point keywords are identified as the same only if they have the same name.
27,629
def __new_argv(self, *new_pargs, **new_kargs):
    new_argv = self.argv.copy()
    new_extra_argv = list(self.extra_argv)
    for v in new_pargs:
        arg_name = None
        for name in self.pargl:
            if not name in new_argv:
                arg_name = name
                break
        if arg_name:
            new_argv[arg_name] = v
        elif self.var_pargs:
            new_extra_argv.append(v)
        else:
            num_prev_pargs = len([name for name in self.pargl if name in self.argv])
            raise TypeError("%s() takes exactly %d positional arguments (%d given)"
                            % (self.__name__, len(self.pargl), num_prev_pargs + len(new_pargs)))
    for k, v in new_kargs.items():
        if not (self.var_kargs or (k in self.pargl) or (k in self.kargl)):
            # '%s' placeholder restored to match the two format arguments
            raise TypeError("%s() got an unexpected keyword argument '%s'"
                            % (self.__name__, k))
        new_argv[k] = v
    return (new_argv, new_extra_argv)
Calculate new argv and extra_argv values resulting from adding the specified positional and keyword arguments.
27,630
def _Register(self, conditions, callback):
    for condition in conditions:
        registered = self._registry.setdefault(condition, [])
        if callback and callback not in registered:
            registered.append(callback)
Map functions that should be called if the condition applies.
27,631
def _escape_xref(xref_match): xref = xref_match.group() xref = xref.replace(, ) xref = xref.replace(, ) xref = xref.replace(, ) return xref
Escape things that need to be escaped if they're in a cross-reference.
27,632
def inputfiles(self, inputtemplate=None):
    if isinstance(inputtemplate, InputTemplate):
        inputtemplate = inputtemplate.id
    for inputfile in self.input:
        if not inputtemplate or inputfile.metadata.inputtemplate == inputtemplate:
            yield inputfile
Generator yielding all inputfiles for the specified inputtemplate, if ``inputtemplate=None``, inputfiles are returned regardless of inputtemplate.
27,633
def parse_only_extr_license(self, extr_lic):
    ident = self.get_extr_license_ident(extr_lic)
    text = self.get_extr_license_text(extr_lic)
    comment = self.get_extr_lics_comment(extr_lic)
    xrefs = self.get_extr_lics_xref(extr_lic)
    name = self.get_extr_lic_name(extr_lic)
    if not ident:
        return
    lic = document.ExtractedLicense(ident)
    if text is not None:
        lic.text = text
    if name is not None:
        lic.full_name = name
    if comment is not None:
        lic.comment = comment
    lic.cross_ref = map(lambda x: six.text_type(x), xrefs)
    return lic
Return an ExtractedLicense object to represent a license object. But does not add it to the SPDXDocument model. Return None if failed.
27,634
def update_w(self):
    def update_single_w(i):
        FB = base.matrix(np.float64(np.dot(-self.data.T, W_hat[:, i])))
        be = solvers.qp(HB, FB, INQa, INQb, EQa, EQb)
        # cvxopt's solvers.qp returns a dict; the solution vector is be['x']
        self.beta[i, :] = np.array(be['x']).reshape((1, self._num_samples))

    HB = base.matrix(np.float64(np.dot(self.data[:, :].T, self.data[:, :])))
    EQb = base.matrix(1.0, (1, 1))
    W_hat = np.dot(self.data, pinv(self.H))
    INQa = base.matrix(-np.eye(self._num_samples))
    INQb = base.matrix(0.0, (self._num_samples, 1))
    EQa = base.matrix(1.0, (1, self._num_samples))
    for i in range(self._num_bases):
        update_single_w(i)
    self.W = np.dot(self.beta, self.data.T).T
alternating least squares step, update W under the convexity constraint
27,635
def poll(self, verbose_model_scoring_history=False):
    try:
        hidden = not H2OJob.__PROGRESS_BAR__
        pb = ProgressBar(title=self._job_type + " progress", hidden=hidden)
        if verbose_model_scoring_history:
            pb.execute(self._refresh_job_status,
                       print_verbose_info=lambda x: self._print_verbose_info() if int(x * 10) % 5 == 0 else " ")
        else:
            pb.execute(self._refresh_job_status)
    except StopIteration as e:
        if str(e) == "cancelled":
            h2o.api("POST /3/Jobs/%s/cancel" % self.job_key)
            self.status = "CANCELLED"
    assert self.status in {"DONE", "CANCELLED", "FAILED"} or self._poll_count <= 0, \
        "Polling finished while the job has status %s" % self.status
    if self.warnings:
        for w in self.warnings:
            warnings.warn(w)
    if self.status == "CANCELLED":
        raise H2OJobCancelled("Job<%s> was cancelled by the user." % self.job_key)
    if self.status == "FAILED":
        if (isinstance(self.job, dict)) and ("stacktrace" in list(self.job)):
            raise EnvironmentError("Job with key {} failed with an exception: {}\nstacktrace: "
                                   "\n{}".format(self.job_key, self.exception, self.job["stacktrace"]))
        else:
            raise EnvironmentError("Job with key %s failed with an exception: %s"
                                   % (self.job_key, self.exception))
    return self
Wait until the job finishes. This method will continuously query the server about the status of the job until the job reaches completion. During this time we will display (in stdout) a progress bar with % completion status.
27,636
def p_ioport_head_width(self, p):
    p[0] = self.create_ioport(p[1], p[3], width=p[2], lineno=p.lineno(3))
    p.set_lineno(0, p.lineno(1))
ioport_head : sigtypes width portname
27,637
def update_vnic_template(self, host_id, vlan_id, physnet, vnic_template_path, vnic_template): ucsm_ip = self.get_ucsm_ip_for_host(host_id) if not ucsm_ip: LOG.info( , str(host_id)) return False vlan_name = self.make_vlan_name(vlan_id) with self.ucsm_connect_disconnect(ucsm_ip) as handle: if not self._create_vlanprofile(handle, vlan_id, ucsm_ip): LOG.error( , vlan_id) return False try: LOG.debug(, vnic_template_path) vnic_template_full_path = (vnic_template_path + const.VNIC_TEMPLATE_PREFIX + str(vnic_template)) LOG.debug(, vnic_template_full_path, physnet) mo = handle.query_dn(vnic_template_full_path) if not mo: LOG.error( , vnic_template_full_path) return False vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX + vlan_name) LOG.debug(, vlan_dn) eth_if = self.ucsmsdk.vnicEtherIf( parent_mo_or_dn=mo, name=vlan_name, default_net="no") handle.add_mo(eth_if) if not eth_if: LOG.error( , {: vlan_name, : vnic_template_full_path}) return False handle.commit() return True except Exception as e: return self._handle_ucsm_exception(e, , vlan_id, ucsm_ip)
Updates VNIC Template with the vlan_id.
27,638
def in_special_context(node):
    global p0, p1, p2, pats_built
    if not pats_built:
        p0 = patcomp.compile_pattern(p0)
        p1 = patcomp.compile_pattern(p1)
        p2 = patcomp.compile_pattern(p2)
        pats_built = True
    patterns = [p0, p1, p2]
    for pattern, parent in zip(patterns, attr_chain(node, "parent")):
        results = {}
        if pattern.match(parent, results) and results["node"] is node:
            return True
    return False
Returns true if node is in an environment where all that is required of it is being iterable (ie, it doesn't matter if it returns a list or an iterator). See test_map_nochange in test_fixers.py for some examples and tests.
27,639
def pre_readline(self):
    if self.rl_do_indent:
        self.readline.insert_text(self._indent_current_str())
    if self.rl_next_input is not None:
        self.readline.insert_text(self.rl_next_input)
        self.rl_next_input = None
readline hook to be used at the start of each line. Currently it handles auto-indent only.
27,640
def _compute_jsonclass(obj):
    module_name = inspect.getmodule(obj).__name__
    json_class = obj.__class__.__name__
    if module_name not in ("", "__main__"):
        json_class = "{0}.{1}".format(module_name, json_class)
    return [json_class, []]
Compute the content of the __jsonclass__ field for the given object :param obj: An object :return: The content of the __jsonclass__ field
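Illustrative call (the module name "mylib.shapes" is hypothetical):

class Circle:                          # defined in __main__ for this sketch
    pass

print(_compute_jsonclass(Circle()))    # ['Circle', []]
# for an instance of mylib.shapes.Circle it would be ['mylib.shapes.Circle', []]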
27,641
def get_offsets(self):
    # dict keys restored from the documented entries below
    return dict(
        header_start=self._info['header_start'],
        data_start=self._info['data_start'],
        data_end=self._info['data_end'],
    )
returns ------- a dictionary with these entries header_start: byte offset from beginning of the file to the start of the header data_start: byte offset from beginning of the file to the start of the data section data_end: byte offset from beginning of the file to the end of the data section Note these are also in the information dictionary, which you can access with get_info()
27,642
def set_scf_algorithm_and_iterations(self, algorithm="diis", iterations=50):
    available_algorithms = {"diis", "dm", "diis_dm", "diis_gdm", "gdm",
                            "rca", "rca_diis", "roothaan"}
    if algorithm.lower() not in available_algorithms:
        raise ValueError("Algorithm " + algorithm + " is not available in QChem")
    self.params["rem"]["scf_algorithm"] = algorithm.lower()
    self.params["rem"]["max_scf_cycles"] = iterations
Set algorithm used for converging SCF and max number of SCF iterations. Args: algorithm: The algorithm used for converging SCF. (str) iterations: The max number of SCF iterations. (Integer)
27,643
def plot(self, data, color='k', symbol=None, line_kind='-', width=1.,
         marker_size=10., edge_color='k', face_color='b', edge_width=1.,
         title=None, xlabel=None, ylabel=None):
    # Default literals were lost in extraction; '-' is confirmed by the
    # docstring, the color defaults follow vispy's PlotWidget conventions.
    self._configure_2d()
    line = scene.LinePlot(data, connect='strip', color=color, symbol=symbol,
                          line_kind=line_kind, width=width,
                          marker_size=marker_size, edge_color=edge_color,
                          face_color=face_color, edge_width=edge_width)
    self.view.add(line)
    self.view.camera.set_range()
    self.visuals.append(line)
    if title is not None:
        self.title.text = title
    if xlabel is not None:
        self.xlabel.text = xlabel
    if ylabel is not None:
        self.ylabel.text = ylabel
    return line
Plot a series of data using lines and markers Parameters ---------- data : array | two arrays Arguments can be passed as ``(Y,)``, ``(X, Y)`` or ``np.array((X, Y))``. color : instance of Color Color of the line. symbol : str Marker symbol to use. line_kind : str Kind of line to draw. For now, only solid lines (``'-'``) are supported. width : float Line width. marker_size : float Marker size. If `size == 0` markers will not be shown. edge_color : instance of Color Color of the marker edge. face_color : instance of Color Color of the marker face. edge_width : float Edge width of the marker. title : str | None The title string to be displayed above the plot xlabel : str | None The label to display along the bottom axis ylabel : str | None The label to display along the left axis. Returns ------- line : instance of LinePlot The line plot. See also -------- marker_types, LinePlot
27,644
def train(self, token, tag, previous=None, next=None):
    self._classifier.train(self._v(token, previous, next), type=tag)
Trains the model to predict the given tag for the given token, in context of the given previous and next (token, tag)-tuples.
27,645
def validate_version(self, service_id, version_number):
    content = self._fetch("/service/%s/version/%d/validate" % (service_id, version_number))
    return self._status(content)
Validate the version for a particular service and version.
27,646
def closest_leaf_to_root(self):
    best = (None, float('inf'))
    d = dict()
    for node in self.traverse_preorder():
        if node.edge_length is None:
            d[node] = 0
        else:
            d[node] = node.edge_length
        if not node.is_root():
            d[node] += d[node.parent]
        if node.is_leaf() and d[node] < best[1]:
            best = (node, d[node])
    return best
Return the leaf that is closest to the root and the corresponding distance. Edges with no length will be considered to have a length of 0 Returns: ``tuple``: First value is the closest leaf to the root, and second value is the corresponding distance
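Usage sketch, assuming the treeswift-style API this method appears to belong to (read_tree_newick is an assumption):

from treeswift import read_tree_newick

tree = read_tree_newick("((A:1,B:2):1,C:0.5);")
leaf, dist = tree.closest_leaf_to_root()
print(leaf, dist)   # C 0.5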
27,647
def group_by(what, by):
    return proso.dict.group_keys_by_values({x: by(x) for x in what})
Take a list and apply the given function on each of its values, then group the values by the function results. .. testsetup:: from proso.list import group_by .. doctest:: >>> group_by([i for i in range(10)], by=lambda x: x % 2 == 0) {False: [1, 3, 5, 7, 9], True: [0, 2, 4, 6, 8]} Args: what: a list which will be transformed by: a function which will be applied on values of the given list Returns: dict: values grouped by the function results
27,648
def configuration_ES(t0: date, t1: Optional[date] = None, steps_per_day: int = None) -> Tuple[np.ndarray, np.ndarray]:
    if steps_per_day is None:
        steps_per_day = 1
    dt: float = 1.0 / float(steps_per_day)
    if t1 is not None:
        jd0: int = julian_day(t0)
        jd1: int = julian_day(t1)
    else:
        jd0: int = julian_day(t0)
        jd1: int = jd0 + dt
    jd: np.ndarray = np.arange(jd0, jd1, dt)
    # body-name keys restored from the surrounding variable names
    sun_id: int = jpl_body_id['sun']
    pos_sun, vel_sun = jpl_kernel[0, sun_id].compute_and_differentiate(jd)
    earth_id: int = jpl_body_id['earth']
    pos_earth, vel_earth = jpl_kernel[0, earth_id].compute_and_differentiate(jd)
    q = np.vstack([pos_sun, pos_earth]).T * km2m
    v = np.vstack([vel_sun, vel_earth]).T * (km2m / day2sec)
    return q, v
Get the positions and velocities of the earth and sun from date t0 to t1. Returned as a tuple q, v q: Nx3 array of positions (x, y, z) in the J2000.0 coordinate frame.
27,649
def populateFromFile(self, dataUrl):
    self._dbFilePath = dataUrl
    self._db = Gff3DbBackend(self._dbFilePath)
Populates the instance variables of this FeatureSet from the specified data URL.
27,650
def get_alter_table_sql(self, diff): sql = self._get_simple_alter_table_sql(diff) if sql is not False: return sql from_table = diff.from_table if not isinstance(from_table, Table): raise DBALException( "SQLite platform requires for the alter table the table diff " "referencing the original table" ) table = from_table.clone() columns = OrderedDict() old_column_names = OrderedDict() new_column_names = OrderedDict() column_sql = [] for column_name, column in table.get_columns().items(): column_name = column_name.lower() columns[column_name] = column old_column_names[column_name] = column.get_quoted_name(self) new_column_names[column_name] = column.get_quoted_name(self) for column_name, column in diff.removed_columns.items(): column_name = column_name.lower() if column_name in columns: del columns[column_name] del old_column_names[column_name] del new_column_names[column_name] for old_column_name, column in diff.renamed_columns.items(): old_column_name = old_column_name.lower() if old_column_name in columns: del columns[old_column_name] columns[column.get_name().lower()] = column if old_column_name in new_column_names: new_column_names[old_column_name] = column.get_quoted_name(self) for old_column_name, column_diff in diff.changed_columns.items(): if old_column_name in columns: del columns[old_column_name] columns[column_diff.column.get_name().lower()] = column_diff.column if old_column_name in new_column_names: new_column_names[old_column_name] = column_diff.column.get_quoted_name( self ) for column_name, column in diff.added_columns.items(): columns[column_name.lower()] = column table_sql = [] data_table = Table("__temp__" + table.get_name()) new_table = Table( table.get_quoted_name(self), columns, self._get_primary_index_in_altered_table(diff), self._get_foreign_keys_in_altered_table(diff), table.get_options(), ) new_table.add_option("alter", True) sql = self.get_pre_alter_table_index_foreign_key_sql(diff) sql.append( "CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s" % ( data_table.get_quoted_name(self), ", ".join(old_column_names.values()), table.get_quoted_name(self), ) ) sql.append(self.get_drop_table_sql(from_table)) sql += self.get_create_table_sql(new_table) sql.append( "INSERT INTO %s (%s) SELECT %s FROM %s" % ( new_table.get_quoted_name(self), ", ".join(new_column_names.values()), ", ".join(old_column_names.values()), data_table.get_name(), ) ) sql.append(self.get_drop_table_sql(data_table)) sql += self.get_post_alter_table_index_foreign_key_sql(diff) return sql
Get the ALTER TABLE SQL statement :param diff: The table diff :type diff: orator.dbal.table_diff.TableDiff :rtype: list
27,651
def is_transition_metal(self):
    ns = list(range(21, 31))
    ns.extend(list(range(39, 49)))
    ns.append(57)
    ns.extend(list(range(72, 81)))
    ns.append(89)
    ns.extend(list(range(104, 113)))
    return self.Z in ns
True if element is a transition metal.
27,652
def getReffs(self, level=1, subreference=None) -> CtsReferenceSet:
    level += self.depth
    if not subreference:
        subreference = self.reference
    return self.textObject.getValidReff(level, reference=subreference)
Reference available at a given level :param level: Depth required. If not set, should retrieve first encountered level (1 based). 0 retrieves inside a range :param subreference: Subreference (optional) :returns: List of levels
27,653
def getAutoStartEnabled(self): command = settings = self.sendCommand(command) flags = int(settings[2], 16) return not (flags & 0x0040)
Returns True if enabled, False if disabled
27,654
def update_grammar_with_untyped_entities(grammar_dictionary: Dict[str, List[str]]) -> None: grammar_dictionary["string_set_vals"] = [, ] grammar_dictionary["value"].remove() grammar_dictionary["value"].remove() grammar_dictionary["limit"] = [, ] grammar_dictionary["expr"][1] = del grammar_dictionary["string"] del grammar_dictionary["number"]
Variables can be treated as numbers or strings if their type can be inferred - however, that can be difficult, so instead, we can just treat them all as values and be a bit looser on the typing we allow in our grammar. Here we just remove all references to number and string from the grammar, replacing them with value.
27,655
def dns_compress(pkt): if DNS not in pkt: raise Scapy_Exception("Can only compress DNS layers") pkt = pkt.copy() dns_pkt = pkt.getlayer(DNS) build_pkt = raw(dns_pkt) def field_gen(dns_pkt): for lay in [dns_pkt.qd, dns_pkt.an, dns_pkt.ns, dns_pkt.ar]: if lay is None: continue current = lay while not isinstance(current, NoPayload): if isinstance(current, InheritOriginDNSStrPacket): for field in current.fields_desc: if isinstance(field, DNSStrField) or \ (isinstance(field, MultipleTypeField) and current.type in [2, 5, 12]): dat = current.getfieldval(field.name) yield current, field.name, dat current = current.payload def possible_shortens(dat): yield dat for x in range(1, dat.count(b".")): yield dat.split(b".", x)[x] data = {} burned_data = 0 for current, name, dat in field_gen(dns_pkt): for part in possible_shortens(dat): encoded = dns_encode(part, check_built=True) if part not in data: data[part].append((current, name)) burned_data += len(encoded) - 2 break for ck in data: replacements = data[ck] replace_pointer = replacements.pop(0)[2] for rep in replacements: val = rep[0].getfieldval(rep[1]) assert val.endswith(ck) kept_string = dns_encode(val[:-len(ck)], check_built=True)[:-1] new_val = kept_string + replace_pointer rep[0].setfieldval(rep[1], new_val) try: del(rep[0].rdlen) except AttributeError: pass if not isinstance(pkt, DNS) and pkt.getlayer(DNS).underlayer: pkt.getlayer(DNS).underlayer.remove_payload() return pkt / dns_pkt return dns_pkt
This function compresses a DNS packet according to compression rules.
27,656
def replace_mutating_webhook_configuration(self, name, body, **kwargs):
    # kwargs keys restored per the swagger-codegen pattern this generated
    # client follows; 'async_req' is named in the docstring below.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_mutating_webhook_configuration_with_http_info(name, body, **kwargs)
    else:
        (data) = self.replace_mutating_webhook_configuration_with_http_info(name, body, **kwargs)
        return data
replace the specified MutatingWebhookConfiguration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_mutating_webhook_configuration(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the MutatingWebhookConfiguration (required) :param V1beta1MutatingWebhookConfiguration body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1beta1MutatingWebhookConfiguration If the method is called asynchronously, returns the request thread.
27,657
def read(self, address, length_bytes, x, y, p=0):
    connection = self._get_connection(x, y)
    return connection.read(self.scp_data_length, self.scp_window_size,
                           x, y, p, address, length_bytes)
Read a bytestring from an address in memory. Parameters ---------- address : int The address at which to start reading the data. length_bytes : int The number of bytes to read from memory. Large reads are transparently broken into multiple SCP read commands. Returns ------- :py:class:`bytes` The data is read back from memory as a bytestring.
27,658
def report(self):
    # The original format string was lost in extraction; reconstructed from
    # the documented fields (elapsed seconds, frame count, step size).
    message = "Rendering report: {}s elapsed, {} frames, step {}"
    print(message.format(self.elapsed_time, self.frames, self.step))
Prints a report about the animation rendering to standard output: seconds spent, number of frames, and the step size used in the functional animation.
27,659
def transformer_symshard_base(): hparams = common_hparams.basic_params1() hparams.hidden_size = 256 hparams.batch_size = 2048 hparams.max_length = 0 hparams.layer_prepostprocess_dropout = 0.2 hparams.add_hparam("attention_dropout", 0.1) hparams.add_hparam("relu_dropout", 0.0) hparams.add_hparam("relu_dropout_broadcast_dims", "1") hparams.layer_prepostprocess_dropout = 0.1 hparams.layer_prepostprocess_dropout_broadcast_dims = "1" hparams.label_smoothing = 0.1 hparams.clip_grad_norm = 0. hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 hparams.initializer_gain = 1.0 hparams.initializer = "uniform_unit_scaling" hparams.weight_decay = 0.0 hparams.shared_embedding_and_softmax_weights = True hparams.no_data_parallelism = True hparams.bottom = { "inputs": modalities.identity_bottom, "targets": modalities.identity_bottom, } hparams.top = { "targets": modalities.identity_top, } hparams.add_hparam("filter_size", 1280) hparams.add_hparam("mix_fraction", 0.5) hparams.add_hparam("multihead_attention_num_heads", 4) hparams.add_hparam("multihead_attention_key_channels", 0) hparams.add_hparam("multihead_attention_value_channels", 0) hparams.add_hparam("pos", "timing") hparams.add_hparam( "encoder_layers", ("n,att,m,d,a," "n,ffn,m,d,a,") * 6 + "n,d") hparams.add_hparam( "decoder_layers", ("n,att,m,d,a," "n,enc-att,m,d,a," "n,ffn,m,d,a,") * 6 + "n,d") hparams.add_hparam("num_model_shards", 8) return hparams
Set of hyperparameters.
27,660
def color_range(color, N=20):
    color = normalize(color)
    org = color
    color = hex_to_hsv(color)
    HSV_tuples = [(color[0], x, color[2]) for x in np.arange(0, 1, 2.0 / N)]
    HSV_tuples.extend([(color[0], color[1], x) for x in np.arange(0, 1, 2.0 / N)])
    hex_out = []
    for c in HSV_tuples:
        c = colorsys.hsv_to_rgb(*c)
        c = [int(_ * 255) for _ in c]
        # the hex formatting literal was lost in extraction; reconstructed
        hex_out.append("#%02x%02x%02x" % tuple(c))
    if org not in hex_out:
        hex_out.append(org)
    hex_out.sort()
    return hex_out
Generates a scale of colours from a base colour Parameters: ----------- color : string Color representation in hex N : int number of colours to generate Example: color_range('#ff9933',20)
27,661
def _get_app_module_path(module_label):
    app_name = module_label.rsplit('.', 1)[0]
    for app in settings.INSTALLED_APPS:
        if app.endswith('.' + app_name) or app == app_name:
            return app
    return None
Given a module label, loop over the apps specified in the INSTALLED_APPS to find the corresponding application module path.
27,662
async def get(self):
    self._parent._check_closing()
    async with self._parent._async_not_empty:
        self._parent._sync_mutex.acquire()
        locked = True
        try:
            do_wait = True
            while do_wait:
                do_wait = self._parent._qsize() == 0
                if do_wait:
                    locked = False
                    self._parent._sync_mutex.release()
                    await self._parent._async_not_empty.wait()
                    self._parent._sync_mutex.acquire()
                    locked = True

            item = self._parent._get()
            self._parent._async_not_full.notify()
            self._parent._notify_sync_not_full()
            return item
        finally:
            if locked:
                self._parent._sync_mutex.release()
Remove and return an item from the queue. If queue is empty, wait until an item is available. This method is a coroutine.
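The method matches the mixed sync/async queue pattern of the janus library; a hedged usage sketch under that assumption:

import asyncio
import janus

async def main():
    q = janus.Queue()
    q.sync_q.put(42)              # the producer may be a plain thread using the sync side
    item = await q.async_q.get()  # the coroutine above: waits until an item exists
    print(item)

asyncio.run(main())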
27,663
def _retrieve_device_cache(proxy=None):
    global DEVICE_CACHE
    if not DEVICE_CACHE:
        if proxy and salt.utils.napalm.is_proxy(__opts__):
            # key literal restored: 'napalm.get_device' is the function
            # exposed by the NAPALM proxy module
            if 'napalm.get_device' in proxy:
                DEVICE_CACHE = proxy['napalm.get_device']()
        elif not proxy and salt.utils.napalm.is_minion(__opts__):
            DEVICE_CACHE = salt.utils.napalm.get_device(__opts__)
    return DEVICE_CACHE
Loads the network device details if not cached already.
27,664
def wait(self):
    self.log.debug("acquiring wait lock to wait for completion")
    with self._wait_lock:
        self.log.debug("got wait lock")
        witnessed_end = False
        if self.exit_code is None:
            self.log.debug("exit code not set, waiting on pid")
            pid, exit_code = no_interrupt(os.waitpid, self.pid, 0)
            self.exit_code = handle_process_exit_code(exit_code)
            witnessed_end = True
        else:
            self.log.debug("exit code already set (%d), no need to wait", self.exit_code)

        self._quit_threads.set()
        if self._input_thread:
            self._input_thread.join()

        timer = threading.Timer(2.0, self._stop_output_event.set)
        timer.start()
        self._output_thread.join()
        timer.cancel()

        self._background_thread.join()
        if witnessed_end:
            self._process_just_ended()
        return self.exit_code
waits for the process to complete, handles the exit code
27,665
def _lookup_node_parent(self, node):
    if not node in self.CACHED_ANCESTRY_DICT:
        ancestry_dict = dict(
            (c, p) for p in self._impl_document.getiterator() for c in p)
        self.CACHED_ANCESTRY_DICT = ancestry_dict
    return self.CACHED_ANCESTRY_DICT[node]
Return the parent of the given node, based on an internal dictionary mapping of child nodes to each child's parent; this is required since ElementTree doesn't make info about node ancestry/parentage available.
27,666
def accept( self ): if ( not self.uiNameTXT.text() ): QMessageBox.information(self, , ) return prof = self.profile() if ( not prof ): prof = XViewProfile() prof.setName(nativestring(self.uiNameTXT.text())) prof.setVersion(self.uiVersionSPN.value()) prof.setDescription(nativestring(self.uiDescriptionTXT.toPlainText())) prof.setIcon(self.uiIconBTN.filepath()) super(XViewProfileDialog, self).accept()
Saves the data to the profile before closing.
27,667
def slughifi(value, overwrite_char_map={}): if type(value) != text_type: value = value.decode(, ) char_map.update(overwrite_char_map) value = re.sub(, replace_char, value) value = slugify(value) return value.encode(, ).decode()
High Fidelity slugify - slughifi.py, v 0.1 Examples : >>> text = 'C\'est déjà l\'été.' >>> slughifi(text) 'cest-deja-lete' >>> slughifi(text, overwrite_char_map={u"'": u'-',}) 'c-est-deja-l-ete' >>> slughifi(text, do_slugify=False) "C'est deja l'ete." # Normal slugify removes accented characters >>> slugify(text) 'cest-dj-lt'
27,668
def WriteFixedString(self, value, length):
    towrite = value.encode()
    slen = len(towrite)
    if slen > length:
        raise Exception("string longer than fixed length: %s " % length)
    self.WriteBytes(towrite)
    diff = length - slen
    while diff > 0:
        self.WriteByte(0)
        diff -= 1
Write a string value to the stream. Args: value (str): value to write to the stream. length (int): length of the string to write.
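Worked example (the BinaryWriter wrapper owning WriteBytes/WriteByte is assumed from the method above):

import io

stream = io.BytesIO()
writer = BinaryWriter(stream)          # hypothetical constructor taking a stream
writer.WriteFixedString("abc", 5)      # stream now holds b"abc\x00\x00"
writer.WriteFixedString("toolong", 5)  # raises: string longer than fixed length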
27,669
def set(self, key, value):
    if self._jconf is not None:
        self._jconf.set(key, unicode(value))
    else:
        self._conf[key] = unicode(value)
    return self
Set a configuration property.
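Because set() returns self, configuration calls chain fluently (SparkConf-style, which this snippet resembles):

from pyspark import SparkConf

conf = SparkConf().set("spark.app.name", "demo") \
                  .set("spark.executor.memory", "2g")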
27,670
def int_flags(flags, mapper=const.PERM_STRING_MAP):
    r = 0
    if not flags:
        return r
    if isinstance(flags, six.integer_types):
        return flags
    if not isinstance(flags, six.string_types):
        raise TypeError("`flags` needs to be a string or integer type")
    for f in flags:
        for f_i, f_s in mapper:
            if f_s == f:
                r = r | f_i
    return r
Converts string permission flags into integer permission flags as specified in const.PERM_STRING_MAP Arguments: - flags <str>: one or more flags For example: "crud" or "ru" or "r" - mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping int permission flag to string permission flag. If not specified will default to const.PERM_STRING_MAP. Returns: - int
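A self-contained check using an explicit mapper, so no assumptions about const.PERM_STRING_MAP's actual values are needed (the flag values below are illustrative):

PERM_READ, PERM_UPDATE = 0x01, 0x04
mapper = [(PERM_READ, "r"), (PERM_UPDATE, "u")]
assert int_flags("ru", mapper) == PERM_READ | PERM_UPDATE
assert int_flags(6, mapper) == 6   # integers pass through unchanged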
27,671
def _walk(self, target, visitor):
    visited = set()

    def walk(current):
        if current not in visited:
            visited.add(current)
            keep_going = visitor(current)
            if keep_going:
                for dependency in self.dependencies(current):
                    walk(dependency)

    walk(target)
Walks the dependency graph for the given target. :param target: The target to start the walk from. :param visitor: A function that takes a target and returns `True` if its dependencies should also be visited.
27,672
def filter(self, info, releases):
    for version in list(releases.keys()):
        if any(pattern.match(version) for pattern in self.patterns):
            del releases[version]
Remove all release versions that match any of the specified patterns.
27,673
def list(self):
    if self.count() == 0:
        return 'Nothing to return'

    log.debug('Building a detailed report of the results.')

    # dict keys restored from the structure documented in the docstring below
    results = {}
    for update in self.updates:
        results[update.Identity.UpdateID] = {
            'guid': update.Identity.UpdateID,
            'Title': six.text_type(update.Title),
            'Type': self.update_types[update.Type],
            'Description': update.Description,
            'Downloaded': bool(update.IsDownloaded),
            'Installed': bool(update.IsInstalled),
            'Mandatory': bool(update.IsMandatory),
            'EULAAccepted': bool(update.EulaAccepted),
            'NeedsReboot': bool(update.RebootRequired),
            'Severity': six.text_type(update.MsrcSeverity),
            'UserInput': bool(update.InstallationBehavior.CanRequestUserInput),
            'RebootBehavior': self.reboot_behavior[update.InstallationBehavior.RebootBehavior],
            'KBs': ['KB' + item for item in update.KBArticleIDs],
            'Categories': [item.Name for item in update.Categories]
        }
    return results
Create a dictionary with the details for the updates in the collection. Returns: dict: Details about each update .. code-block:: cfg List of Updates: {'<GUID>': {'Title': <title>, 'KB': <KB>, 'GUID': <the globally unique identifier for the update> 'Description': <description>, 'Downloaded': <has the update been downloaded>, 'Installed': <has the update been installed>, 'Mandatory': <is the update mandatory>, 'UserInput': <is user input required>, 'EULAAccepted': <has the EULA been accepted>, 'Severity': <update severity>, 'NeedsReboot': <is the update installed and awaiting reboot>, 'RebootBehavior': <will the update require a reboot>, 'Categories': [ '<category 1>', '<category 2>', ...] } } Code Example: .. code-block:: python import salt.utils.win_update updates = salt.utils.win_update.Updates() updates.list()
27,674
def clean_pe_name(self, nlog, root): use_output_name = getattr(config, , {}).get(, False) if use_output_name: name = re.search(r, nlog) else: name = re.search(r, nlog) if not name: return None name = name.group(1) name = self.clean_s_name(name, root) return name
additional name cleaning for paired end data
27,675
def process_link(self, env, refnode, has_explicit_title, title, target):
    # attribute and prefix literals restored following the standard Sphinx
    # domain cross-reference conventions ('~' and '.' prefixes)
    refnode['chpl:module'] = env.temp_data.get('chpl:module')
    refnode['chpl:class'] = env.temp_data.get('chpl:class')
    if not has_explicit_title:
        title = title.lstrip('.')
        target = target.lstrip('~')
        if title[0:1] == '~':
            title = title[1:]
            dot = title.rfind('.')
            if dot != -1:
                title = title[dot + 1:]
    if target[0:1] == '.':
        target = target[1:]
        refnode['refspecific'] = True
    return title, target
Called after parsing title and target text, and creating the reference node. Alter the reference node and return it with chapel module and class information, if relevant.
27,676
def block_jids(self, jids_to_block):
    yield from self._check_for_blocking()
    if not jids_to_block:
        return
    cmd = blocking_xso.BlockCommand(jids_to_block)
    iq = aioxmpp.IQ(
        type_=aioxmpp.IQType.SET,
        payload=cmd,
    )
    yield from self.client.send(iq)
Add the JIDs in the sequence `jids_to_block` to the client's blocklist.
27,677
def generate(bits, progress_func=None):
    signing_key = ECDSA.generate()
    key = ECDSAKey(vals=(signing_key, signing_key.get_verifying_key()))
    return key
Generate a new private ECDSA key. This factory function can be used to generate a new host key or authentication key. The `bits` and `progress_func` parameters are accepted for API compatibility but are not used by this ECDSA implementation. @param bits: number of bits the generated key should be. @type bits: int @param progress_func: an optional function to call at key points in key generation. @type progress_func: function @return: new private key @rtype: L{ECDSAKey}
27,678
def get_display_opts(options, argv=sys.argv):
    from Xlib import display, Xatom
    import os

    name = os.path.splitext(os.path.basename(argv[0]))[0]
    optdb = ResourceDB()
    leftargv = optdb.getopt(name, argv[1:], options)

    # resource/class names restored per X resource naming conventions
    dname = optdb.get(name + '.display', name + '.Display', None)
    d = display.Display(dname)

    rdbstring = d.screen(0).root.get_full_property(Xatom.RESOURCE_MANAGER, Xatom.STRING)
    if rdbstring:
        data = rdbstring.value
    else:
        data = None

    db = ResourceDB(string=data)
    db.update(optdb)
    return d, name, db, leftargv
display, name, db, args = get_display_opts(options, [argv]) Parse X OPTIONS from ARGV (or sys.argv if not provided). Connect to the display specified by a *.display resource if one is set, or to the default X display otherwise. Extract the RESOURCE_MANAGER property and insert all resources from ARGV. The four return values are: DISPLAY -- the display object NAME -- the application name (the filname of ARGV[0]) DB -- the created resource database ARGS -- any remaining arguments
27,679
def _insert_continuation_prompt(self, cursor):
    if self._continuation_prompt_html is None:
        self._insert_plain_text(cursor, self._continuation_prompt)
    else:
        self._continuation_prompt = self._insert_html_fetching_plain_text(
            cursor, self._continuation_prompt_html)
Inserts new continuation prompt using the specified cursor.
27,680
def reqContractDetails(self, contract: Contract) -> List[ContractDetails]:
    return self._run(self.reqContractDetailsAsync(contract))
Get a list of contract details that match the given contract. If the returned list is empty then the contract is not known; if the list has multiple values then the contract is ambiguous. The fully qualified contract is available in the ContractDetails.contract attribute. This method is blocking. https://interactivebrokers.github.io/tws-api/contract_details.html Args: contract: The contract to get details for.
27,681
def execute(self, conn, logical_file_name, transaction=False):
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed",
                            "Oracle/FileBuffer/DeleteDupicates. Expects db connection from upper layer.")
    print(self.sql)
    self.dbi.processData(self.sql, logical_file_name, conn, transaction)
simple execute
27,682
def normalize_nfc(txt):
    if isinstance(txt, bytes):
        txt = txt.decode()
    return unicodedata.normalize("NFC", txt).encode()
Normalize message to NFC and return bytes suitable for protobuf. This seems to be bitcoin-qt standard of doing things.
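Worked example: the decomposed sequence "e" + COMBINING ACUTE ACCENT becomes the single precomposed code point before UTF-8 encoding:

decomposed = "e\u0301"                                 # "é" as two code points
assert normalize_nfc(decomposed) == "\u00e9".encode()  # b'\xc3\xa9'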
27,683
def mount_point(cls, file_path):
    mount = None
    for mp in cls.mounts():
        mp_path = mp.path()
        if file_path.startswith(mp_path) is True:
            if mount is None or len(mount.path()) <= len(mp_path):
                mount = mp
    return mount
Return the mount point on which the given path resides :param file_path: target path to search :return: WMountPoint or None (if the file path is outside all current mount points)
27,684
def MergeAllSummaries(period=0, run_alone=False, key=None):
    if key is None:
        key = tf.GraphKeys.SUMMARIES
    period = int(period)
    if run_alone:
        return MergeAllSummaries_RunAlone(period, key)
    else:
        return MergeAllSummaries_RunWithOp(period, key)
This callback is enabled by default. Evaluate all summaries by ``tf.summary.merge_all``, and write them to logs. Args: period (int): by default the callback summarizes once every epoch. This option (if not set to 0) makes it additionally summarize every ``period`` steps. run_alone (bool): whether to evaluate the summaries alone. If True, summaries will be evaluated after each epoch alone. If False, summaries will be evaluated together with the `sess.run` calls, in the last step of each epoch. For :class:`SimpleTrainer`, it needs to be False because summary may depend on inputs. key (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``. Default is ``tf.GraphKeys.SUMMARIES``.
27,685
def take_action(name=None, call=None, command=None, data=None, method=, location=DEFAULT_LOCATION): action caller = inspect.stack()[1][3] if call != : raise SaltCloudSystemExit( ) if data: data = salt.utils.json.dumps(data) ret = [] try: ret = query(command=command, data=data, method=method, location=location) log.info(, caller, name) except Exception as exc: if in six.text_type(exc): ret = [200, {}] else: log.error( , caller, name, exc, exc_info_on_loglevel=logging.DEBUG ) ret = [100, {}] return ret
take action call used by start,stop, reboot :param name: name given to the machine :param call: call value in this case is 'action' :command: api path :data: any data to be passed to the api, must be in json format :method: GET,POST,or DELETE :location: data center to execute the command on :return: true if successful
27,686
def get_sql(self):
    test_method = [
        self.is_time,
        self.is_date,
        self.is_datetime,
        self.is_decimal,
        self.is_year,
        self.is_tinyint,
        self.is_smallint,
        self.is_mediumint,
        self.is_int,
        self.is_bigint,
        self.is_tinytext,
        self.is_varchar,
        self.is_mediumtext,
        self.is_longtext,
    ]
    for method in test_method:
        if method():
            return self.sql
Retrieve the data type for a data record.
27,687
def get_or_exception(cls, id):
    obj = cls.get(id)
    if obj is None:
        raise ModelNotFoundError()
    return obj
Tries to retrieve an instance of this model from the database or raises an exception in case of failure
27,688
def read_vcf(input, fields=None, exclude_fields=None, rename_fields=None, types=None,
             numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None,
             tabix='tabix', samples=None, transformers=None,
             buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH,
             log=None):
    # literals restored following scikit-allel conventions: tabix defaults to
    # the 'tabix' executable name, samples are stored under the 'samples' key
    store_samples, fields = _prep_fields_param(fields)
    fields, samples, headers, it = iter_vcf_chunks(
        input=input, fields=fields, exclude_fields=exclude_fields, types=types,
        numbers=numbers, alt_number=alt_number, buffer_size=buffer_size,
        chunk_length=chunk_length, fills=fills, region=region, tabix=tabix,
        samples=samples, transformers=transformers
    )
    if rename_fields:
        rename_fields, it = _do_rename(it, fields=fields,
                                       rename_fields=rename_fields,
                                       headers=headers)
    if log is not None:
        it = _chunk_iter_progress(it, log, prefix='[read_vcf]')
    chunks = [d[0] for d in it]
    if chunks:
        output = dict()
        if len(samples) > 0 and store_samples:
            output['samples'] = samples
        keys = sorted(chunks[0].keys())
        for k in keys:
            output[k] = np.concatenate([chunk[k] for chunk in chunks], axis=0)
    else:
        output = None
    return output
Read data from a VCF file into NumPy arrays. .. versionchanged:: 1.12.0 Now returns None if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string or file-like {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- data : dict[str, ndarray] A dictionary holding arrays, or None if no variants were found.
27,689
def visit_str(self, node, parent):
    return nodes.Const(
        node.s,
        getattr(node, "lineno", None),
        getattr(node, "col_offset", None),
        parent,
    )
visit a String/Bytes node by returning a fresh instance of Const
27,690
def orderrun_detail(backend, kitchen, summary, nodestatus, runstatus, log, timing, test, all_things, order_id, order_run_id, disp_order_id, disp_order_run_id): err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen) if use_kitchen is None: raise click.ClickException(err_str) pd = dict() if all_things: pd[] = True pd[] = True pd[] = True pd[] = True pd[] = True if summary: pd[] = True if log: pd[] = True if timing: pd[] = True if test: pd[] = True if nodestatus: pd[] = True if runstatus: pd[] = True if disp_order_id: pd[] = True if disp_order_run_id: pd[] = True if not runstatus and \ not all_things and \ not test and \ not timing and \ not log and \ not nodestatus and \ not summary and \ not disp_order_id and \ not disp_order_run_id: pd[] = True if order_id is not None and order_run_id is not None: raise click.ClickException("Cannot specify both the Order Id and the OrderRun Id") if order_id is not None: pd[DKCloudCommandRunner.ORDER_ID] = order_id.strip() elif order_run_id is not None: pd[DKCloudCommandRunner.ORDER_RUN_ID] = order_run_id.strip() check_and_print(DKCloudCommandRunner.orderrun_detail(backend.dki, use_kitchen, pd))
Display information about an Order-Run
27,691
def create_arguments(primary, pyfunction, call_node, scope):
    args = list(call_node.args)
    args.extend(call_node.keywords)
    called = call_node.func
    if _is_method_call(primary, pyfunction) and isinstance(called, ast.Attribute):
        args.insert(0, called.value)
    return Arguments(args, scope)
A factory for creating `Arguments`
27,692
def _create_pax_generic_header(cls, pax_headers, type, encoding):
    binary = False
    for keyword, value in pax_headers.items():
        try:
            value.encode("utf8", "strict")
        except UnicodeEncodeError:
            binary = True
            break

    records = b""
    if binary:
        records += b"21 hdrcharset=BINARY\n"

    for keyword, value in pax_headers.items():
        keyword = keyword.encode("utf8")
        if binary:
            # value-encoding branch and the record-length seed were lost in
            # extraction; restored from CPython's tarfile implementation
            value = value.encode(encoding, "surrogateescape")
        else:
            value = value.encode("utf8")
        l = len(keyword) + len(value) + 3  # ' ' + '=' + '\n'
        # the length field counts its own digits; iterate until it stabilizes
        n = p = 0
        while True:
            n = l + len(str(p))
            if n == p:
                break
            p = n
        records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"

    info = {}
    info["name"] = "././@PaxHeader"
    info["type"] = type
    info["size"] = len(records)
    info["magic"] = POSIX_MAGIC

    return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
        cls._create_payload(records)
Return a POSIX.1-2008 extended or global header sequence that contains a list of keyword, value pairs. The values must be strings.
27,693
def stream(self, item, *, device_id=None, quality='hi', session_token=None):
    # default restored from the docstring: quality defaults to 'hi' (320Kbps)
    if device_id is None:
        device_id = self.device_id

    stream_url = self.stream_url(
        item, device_id=device_id, quality=quality, session_token=session_token
    )
    response = self.session.get(stream_url)
    audio = response.content
    return audio
Get MP3 stream of a podcast episode, library song, station_song, or store song. Note: Streaming requires a ``device_id`` from a valid, linked mobile device. Parameters: item (str): A podcast episode, library song, station_song, or store song. A Google Music subscription is required to stream store songs. device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps). Default: ``'hi'``. session_token (str): Session token from a station dict required for unsubscribed users to stream a station song. station['sessionToken'] as returend by :meth:`station` only exists for free accounts. Returns: bytes: An MP3 file.
27,694
def bures_angle(rho0: Density, rho1: Density) -> float:
    return np.arccos(np.sqrt(fidelity(rho0, rho1)))
Return the Bures angle between mixed quantum states Note: Bures angle cannot be calculated within the tensor backend.
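Sanity checks (rho, rho_zero, rho_one are hypothetical Density states; this relies only on fidelity(rho, rho) == 1 and fidelity of orthogonal pure states == 0):

angle_same = bures_angle(rho, rho)           # arccos(sqrt(1)) == 0.0
angle_orth = bures_angle(rho_zero, rho_one)  # arccos(sqrt(0)) == pi/2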
27,695
def doane(data):
    from scipy.stats import skew
    n = len(data)
    sigma = np.sqrt(6. * (n - 2.) / (n + 1.) / (n + 3.))
    return 1 + np.log2(n) + np.log2(1 + np.abs(skew(data)) / sigma)
Doane's modified rule for choosing the number of histogram bins; it extends Sturges' formula with a skewness correction.
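The rule computes k = 1 + log2(n) + log2(1 + |g1| / sigma_g1), where g1 is the sample skewness and sigma_g1 = sqrt(6(n-2) / ((n+1)(n+3))). Quick check:

import numpy as np

rng = np.random.default_rng(0)
print(doane(rng.normal(size=1000)))   # about 11 bins for roughly symmetric data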
27,696
def use_model_attr(attr):
    def use_model_validator(instance, attribute, value):
        getattr(instance, attr)(instance, attribute, value)
    return use_model_validator
Use the validator set on a separate attribute on the class.
27,697
def anim_to_html(anim, fps=None, embed_frames=True, default_mode='loop'):
    # the default_mode literal and the temporary-file handling were lost in
    # extraction; reconstructed with NamedTemporaryFile per the JSAnimation recipe
    if fps is None and hasattr(anim, '_interval'):
        fps = 1000. / anim._interval
    plt.close(anim._fig)
    if hasattr(anim, "_html_representation"):
        return anim._html_representation
    else:
        with NamedTemporaryFile(suffix='.html') as f:
            anim.save(f.name, writer=HTMLWriter(fps=fps,
                                                embed_frames=embed_frames,
                                                default_mode=default_mode))
            html = open(f.name).read()
        anim._html_representation = html
        return html
Generate HTML representation of the animation
27,698
def _uminumaxvmin(self,*args,**kwargs): delta= kwargs.pop(,self._delta) if ((self._c and not ( in kwargs and not kwargs[]))\ or (ext_loaded and (( in kwargs and kwargs[])))) \ and _check_c(self._pot): if len(args) == 5: R,vR,vT, z, vz= args elif len(args) == 6: R,vR,vT, z, vz, phi= args else: self._parse_eval_args(*args) R= self._eval_R vR= self._eval_vR vT= self._eval_vT z= self._eval_z vz= self._eval_vz if isinstance(R,float): R= nu.array([R]) vR= nu.array([vR]) vT= nu.array([vT]) z= nu.array([z]) vz= nu.array([vz]) Lz= R*vT if self._useu0: if in kwargs: u0= nu.asarray(kwargs[]) else: E= nu.array([_evaluatePotentials(self._pot,R[ii],z[ii]) +vR[ii]**2./2.+vz[ii]**2./2.+vT[ii]**2./2. for ii in range(len(R))]) u0= actionAngleStaeckel_c.actionAngleStaeckel_calcu0(\ E,Lz,self._pot,delta)[0] kwargs.pop(,None) else: u0= None umin, umax, vmin, err= \ actionAngleStaeckel_c.actionAngleUminUmaxVminStaeckel_c(\ self._pot,delta,R,vR,vT,z,vz,u0=u0) if err == 0: return (umin,umax,vmin) else: raise RuntimeError("C-code for calculation actions failed; try with c=False") else: if in kwargs and kwargs[] and not self._c: warnings.warn("C module not used because potential does not have a C implementation",galpyWarning) kwargs.pop(,None) if (len(args) == 5 or len(args) == 6) \ and isinstance(args[0],nu.ndarray): oumin= nu.zeros((len(args[0]))) oumax= nu.zeros((len(args[0]))) ovmin= nu.zeros((len(args[0]))) for ii in range(len(args[0])): if len(args) == 5: targs= (args[0][ii],args[1][ii],args[2][ii], args[3][ii],args[4][ii]) elif len(args) == 6: targs= (args[0][ii],args[1][ii],args[2][ii], args[3][ii],args[4][ii],args[5][ii]) tkwargs= copy.copy(kwargs) try: tkwargs[]= delta[ii] except TypeError: tkwargs[]= delta tumin,tumax,tvmin= self._uminumaxvmin(\ *targs,**tkwargs) oumin[ii]= tumin oumax[ii]= tumax ovmin[ii]= tvmin return (oumin,oumax,ovmin) else: aASingle= actionAngleStaeckelSingle(*args,pot=self._pot, delta=delta) umin, umax= aASingle.calcUminUmax() vmin= aASingle.calcVmin() return (umin,umax,vmin)
NAME: _uminumaxvmin PURPOSE: evaluate u_min, u_max, and v_min INPUT: Either: a) R,vR,vT,z,vz b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well c= True/False; overrides the object's c= keyword to use C or not OUTPUT: (umin,umax,vmin) HISTORY: 2017-12-12 - Written - Bovy (UofT)
27,699
def make_response(self, rv, status=200, headers=None, mime=): if not isinstance(rv, Response): resp = Response( response=rv, headers=headers, mimetype=mime, status=status ) else: resp = rv return resp
Create a response object using the :class:`flask.Response` class. :param rv: Response value. If the value is not an instance of :class:`werkzeug.wrappers.Response` it will be converted into a Response object. :param status: specify the HTTP status code for this response. :param mime: Specify the mimetype for this request. :param headers: Specify dict of headers for the response.