[Dataset viewer header] ~389k rows. Columns: row index (int64), "code" (strings, 26 to 79.6k chars), "docstring" (strings, 1 to 46.9k chars).
14,500
def insert(self, resource, value): parts = resource_parts_re.split(resource) if parts[-1] == : return self.lock.acquire() db = self.db for i in range(1, len(parts), 2): if parts[i - 1] not in db: db[parts[i - 1]] = ({}, {}) if in parts[i]: db = db[parts[i - 1]][1] else: db = db[parts[i - 1]][0] if parts[-1] in db: db[parts[-1]] = db[parts[-1]][:2] + (value, ) else: db[parts[-1]] = ({}, {}, value) self.lock.release()
insert(resource, value) Insert a resource entry into the database. RESOURCE is a string and VALUE can be any Python value.
14,501
def isPointVisible(self, x, y):
    class POINT(ctypes.Structure):
        _fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]

    pt = POINT()
    pt.x = x
    pt.y = y
    MONITOR_DEFAULTTONULL = 0
    hmon = self._user32.MonitorFromPoint(pt, MONITOR_DEFAULTTONULL)
    if hmon == 0:
        return False
    return True
Checks if a point is visible on any monitor.
14,502
def setInverted(self, state):
    collapsed = self.isCollapsed()
    self._inverted = state
    if self.isCollapsible():
        self.setCollapsed(collapsed)
Sets whether or not to invert the check state for collapsing. :param state | <bool>
14,503
def to_profile_info(self, serialize_credentials=False): result = { : self.profile_name, : self.target_name, : self.config.to_dict(), : self.threads, : self.credentials.incorporate(), } if serialize_credentials: result[] = result[].serialize() return result
Unlike to_project_config, this dict is not a mirror of any existing on-disk data structure. It's used when creating a new profile from an existing one. :param serialize_credentials bool: If True, serialize the credentials. Otherwise, the Credentials object will be copied. :returns dict: The serialized profile.
14,504
def Diag(a):
    r = np.zeros(2 * a.shape, dtype=a.dtype)  # output shape is a.shape repeated twice (tuple repetition)
    for idx, v in np.ndenumerate(a):
        r[2 * idx] = v  # place v on the generalized diagonal (index tuple repeated twice)
    return r,
Diag op.
14,505
def failure_raiser(*validation_func, **kwargs):
    # The pop_kwargs key literals were lost in extraction; they are restored from the
    # variable names they are unpacked into.
    failure_type, help_msg = pop_kwargs(kwargs, [('failure_type', None), ('help_msg', None)],
                                        allow_others=True)
    kw_context_args = kwargs
    main_func = _process_validation_function_s(list(validation_func))
    return _failure_raiser(main_func, failure_type=failure_type, help_msg=help_msg, **kw_context_args)
This function is automatically used if you provide a tuple `(<function>, <msg>_or_<Failure_type>)`, to any of the methods in this page or to one of the `valid8` decorators. It transforms the provided `<function>` into a failure raiser, raising a subclass of `Failure` in case of failure (either not returning `True` or raising an exception) :param validation_func: the base validation function or list of base validation functions to use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically. :param failure_type: a subclass of `WrappingFailure` that should be raised in case of failure :param help_msg: a string help message for the raised `WrappingFailure`. Optional (default = WrappingFailure with no help message). :param kw_context_args :return:
14,506
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None):
    if tf.nest.is_nested(dtype) or tf.nest.is_nested(dtype_hint):
        if dtype is None:
            fn = lambda v, pd: tf.convert_to_tensor(v, dtype_hint=pd, name=name)
            return tf.nest.map_structure(fn, value, dtype_hint)
        elif dtype_hint is None:
            fn = lambda v, d: tf.convert_to_tensor(v, dtype=d, name=name)
            return tf.nest.map_structure(fn, value, dtype)
        else:
            fn = lambda v, d, pd: tf.convert_to_tensor(
                v, dtype=d, dtype_hint=pd, name=name)
            return tf.nest.map_structure(fn, value, dtype, dtype_hint)
    return tf.convert_to_tensor(
        value=value, dtype=dtype, dtype_hint=dtype_hint, name=name)
Converts the given `value` to a (structure of) `Tensor`. This function converts Python objects of various types to a (structure of) `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: Args: value: An object whose structure matches that of `dtype ` and/or `dtype_hint` and for which each leaf has a registered `Tensor` conversion function. dtype: Optional (structure of) element type for the returned tensor. If missing, the type is inferred from the type of `value`. dtype_hint: Optional (structure of) element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. name: Optional name to use if a new `Tensor` is created. Returns: tensor: A (structure of) `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value` to `dtype`. RuntimeError: If a registered conversion function returns an invalid value. ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
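The docstring's "For example:" has no example attached here; a minimal, hedged sketch of how the helper above could be called (TensorFlow 2.x assumed, values illustrative):
>>> import tensorflow as tf
>>> _convert_to_tensor([1., 2., 3.], dtype=tf.float32)           # plain value, single dtype
>>> _convert_to_tensor({'a': [1, 2], 'b': 3.},
...                    dtype={'a': tf.int64, 'b': tf.float32})   # nested value, matching nested dtypes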
14,507
def save_to_file(self, path, filename, **params):
    # The string literals ('/', 'GET', 'wb') were lost in extraction and are restored
    # from context: binary content is downloaded and written to a file, per the docstring.
    url = ensure_trailing_slash(self.url + path.lstrip('/'))
    content = self._request('GET', url, params=params).content
    with open(filename, 'wb') as f:
        f.write(content)
Saves binary content to a file with name filename. filename should include the appropriate file extension, such as .xlsx or .txt, e.g., filename = 'sample.xlsx'. Useful for downloading .xlsx files.
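A hypothetical usage sketch for the method above (the client object, path, and filename are illustrative, not from the source):
>>> client.save_to_file('datasets/123/export', 'sample.xlsx')   # writes the binary response body to sample.xlsx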
14,508
def time(host=None, port=None, db=None, password=None):
    server = _connect(host, port, db, password)
    return server.time()[0]
Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time
14,509
def K(self, parm): return ARD_K_matrix(self.X, parm) + np.identity(self.X.shape[0])*(10**-10)
Returns the Gram Matrix Parameters ---------- parm : np.ndarray Parameters for the Gram Matrix Returns ---------- - Gram Matrix (np.ndarray)
14,510
def _astorestr(ins): output = _addr(ins.quad[1]) op = ins.quad[2] indirect = op[0] == if indirect: op = op[1:] immediate = op[0] == if immediate: op = op[1:] temporal = op[0] != if not temporal: op = op[1:] if is_int(op): op = str(int(op) & 0xFFFF) if indirect: if immediate: output.append( % op) else: output.append( % op) output.append() REQUIRES.add() else: return output
Stores a string value into a memory address. It copies the content of the 2nd operand (a string) into the 1st, reallocating dynamic memory for the 1st string. This instruction DOES allow immediate strings for the 2nd parameter, starting with '#'.
14,511
def apply(self, event = None): for section in self.config.sections(): for option, o in self.config.config[section].items(): if not o[]: continue return False self.config.set(section, option, value) return True
Before self.onOk closes the window, it calls this function to sync the config changes from the GUI back to self.config.
14,512
def send_status_response(environ, start_response, e, add_headers=None, is_head=False):
    status = get_http_status_string(e)
    headers = []
    if add_headers:
        headers.extend(add_headers)
    if e in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT):
        start_response(
            status, [("Content-Length", "0"), ("Date", get_rfc1123_time())] + headers
        )
        return [b""]
    if e in (HTTP_OK, HTTP_CREATED):
        e = DAVError(e)
    assert isinstance(e, DAVError)
    content_type, body = e.get_response_page()
    if is_head:
        body = compat.b_empty
    assert compat.is_bytes(body), body
    start_response(
        status,
        [
            ("Content-Type", content_type),
            ("Date", get_rfc1123_time()),
            ("Content-Length", str(len(body))),
        ]
        + headers,
    )
    return [body]
Start a WSGI response for a DAVError or status code.
14,513
def spec_formatter(cls, spec):
    "Formats the elements of an argument set appropriately"
    return type(spec)((k, str(v)) for (k, v) in spec.items())
Formats the elements of an argument set appropriately
14,514
def loadGmesh(filename, c="gold", alpha=1, wire=False, bc=None): if not os.path.exists(filename): colors.printc("~noentry Error in loadGmesh: Cannot find", filename, c=1) return None f = open(filename, "r") lines = f.readlines() f.close() nnodes = 0 index_nodes = 0 for i, line in enumerate(lines): if "$Nodes" in line: index_nodes = i + 1 nnodes = int(lines[index_nodes]) break node_coords = [] for i in range(index_nodes + 1, index_nodes + 1 + nnodes): cn = lines[i].split() node_coords.append([float(cn[1]), float(cn[2]), float(cn[3])]) nelements = 0 index_elements = 0 for i, line in enumerate(lines): if "$Elements" in line: index_elements = i + 1 nelements = int(lines[index_elements]) break elements = [] for i in range(index_elements + 1, index_elements + 1 + nelements): ele = lines[i].split() elements.append([int(ele[-3]), int(ele[-2]), int(ele[-1])]) poly = buildPolyData(node_coords, elements, indexOffset=1) return Actor(poly, c, alpha, wire, bc)
Reads a `gmesh` file format. Return an ``Actor(vtkActor)`` object.
14,515
def _maybe_assert_valid_concentration(self, concentration, validate_args):
    if not validate_args:
        return concentration
    return distribution_util.with_dependencies([
        assert_util.assert_positive(
            concentration,
            message="Concentration parameter must be positive."),
        assert_util.assert_rank_at_least(
            concentration, 1,
            message="Concentration parameter must have >=1 dimensions."),
        assert_util.assert_less(
            1, tf.shape(input=concentration)[-1],
            message="Concentration parameter must have event_size >= 2."),
    ], concentration)
Checks the validity of the concentration parameter.
14,516
def _create_relational_field(self, attr, options): options[] = attr.py_type options[] = not attr.is_required return EntityField, options
Creates the form element for working with entity relationships.
14,517
def format(self, indent_level, indent_size=4): name = self.format_name(, indent_size) if self.long_desc is not None: name += name += self.wrap_lines( % str(self._literal), 1, indent_size) return self.wrap_lines(name, indent_level, indent_size)
Format this verifier Returns: string: A formatted string
14,518
def after_request(self, fn):
    self._defer(lambda app: app.after_request(fn))
    return fn
Register a function to be run after each request. Your function must take one parameter, an instance of :attr:`response_class` and return a new response object or the same (see :meth:`process_response`). As of Flask 0.7 this function might not be executed at the end of the request in case an unhandled exception occurred.
14,519
def state_create(history_id_key, table_name, collision_checker, always_set=[]): def wrap( check ): def wrapped_check( state_engine, nameop, block_id, checked_ops ): rc = check( state_engine, nameop, block_id, checked_ops ) try: assert in nameop.keys(), "Missing __preorder__" except Exception, e: log.exception(e) log.error("FATAL: missing fields") os.abort() nameop[] = table_name nameop[] = history_id_key nameop[] = True nameop[] = always_set invariant_tags = state_create_invariant_tags() for tag in invariant_tags: assert tag in nameop, "BUG: missing invariant tag " % tag for required_field in CONSENSUS_FIELDS_REQUIRED: assert required_field in nameop, .format(required_field) rc = state_check_collisions( state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker ) if rc: log.debug("COLLISION on %s " % (history_id_key, nameop[history_id_key])) rc = False else: rc = True return rc return wrapped_check return wrap
Decorator for the check() method on state-creating operations. Makes sure that: * there is a __preorder__ field set, which contains the state-creating operation's associated preorder * there is a __table__ field set, which contains the table into which to insert this state into * there is a __history_id_key__ field set, which identifies the table's primary key name * there are no unexpired, duplicate instances of this state with this history id. (i.e. if we're preordering a name that had previously expired, we need to preserve its history)
14,520
def parse_match_settings(match_settings, config: ConfigObject):
    match_settings.game_mode = config.get(MATCH_CONFIGURATION_HEADER, GAME_MODE)
    match_settings.game_map = config.get(MATCH_CONFIGURATION_HEADER, GAME_MAP)
    match_settings.skip_replays = config.getboolean(MATCH_CONFIGURATION_HEADER, SKIP_REPLAYS)
    match_settings.instant_start = config.getboolean(MATCH_CONFIGURATION_HEADER, INSTANT_START)
    parse_mutator_settings(match_settings.mutators, config)
Parses the matching settings modifying the match settings object. :param match_settings: :param config: :return:
14,521
def com_google_fonts_check_os2_metrics_match_hhea(ttFont):
    if ttFont["OS/2"].sTypoAscender != ttFont["hhea"].ascent:
        yield FAIL, Message("ascender",
                            "OS/2 sTypoAscender and hhea ascent must be equal.")
    elif ttFont["OS/2"].sTypoDescender != ttFont["hhea"].descent:
        yield FAIL, Message("descender",
                            "OS/2 sTypoDescender and hhea descent must be equal.")
    else:
        yield PASS, ("OS/2.sTypoAscender/Descender values"
                     " match hhea.ascent/descent.")
Checking OS/2 Metrics match hhea Metrics. OS/2 and hhea vertical metric values should match. This will produce the same linespacing on Mac, GNU+Linux and Windows. Mac OS X uses the hhea values. Windows uses OS/2 or Win, depending on the OS or fsSelection bit value.
14,522
def missing_whitespace_around_operator(logical_line, tokens): r parens = 0 need_space = False prev_type = tokenize.OP prev_text = prev_end = None for token_type, text, start, end, line in tokens: if token_type in SKIP_COMMENTS: continue if text in (, ): parens += 1 elif text == : parens -= 1 if need_space: if start != prev_end: if need_space is not True and not need_space[1]: yield (need_space[0], "E225 missing whitespace around operator") need_space = False elif text == and prev_text in (, ): else prev_text not in KEYWORDS): need_space = None elif text in WS_OPTIONAL_OPERATORS: need_space = None if need_space is None: need_space = (prev_end, start != prev_end) elif need_space and start == prev_end: yield prev_end, "E225 missing whitespace around operator" need_space = False prev_type = token_type prev_text = text prev_end = end
r"""Surround operators with a single space on either side. - Always surround these binary operators with a single space on either side: assignment (=), augmented assignment (+=, -= etc.), comparisons (==, <, >, !=, <=, >=, in, not in, is, is not), Booleans (and, or, not). - If operators with different priorities are used, consider adding whitespace around the operators with the lowest priorities. Okay: i = i + 1 Okay: submitted += 1 Okay: x = x * 2 - 1 Okay: hypot2 = x * x + y * y Okay: c = (a + b) * (a - b) Okay: foo(bar, key='word', *args, **kwargs) Okay: alpha[:-i] E225: i=i+1 E225: submitted +=1 E225: x = x /2 - 1 E225: z = x **y E226: c = (a+b) * (a-b) E226: hypot2 = x*x + y*y E227: c = a|b E228: msg = fmt%(errno, errmsg)
14,523
def clear(self):
    self._filters = []
    self._order_by = OrderedDict()
    self._selects = set()
    self._negation = False
    self._attribute = None
    self._chain = None
    self._search = None
    return self
Clear everything :rtype: Query
14,524
def predictions_variance(df, filepath=None): df = df.filter(regex="^VAR:") by_readout = df.mean(axis=0).reset_index(level=0) by_readout.columns = [, ] by_readout[] = by_readout.Readout.map(lambda n: n[4:]) g1 = sns.factorplot(x=, y=, data=by_readout, kind=, aspect=2) for tick in g1.ax.get_xticklabels(): tick.set_rotation(90) if filepath: g1.savefig(os.path.join(filepath, )) return g1
Plots the mean variance prediction for each readout Parameters ---------- df: `pandas.DataFrame`_ DataFrame with columns starting with `VAR:` filepath: str Absolute path to a folder where to write the plots Returns ------- plot Generated plot .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
14,525
def _code_line(self, line): assert self._containers container = self._containers[-1] text = line while text: if text.startswith(): r = re.match(r, text) n = len(r.group(1)) container.addElement(S(c=n)) text = text[n:] elif in text: assert not text.startswith() i = text.index() container.addElement(Span(text=text[:i])) text = text[i:] else: container.addElement(Span(text=text)) text =
Add a code line.
14,526
def resolve_aliases(self, chunks):
    for idx, _ in enumerate(chunks):
        new_state = self.aliases.get(chunks[idx]['state'])
        if new_state is not None:
            chunks[idx]['state'] = new_state
Preserve backward compatibility by rewriting the 'state' key in the low chunks if it is using a legacy type.
14,527
def _unordered_iter(self): try: rlist = self.get(0) except error.TimeoutError: pending = set(self.msg_ids) while pending: try: self._client.wait(pending, 1e-3) except error.TimeoutError: pass ready = pending.difference(self._client.outstanding) pending = pending.difference(ready) while ready: msg_id = ready.pop() ar = AsyncResult(self._client, msg_id, self._fname) rlist = ar.get() try: for r in rlist: yield r except TypeError: yield rlist else: for r in rlist: yield r
iterator for results *as they arrive*, on FCFS basis, ignoring submission order.
14,528
def is_kanji(data):
    data_len = len(data)
    if not data_len or data_len % 2:
        return False
    if _PY2:
        data = (ord(c) for c in data)
    data_iter = iter(data)
    for i in range(0, data_len, 2):
        code = (next(data_iter) << 8) | next(data_iter)
        if not (0x8140 <= code <= 0x9ffc or 0xe040 <= code <= 0xebbf):
            return False
    return True
Returns True if the `data` can be encoded in "kanji" mode. :param bytes data: The data to check. :rtype: bool
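A hedged illustration of the check above; the byte ranges tested are Shift-JIS double-byte ranges, so the input is assumed to be Shift-JIS encoded:
>>> is_kanji('点'.encode('shift_jis'))
True
>>> is_kanji(b'abc')      # odd length / ASCII bytes cannot be kanji-mode data
False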
14,529
def supported_alleles(self):
    if 'supported_alleles' not in self._cache:
        result = set(self.allele_to_allele_specific_models)
        if self.allele_to_fixed_length_sequence:
            result = result.union(self.allele_to_fixed_length_sequence)
        self._cache["supported_alleles"] = sorted(result)
    return self._cache["supported_alleles"]
Alleles for which predictions can be made. Returns ------- list of string
14,530
def map_prop_value_as_index(prp, lst): return from_pairs(map(lambda item: (prop(prp, item), item), lst))
Returns the given prop of each item in the list :param prp: :param lst: :return:
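A hedged illustration (assumes the ramda-style `prop` and `from_pairs` helpers used above):
>>> people = [{'id': 'a', 'name': 'Ann'}, {'id': 'b', 'name': 'Bob'}]
>>> map_prop_value_as_index('id', people)
{'a': {'id': 'a', 'name': 'Ann'}, 'b': {'id': 'b', 'name': 'Bob'}}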
14,531
def extend_right_to(self, window, max_size): self.size = min(self.size + (window.ofs - self.ofs_end()), max_size)
Adjust the size to make our window end where the right window begins, but don't get larger than max_size
14,532
def get_facts(self): vendor = u uptime = -1 serial_number, fqdn, os_version, hostname, domain_name = (,) * 5 show_ver = self._send_command() show_hosts = self._send_command() show_ip_int_br = self._send_command() for line in show_ver.splitlines(): if in line: hostname, uptime_str = line.split() uptime = self.parse_uptime(uptime_str) hostname = hostname.strip() if in line: _, serial_number = line.split("Processor board ID ") serial_number = serial_number.strip() if re.search(r"Cisco IOS Software", line): try: _, os_version = line.split("Cisco IOS Software, ") except ValueError: _, os_version = re.split(r"Cisco IOS Software \[.*?\], ", line) os_version = os_version.strip() elif re.search(r"IOS (tm).+Software", line): _, os_version = line.split("IOS (tm) ") os_version = os_version.strip() for line in show_hosts.splitlines(): if in line: _, domain_name = line.split("Default domain is ") domain_name = domain_name.strip() break if domain_name != and hostname != : fqdn = u.format(hostname, domain_name) try: match_model = re.search(r"Cisco (.+?) .+bytes of", show_ver, flags=re.IGNORECASE) model = match_model.group(1) except AttributeError: model = u interface_list = [] show_ip_int_br = show_ip_int_br.strip() for line in show_ip_int_br.splitlines(): if in line: continue interface = line.split()[0] interface_list.append(interface) return { : uptime, : vendor, : py23_compat.text_type(os_version), : py23_compat.text_type(serial_number), : py23_compat.text_type(model), : py23_compat.text_type(hostname), : fqdn, : interface_list }
Return a set of facts from the devices.
14,533
def solvedbi_sm(ah, rho, b, c=None, axis=4): r a = np.conj(ah) if c is None: c = solvedbi_sm_c(ah, a, rho, axis) if have_numexpr: cb = inner(c, b, axis=axis) return ne.evaluate() else: return (b - (a * inner(c, b, axis=axis))) / rho
r""" Solve a diagonal block linear system with a scaled identity term using the Sherman-Morrison equation. The solution is obtained by independently solving a set of linear systems of the form (see :cite:`wohlberg-2016-efficient`) .. math:: (\rho I + \mathbf{a} \mathbf{a}^H ) \; \mathbf{x} = \mathbf{b} \;\;. In this equation inner products and matrix products are taken along the specified axis of the corresponding multi-dimensional arrays; the solutions are independent over the other axes. Parameters ---------- ah : array_like Linear system component :math:`\mathbf{a}^H` rho : float Linear system parameter :math:`\rho` b : array_like Linear system component :math:`\mathbf{b}` c : array_like, optional (default None) Solution component :math:`\mathbf{c}` that may be pre-computed using :func:`solvedbi_sm_c` and cached for re-use. axis : int, optional (default 4) Axis along which to solve the linear system Returns ------- x : ndarray Linear system solution :math:`\mathbf{x}`
14,534
def gridsearch(self, X, y, weights=None, return_scores=False, keep_best=True, objective=, progress=True, **param_grids): if not self._is_fitted: self._validate_params() self._validate_data_dep_params(X) y = check_y(y, self.link, self.distribution, verbose=self.verbose) X = check_X(X, verbose=self.verbose) check_X_y(X, y) if weights is not None: weights = np.array(weights).astype().ravel() weights = check_array(weights, name=, ndim=1, verbose=self.verbose) check_lengths(y, weights) else: weights = np.ones_like(y).astype() if objective not in [, , , , ]: raise ValueError("objective mut be in "\ "[, , , , ], but found objective = {}".format(objective)) if self.distribution._known_scale: if objective == : raise ValueError(\ ) if objective == : objective = else: if objective == : raise ValueError(\ ) if objective == : objective = if not bool(param_grids): param_grids[] = np.logspace(-3, 3, 11) admissible_params = list(self.get_params()) + self._plural params = [] grids = [] for param, grid in list(param_grids.items()): if param not in (admissible_params): raise ValueError(.format(param)) if not (isiterable(grid) and (len(grid) > 1)): \ raise ValueError( \ .format(param, grid)) if any(isiterable(g) for g in grid): target_len = len(flatten(getattr(self, param))) cartesian = (not isinstance(grid, np.ndarray) or grid.ndim != 2) grid = [np.atleast_1d(g) for g in grid] msg = \ .format(param, target_len, len(grid)) if cartesian: if len(grid) != target_len: raise ValueError(msg) grid = combine(*grid) if not all([len(subgrid) == target_len for subgrid in grid]): raise ValueError(msg) params.append(param) grids.append(grid) param_grid_list = [] for candidate in combine(*grids): param_grid_list.append(dict(zip(params,candidate))) best_model = None best_score = np.inf scores = [] models = [] if self._is_fitted: models.append(self) scores.append(self.statistics_[objective]) best_model = models[-1] best_score = scores[-1] if progress: pbar = ProgressBar() else: pbar = lambda x: x for param_grid in pbar(param_grid_list): try: gam = deepcopy(self) gam.set_params(self.get_params()) gam.set_params(**param_grid) if models: coef = models[-1].coef_ gam.set_params(coef_=coef, force=True, verbose=False) gam.fit(X, y, weights) except ValueError as error: msg = str(error) + + str(param_grid) msg += if self.verbose: warnings.warn(msg) continue models.append(gam) scores.append(gam.statistics_[objective]) if scores[-1] < best_score: best_model = models[-1] best_score = scores[-1] if len(models) == 0: msg = if self.verbose: warnings.warn(msg) return self if keep_best: self.set_params(deep=True, force=True, **best_model.get_params(deep=True)) if return_scores: return OrderedDict(zip(models, scores)) else: return self
Performs a grid search over a space of parameters for a given objective Warnings -------- ``gridsearch`` is lazy and will not remove useless combinations from the search space, eg. >>> n_splines=np.arange(5,10), fit_splines=[True, False] will result in 10 loops, of which 5 are equivalent because ``fit_splines = False`` Also, it is not recommended to search over a grid that alternates between known scales and unknown scales, as the scores of the candidate models will not be comparable. Parameters ---------- X : array-like input data of shape (n_samples, m_features) y : array-like label data of shape (n_samples,) weights : array-like shape (n_samples,), optional sample weights return_scores : boolean, optional whether to return the hyperpamaters and score for each element in the grid keep_best : boolean, optional whether to keep the best GAM as self. objective : {'auto', 'AIC', 'AICc', 'GCV', 'UBRE'}, optional Metric to optimize. If `auto`, then grid search will optimize `GCV` for models with unknown scale and `UBRE` for models with known scale. progress : bool, optional whether to display a progress bar **kwargs pairs of parameters and iterables of floats, or parameters and iterables of iterables of floats. If no parameter are specified, ``lam=np.logspace(-3, 3, 11)`` is used. This results in a 11 points, placed diagonally across lam space. If grid is iterable of iterables of floats, the outer iterable must have length ``m_features``. the cartesian product of the subgrids in the grid will be tested. If grid is a 2d numpy array, each row of the array will be tested. The method will make a grid of all the combinations of the parameters and fit a GAM to each combination. Returns ------- if ``return_scores=True``: model_scores: dict containing each fitted model as keys and corresponding objective scores as values else: self: ie possibly the newly fitted model Examples -------- For a model with 4 terms, and where we expect 4 lam values, our search space for lam must have 4 dimensions. We can search the space in 3 ways: 1. via cartesian product by specifying the grid as a list. our grid search will consider ``11 ** 4`` points: >>> lam = np.logspace(-3, 3, 11) >>> lams = [lam] * 4 >>> gam.gridsearch(X, y, lam=lams) 2. directly by specifying the grid as a np.ndarray. This is useful for when the dimensionality of the search space is very large, and we would prefer to execute a randomized search: >>> lams = np.exp(np.random.random(50, 4) * 6 - 3) >>> gam.gridsearch(X, y, lam=lams) 3. copying grids for parameters with multiple dimensions. if we specify a 1D np.ndarray for lam, we are implicitly testing the space where all points have the same value >>> gam.gridsearch(lam=np.logspace(-3, 3, 11)) is equivalent to: >>> lam = np.logspace(-3, 3, 11) >>> lams = np.array([lam] * 4) >>> gam.gridsearch(X, y, lam=lams)
14,535
def copy_path_flat(self):
    path = cairo.cairo_copy_path_flat(self._pointer)
    result = list(_iter_path(path))
    cairo.cairo_path_destroy(path)
    return result
Return a flattened copy of the current path This method is like :meth:`copy_path` except that any curves in the path will be approximated with piecewise-linear approximations, (accurate to within the current tolerance value, see :meth:`set_tolerance`). That is, the result is guaranteed to not have any elements of type :obj:`CURVE_TO <PATH_CURVE_TO>` which will instead be replaced by a series of :obj:`LINE_TO <PATH_LINE_TO>` elements. :returns: A list of ``(path_operation, coordinates)`` tuples. See :meth:`copy_path` for the data structure.
14,536
def default_ubuntu_tr(mod):
    # The format-string literals were lost in extraction; 'python-%s' / 'python3-%s' are the
    # conventional Debian/Ubuntu package-name patterns implied by the docstring.
    pkg = 'python-%s' % mod.lower()
    py2pkg = pkg
    py3pkg = 'python3-%s' % mod.lower()
    return (pkg, py2pkg, py3pkg)
Default translation function for Ubuntu based systems
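Assuming the conventional 'python-<mod>' / 'python3-<mod>' naming restored above, a call would behave roughly like:
>>> default_ubuntu_tr('Requests')
('python-requests', 'python-requests', 'python3-requests')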
14,537
def run( draco_query: List[str], constants: Dict[str, str] = None, files: List[str] = None, relax_hard=False, silence_warnings=False, debug=False, clear_cache=False, ) -> Optional[Result]: if clear_cache and file_cache: logger.warning("Cleared file cache") file_cache.clear() stderr, stdout = run_clingo( draco_query, constants, files, relax_hard, silence_warnings, debug ) try: json_result = json.loads(stdout) except json.JSONDecodeError: logger.error("stdout: %s", stdout) logger.error("stderr: %s", stderr) raise if stderr: logger.error(stderr) result = json_result["Result"] if result == "UNSATISFIABLE": logger.info("Constraints are unsatisfiable.") return None elif result == "OPTIMUM FOUND": answers = json_result["Call"][0]["Witnesses"][-1] logger.debug(answers["Value"]) return Result( clyngor.Answers(answers["Value"]).sorted, cost=json_result["Models"]["Costs"][0], ) elif result == "SATISFIABLE": answers = json_result["Call"][0]["Witnesses"][-1] assert ( json_result["Models"]["Number"] == 1 ), "Should not have more than one model if we don't optimize" logger.debug(answers["Value"]) return Result(clyngor.Answers(answers["Value"]).sorted) else: logger.error("Unsupported result: %s", result) return None
Run clingo to compute a completion of a partial spec or violations.
14,538
def read(self, page):
    log.debug("read pages {0} to {1}".format(page, page+3))
    data = self.transceive("\x30" + chr(page % 256), timeout=0.005)
    if len(data) == 1 and data[0] & 0xFA == 0x00:
        log.debug("received nak response")
        self.target.sel_req = self.target.sdd_res[:]
        self._target = self.clf.sense(self.target)
        raise Type2TagCommandError(
            INVALID_PAGE_ERROR if self.target else nfc.tag.RECEIVE_ERROR)
    if len(data) != 16:
        log.debug("invalid response " + hexlify(data))
        raise Type2TagCommandError(INVALID_RESPONSE_ERROR)
    return data
Send a READ command to retrieve data from the tag. The *page* argument specifies the offset in multiples of 4 bytes (i.e. page number 1 will return bytes 4 to 19). The data returned is a byte array of length 16 or None if the block is outside the readable memory range. Command execution errors raise :exc:`Type2TagCommandError`.
14,539
def grid_linspace(bounds, count):
    bounds = np.asanyarray(bounds, dtype=np.float64)
    if len(bounds) != 2:
        raise ValueError()
    count = np.asanyarray(count, dtype=np.int)
    if count.shape == ():
        count = np.tile(count, bounds.shape[1])
    grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)]
    grid = np.vstack(np.meshgrid(*grid_elements)
                     ).reshape(bounds.shape[1], -1).T
    return grid
Return a grid spaced inside a bounding box with edges spaced using np.linspace. Parameters --------- bounds: (2,dimension) list of [[min x, min y, etc], [max x, max y, etc]] count: int, or (dimension,) int, number of samples per side Returns ------- grid: (n, dimension) float, points in the specified bounds
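A short, hedged usage sketch of the helper above:
>>> bounds = [[0.0, 0.0], [1.0, 2.0]]     # [[min x, min y], [max x, max y]]
>>> grid_linspace(bounds, 3).shape        # 3 samples per side in 2D -> 9 points
(9, 2)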
14,540
def get_scaled_cutout_basic(self, x1, y1, x2, y2, scale_x, scale_y, method=): new_wd = int(round(scale_x * (x2 - x1 + 1))) new_ht = int(round(scale_y * (y2 - y1 + 1))) return self.get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht, )
Extract a region of the image defined by corners (x1, y1) and (x2, y2) and scale it by scale factors (scale_x, scale_y). `method` describes the method of interpolation used, where the default "basic" is nearest neighbor.
14,541
def polygon(self):
    ring = ogr.Geometry(ogr.wkbLinearRing)
    for coord in self.ll, self.lr, self.ur, self.ul, self.ll:
        ring.AddPoint_2D(*coord)
    polyg = ogr.Geometry(ogr.wkbPolygon)
    polyg.AddGeometryDirectly(ring)
    return polyg
Returns an OGR Geometry for this envelope.
14,542
def start(self): c_logs_ingested = Counter( , , [, , ], ) c_messages_published = Counter( , , [, , ], ) self._setup_ipc() log.debug(, self._listener_type) self._setup_listener() self.listener.start() thread = threading.Thread(target=self._suicide_when_without_parent, args=(os.getppid(),)) thread.start() signal.signal(signal.SIGTERM, self._exit_gracefully) self.__up = True while self.__up: try: log_message, log_source = self.listener.receive() except ListenerException as lerr: if self.__up is False: log.info() return else: log.error(lerr, exc_info=True) raise NapalmLogsExit(lerr) log.debug(, log_message, log_source) if not log_message: log.info(, log_source) continue c_logs_ingested.labels(listener_type=self._listener_type, address=self.address, port=self.port).inc() self.pub.send(umsgpack.packb((log_message, log_source))) c_messages_published.labels(listener_type=self._listener_type, address=self.address, port=self.port).inc()
Listen to messages and publish them.
14,543
def read_file_1st_col_only(fname):
    lst = []
    with open(fname, 'r') as f:
        _ = f.readline()  # skip the first (header) line
        for line in f:
            lst.append(line.split()[0])
    return lst
read a CSV file (ref_classes.csv) and return the list of names
14,544
def complete(request, provider): data = request.GET.copy() data.update(request.POST) if not in request.session: request.session[] = request.GET.get("next") or settings.LOGIN_REDIRECT_URL backend = get_backend(provider) response = backend.validate(request, data) if isinstance(response, HttpResponseRedirect): return response if request.user.is_authenticated(): success = backend.login_user(request) backend.merge_accounts(request) else: success = backend.login_user(request) if not success and not settings.REGISTRATION_ALLOWED: messages.warning(request, lang.REGISTRATION_DISABLED) return redirect(settings.REGISTRATION_DISABLED_REDIRECT) if success: return redirect(request.session.pop(, settings.LOGIN_REDIRECT_URL)) return backend.complete(request, response)
After the first step of net authentication, we must validate the response. If everything is OK, we do the following: 1. If the user is already authenticated: a. Try to log them in again (a strange variation, but we must take it into account). b. Create a new netID record in the database. c. Merge the authenticated account with the newly created netID record. d. Redirect the user to the 'next' url stored in the session. 2. If the user is anonymous: a. Try to log them in by identity and redirect to the 'next' url. b. Create a new netID record in the database. c. Try to automatically fill all extra fields with the information returned from the server. If successful, log the user in and redirect to the 'next' url. d. Redirect the user to an extra page where they can fill in all extra fields by hand.
14,545
def sort_by_name(names):
    def last_name_key(full_name):
        parts = full_name.split()
        if len(parts) == 1:
            return full_name.upper()
        # The separator literals were lost in extraction; a single space is assumed here,
        # which preserves the "sort by last name" behaviour described in the docstring.
        last_first = parts[-1] + ' ' + ' '.join(parts[:-1])
        return last_first.upper()
    return sorted(set(names), key=last_name_key)
Sort by last name, uniquely.
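A hedged example of the behaviour described above (duplicates removed, ordering by last name):
>>> sort_by_name(['Ada Lovelace', 'Alan Turing', 'Ada Lovelace'])
['Ada Lovelace', 'Alan Turing']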
14,546
def pairinplace(args): from jcvi.utils.iter import pairwise p = OptionParser(pairinplace.__doc__) p.set_rclip() p.set_tag() p.add_option("--base", help="Base name for the output files [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastqfile, = args base = opts.base or op.basename(fastqfile).split(".")[0] frags = base + ".frags.fastq" pairs = base + ".pairs.fastq" if fastqfile.endswith(".gz"): frags += ".gz" pairs += ".gz" fragsfw = must_open(frags, "w") pairsfw = must_open(pairs, "w") N = opts.rclip tag = opts.tag strip_name = (lambda x: x[:-N]) if N else None fh_iter = iter_fastq(fastqfile, key=strip_name) skipflag = False for a, b in pairwise(fh_iter): if b is None: break if skipflag: skipflag = False continue if a.name == b.name: if tag: a.name += "/1" b.name += "/2" print(a, file=pairsfw) print(b, file=pairsfw) skipflag = True else: print(a, file=fragsfw) if not skipflag: print(a, file=fragsfw) logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags)) return pairs
%prog pairinplace bulk.fastq Pair up the records in bulk.fastq by comparing the names for adjacent records. If they match, print to bulk.pairs.fastq, else print to bulk.frags.fastq.
14,547
def process_strings(self, string, docstrings=False): m = RE_STRING_TYPE.match(string) stype = self.get_string_type(m.group(1) if m.group(1) else ) if not self.match_string(stype) and not docstrings: return , False is_bytes = in stype is_raw = in stype is_format = in stype content = m.group(3) if is_raw and (not is_format or not self.decode_escapes): string = self.norm_nl(content) elif is_raw and is_format: string = self.norm_nl(FE_RFESC.sub(self.replace_unicode, content)) elif is_bytes: string = self.norm_nl(RE_BESC.sub(self.replace_bytes, content)) elif is_format: string = self.norm_nl(RE_FESC.sub(self.replace_unicode, content)) else: string = self.norm_nl(RE_ESC.sub(self.replace_unicode, content)) return textwrap.dedent(RE_NON_PRINTABLE.sub(, string) if is_bytes else string), is_bytes
Process escapes.
14,548
def user_twitter_list_bag_of_words(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set): list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set) bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets) lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int)) for lemma_to_keywordbag in list_of_lemma_to_keywordbags: for lemma, keywordbag in lemma_to_keywordbag.items(): for keyword, multiplicity in keywordbag.items(): lemma_to_keywordbag_total[lemma][keyword] += multiplicity return bag_of_words, lemma_to_keywordbag_total
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user. Inputs: - twitter_list_corpus: A python list of Twitter lists in json format. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - bag_of_words: A bag-of-words in python dictionary format. - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
14,549
def get_user(self, username): cmd = ["glsuser", "-u", username, "--raw"] results = self._read_output(cmd) if len(results) == 0: return None elif len(results) > 1: logger.error( "Command returned multiple results for ." % username) raise RuntimeError( "Command returned multiple results for ." % username) the_result = results[0] the_name = the_result["Name"] if username.lower() != the_name.lower(): logger.error( "We expected username but got username ." % (username, the_name)) raise RuntimeError( "We expected username but got username ." % (username, the_name)) return the_result
Get the user details from MAM.
14,550
def _collapse_preconditions(base_preconditions: List[List[Contract]], bases_have_func: bool,
                            preconditions: List[List[Contract]],
                            func: Callable[..., Any]) -> List[List[Contract]]:
    if not base_preconditions and bases_have_func and preconditions:
        raise TypeError(("The function {} can not weaken the preconditions because the bases specify "
                         "no preconditions at all. Hence this function must accept all possible input since "
                         "the preconditions are OR'ed and no precondition implies a dummy precondition which is always "
                         "fulfilled.").format(func.__qualname__))
    return base_preconditions + preconditions
Collapse function preconditions with the preconditions collected from the base classes. :param base_preconditions: preconditions collected from the base classes (grouped by base class) :param bases_have_func: True if one of the base classes has the function :param preconditions: preconditions of the function (before the collapse) :param func: function whose preconditions we are collapsing :return: collapsed sequence of precondition groups
14,551
def get_signin_url(self, service=): alias = self.get_account_alias() if not alias: raise Exception() return "https://%s.signin.aws.amazon.com/console/%s" % (alias, service)
Get the URL where IAM users can use their login profile to sign in to this account's console. :type service: string :param service: Default service to go to in the console.
14,552
def _load_lib():
    lib_path = find_lib_path()
    if len(lib_path) == 0:
        return None
    lib = ctypes.cdll.LoadLibrary(lib_path[0])
    lib.LGBM_GetLastError.restype = ctypes.c_char_p
    return lib
Load LightGBM library.
14,553
def cast_from_bunq_response(cls, bunq_response): return cls( bunq_response.value, bunq_response.headers, bunq_response.pagination )
:type bunq_response: BunqResponse
14,554
def buildWorkbenchWithLauncher():
    workbench = ui.Workbench()
    tools = [exercises.SearchTool()]
    launcher = ui.Launcher(workbench, tools)
    workbench.display(launcher)
    return workbench, launcher
Builds a workbench. The workbench has a launcher with all of the default tools. The launcher will be displayed on the workbench.
14,555
def not_next(e): def match_not_next(s, grm=None, pos=0): try: e(s, grm, pos) except PegreError as ex: return PegreResult(s, Ignore, (pos, pos)) else: raise PegreError(, pos) return match_not_next
Create a PEG function for negative lookahead.
14,556
def get_config_value(name, fallback=None): cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) return cli_config.get(, name, fallback)
Gets a config by name. In the case where the config name is not found, will use fallback value.
14,557
def get_indexed_node(manager, prop, value, node_type=, lookup_func=, legacy=True): q = .format(label=node_type, prop=prop, lookup_func=lookup_func) with manager.session as s: for result in s.run(q, {: value}): if legacy: yield result[].properties else: yield result[]
:param manager: Neo4jDBSessionManager :param prop: Indexed property :param value: Indexed value :param node_type: Label used for index :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH :param legacy: Backwards compatibility :type manager: Neo4jDBSessionManager :type prop: str :type value: str :type node_type: str :type lookup_func: str :type legacy: bool :return: Dict or Node object :rtype: dict|Node
14,558
def format_request_email_title(increq, **ctx):
    template = current_app.config["COMMUNITIES_REQUEST_EMAIL_TITLE_TEMPLATE"]
    return format_request_email_templ(increq, template, **ctx)
Format the email message title for inclusion request notification. :param increq: Inclusion request object for which the request is made. :type increq: `invenio_communities.models.InclusionRequest` :param ctx: Optional extra context parameters passed to formatter. :type ctx: dict. :returns: Email message title. :rtype: str
14,559
def _init_grps(code2nt):
    seen = set()
    seen_add = seen.add
    groups = [nt.group for nt in code2nt.values()]
    return [g for g in groups if not (g in seen or seen_add(g))]
Return list of groups in same order as in code2nt
14,560
def revoke_auth(preserve_minion_cache=False): preserve_minion_cache* masters = list() ret = True if in __opts__: for master_uri in __opts__[]: masters.append(master_uri) else: masters.append(__opts__[]) for master in masters: channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master) tok = channel.auth.gen_token(b) load = {: , : __opts__[], : tok, : preserve_minion_cache} try: channel.send(load) except SaltReqTimeoutError: ret = False finally: channel.close() return ret
The minion sends a request to the master to revoke its own key. Note that the minion session will be revoked and the minion may not be able to return the result of this command back to the master. If the 'preserve_minion_cache' flag is set to True, the master cache for this minion will not be removed. CLI Example: .. code-block:: bash salt '*' saltutil.revoke_auth
14,561
def setup_menu(self): copy_action = create_action(self, _(), shortcut=keybinding(), icon=ima.icon(), triggered=self.copy, context=Qt.WidgetShortcut) functions = ((_("To bool"), bool), (_("To complex"), complex), (_("To int"), int), (_("To float"), float), (_("To str"), to_text_string)) types_in_menu = [copy_action] for name, func in functions: slot = lambda func=func: self.change_type(func) types_in_menu += [create_action(self, name, triggered=slot, context=Qt.WidgetShortcut)] menu = QMenu(self) add_actions(menu, types_in_menu) return menu
Setup context menu.
14,562
def boll(self, n, dev, array=False):
    mid = self.sma(n, array)
    std = self.std(n, array)
    up = mid + std * dev
    down = mid - std * dev
    return up, down
Bollinger Bands (布林通道).
14,563
def date_to_epiweek(date=datetime.date.today()) -> Epiweek:
    year = date.year
    start_dates = list(map(_start_date_of_year, [year - 1, year, year + 1]))
    start_date = start_dates[1]
    if start_dates[1] > date:
        start_date = start_dates[0]
    elif date >= start_dates[2]:
        start_date = start_dates[2]
    return Epiweek(
        year=(start_date + datetime.timedelta(days=7)).year,
        week=((date - start_date).days // 7) + 1,
        day=(date.isoweekday() % 7) + 1
    )
Convert python date to Epiweek
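A hedged usage sketch; the exact week boundary depends on `_start_date_of_year`, which is not shown, but with the usual CDC epi-week convention this date falls in week 1:
>>> date_to_epiweek(datetime.date(2019, 1, 1))
Epiweek(year=2019, week=1, day=3)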
14,564
def eth_sendTransaction(self, from_, to=None, gas=None, gas_price=None, value=None, data=None, nonce=None): obj = {} obj[] = from_ if to is not None: obj[] = to if gas is not None: obj[] = hex(gas) if gas_price is not None: obj[] = hex(gas_price) if value is not None: obj[] = hex(ether_to_wei(value)) if data is not None: obj[] = data if nonce is not None: obj[] = hex(nonce) return (yield from self.rpc_call(, [obj]))
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction :param from_: From account address :type from_: str :param to: To account address (optional) :type to: str :param gas: Gas amount for current transaction (optional) :type gas: int :param gas_price: Gas price for current transaction (optional) :type gas_price: int :param value: Amount of ether to send (optional) :type value: int :param data: Additional data for transaction (optional) :type data: hex :param nonce: Unique nonce for transaction (optional) :type nonce: int :return: txhash :rtype: str
14,565
def get_workflows() -> dict:
    keys = DB.get_keys("workflow_definitions:*")
    known_workflows = dict()
    for key in keys:
        # The split separator was lost in extraction; ':' is assumed, matching the
        # "workflow_definitions:<id>:<version>" key pattern queried above.
        values = key.split(':')
        if values[1] not in known_workflows:
            known_workflows[values[1]] = list()
        known_workflows[values[1]].append(values[2])
    return known_workflows
Get dict of ALL known workflow definitions. Returns a dict mapping each workflow id to its list of known versions.
14,566
def input_validation(group_idx, a, size=None, order=, axis=None, ravel_group_idx=True, check_bounds=True): if not isinstance(a, (int, float, complex)): a = np.asanyarray(a) group_idx = np.asanyarray(group_idx) if not np.issubdtype(group_idx.dtype, np.integer): raise TypeError("group_idx must be of integer type") if check_bounds and np.any(group_idx < 0): raise ValueError("negative indices not supported") ndim_idx = np.ndim(group_idx) ndim_a = np.ndim(a) if axis is None: if ndim_a > 1: raise ValueError("a must be scalar or 1 dimensional, use .ravel to" " flatten. Alternatively specify axis.") elif axis >= ndim_a or axis < -ndim_a: raise ValueError("axis arg too large for np.ndim(a)") else: axis = axis if axis >= 0 else ndim_a + axis if ndim_idx > 1: raise NotImplementedError("only 1d indexing currently" "supported with axis arg.") elif a.shape[axis] != len(group_idx): raise ValueError("a.shape[axis] doesns a bit simpler than group_idx = np.ravel_multi_index(group_idx, size, order=order, mode=) flat_size = np.prod(size) ndim_idx = ndim_a return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size if ndim_idx == 1: if size is None: size = np.max(group_idx) + 1 else: if not np.isscalar(size): raise ValueError("output size must be scalar or None") if check_bounds and np.any(group_idx > size - 1): raise ValueError("one or more indices are too large for " "size %d" % size) flat_size = size else: if size is None: size = np.max(group_idx, axis=1) + 1 elif np.isscalar(size): raise ValueError("output size must be of length %d" % len(group_idx)) elif len(size) != len(group_idx): raise ValueError("%d sizes given, but %d output dimensions " "specified in index" % (len(size), len(group_idx))) if ravel_group_idx: group_idx = np.ravel_multi_index(group_idx, size, order=order, mode=) flat_size = np.prod(size) if not (np.ndim(a) == 0 or len(a) == group_idx.size): raise ValueError("group_idx and a must be of the same length, or a" " can be scalar") return group_idx, a, flat_size, ndim_idx, size
Do some fairly extensive checking of group_idx and a, trying to give the user as much help as possible with what is wrong. Also, convert ndim-indexing to 1d indexing.
14,567
def get_sea_names(): global _SEA_NAMES if _SEA_NAMES is None: resource_text = get_data("cc_plugin_ncei", "data/seanames.xml") parser = etree.XMLParser(remove_blank_text=True) root = etree.fromstring(resource_text, parser) buf = {} for seaname in root.findall(): name = seaname.find().text buf[name] = seaname.find().text if seaname.find() is not None else _SEA_NAMES = buf return _SEA_NAMES
Returns a list of NODC sea names source of list: http://www.nodc.noaa.gov/General/NODC-Archive/seanames.xml
14,568
def import_app(files, category, overwrite, id, name): platform = _get_platform() org = platform.get_organization(QUBELL["organization"]) if category: category = org.categories[category] regex = re.compile(r"^(.*?)(-v(\d+)|)\.[^.]+$") if (id or name) and len(files) > 1: raise Exception("--id and --name are supported only for single-file mode") for filename in files: click.echo("Importing " + filename, nl=False) if not name: match = regex.match(basename(filename)) if not match: click.echo(_color("RED", "FAIL") + " unknown filename format") break name = regex.match(basename(filename)).group(1) click.echo(" => ", nl=False) app = None try: app = org.get_application(id=id, name=name) if app and not overwrite: click.echo("%s %s already exists %s" % ( app.id, _color("BLUE", app and app.name or name), _color("RED", "FAIL"))) break except NotFoundError: if id: click.echo("%s %s not found %s" % ( id or "", _color("BLUE", app and app.name or name), _color("RED", "FAIL"))) break click.echo(_color("BLUE", app and app.name or name) + " ", nl=False) try: with file(filename, "r") as f: if app: app.update(name=app.name, category=category and category.id or app.category, manifest=Manifest(content=f.read())) else: app = org.application(id=id, name=name, manifest=Manifest(content=f.read())) if category: app.update(category=category.id) click.echo(app.id + _color("GREEN", " OK")) except IOError as e: click.echo(_color("RED", " FAIL") + " " + e.message) break
Upload application from file. By default, file name will be used as application name, with "-vXX.YYY" suffix stripped. Application is looked up by one of these classifiers, in order of priority: app-id, app-name, filename. If app-id is provided, looks up existing application and updates its manifest. If app-id is NOT specified, looks up by name, or creates new application.
14,569
def print_params(self, allpars=False, loglevel=logging.INFO): pars = self.get_params() o = o += % ( , , , , , , , ) o += * 80 + src_pars = collections.OrderedDict() for p in pars: src_pars.setdefault(p[], []) src_pars[p[]] += [p] free_sources = [] for k, v in src_pars.items(): for p in v: if not p[]: continue free_sources += [k] for k, v in src_pars.items(): if not allpars and k not in free_sources: continue o += % k for p in v: o += % (p[], p[]) o += % (p[], p[]) o += % (p[], p[], p[]) if p[]: o += else: o += o += self.logger.log(loglevel, o)
Print information about the model parameters (values, errors, bounds, scale).
14,570
def _content_blocks(self, r):
    return (self._block_rows -
            self._left_zero_blocks(r) -
            self._right_zero_blocks(r))
Number of content blocks in block row `r`.
14,571
def reserve_udp_port(self, port, project):
    if port in self._used_udp_ports:
        raise HTTPConflict(text="UDP port {} already in use on host {}".format(port, self._console_host))
    if port < self._udp_port_range[0] or port > self._udp_port_range[1]:
        raise HTTPConflict(text="UDP port {} is outside the range {}-{}".format(port, self._udp_port_range[0], self._udp_port_range[1]))
    self._used_udp_ports.add(port)
    project.record_udp_port(port)
    log.debug("UDP port {} has been reserved".format(port))
Reserve a specific UDP port number :param port: UDP port number :param project: Project instance
14,572
def delete(self, num_iid, properties, session, item_price=None, item_num=None, lang=None): request = TOPRequest() request[] = num_iid request[] = properties if item_num!=None: request[] = item_num if item_price!=None: request[] = item_price if lang!=None: request[] = lang self.create(self.execute(request, session)[]) return self
taobao.item.sku.delete — Delete an SKU. Deletes the data of a single SKU; the SKU to delete is looked up by matching its `properties` attribute.
14,573
def get_description(self): vo = ffi.cast(, self.pointer) return _to_string(vips_lib.vips_object_get_description(vo))
Get the description of a GObject.
14,574
def fetch_one(self, *args, **kwargs):
    bson_obj = self.fetch(*args, **kwargs)
    count = bson_obj.count()
    if count > 1:
        raise MultipleResultsFound("%s results found" % count)
    elif count == 1:
        return next(bson_obj)
return one document which match the structure of the object `fetch_one()` takes the same arguments than the the pymongo.collection.find method. If multiple documents are found, raise a MultipleResultsFound exception. If no document is found, return None The query is launch against the db and collection of the object.
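A hedged usage sketch in a MongoKit-style setup (the `User` document collection and query are illustrative, not from the source):
>>> user = db.User.fetch_one({'email': 'ada@example.org'})   # the single matching document, or None
>>> # more than one match raises MultipleResultsFound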
14,575
def with_local_env_strategy(molecule, strategy, reorder=True, extend_structure=True): mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="") coords = molecule.cart_coords if extend_structure: a = max(coords[:, 0]) - min(coords[:, 0]) + 100 b = max(coords[:, 1]) - min(coords[:, 1]) + 100 c = max(coords[:, 2]) - min(coords[:, 2]) + 100 molecule = molecule.get_boxed_structure(a, b, c, no_cross=True) for n in range(len(molecule)): neighbors = strategy.get_nn_info(molecule, n) for neighbor in neighbors: if not np.array_equal(neighbor[], [0, 0, 0]): continue mg.add_edge(from_index=n, to_index=neighbor[], weight=neighbor[], warn_duplicates=False) if reorder: n = len(mg.molecule) mapping = {i: (n-i) for i in range(n)} mapping = {i: (j-1) for i, j in mapping.items()} mg.graph = nx.relabel_nodes(mg.graph, mapping) duplicates = [] for edge in mg.graph.edges: if edge[2] != 0: duplicates.append(edge) for duplicate in duplicates: mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2]) mg.set_node_attributes() return mg
Constructor for MoleculeGraph, using a strategy from :Class: `pymatgen.analysis.local_env`. :param molecule: Molecule object :param strategy: an instance of a :Class: `pymatgen.analysis.local_env.NearNeighbors` object :param reorder: bool, representing if graph nodes need to be reordered following the application of the local_env strategy :param extend_structure: If True (default), then a large artificial box will be placed around the Molecule, because some strategies assume periodic boundary conditions. :return: mg, a MoleculeGraph
14,576
def removeBinder(self, name): root = self.etree t_bindings = root.find() t_binder = t_bindings.find(name) if t_binder : t_bindings.remove(t_binder) return True return False
Remove a binder from a table
14,577
def clubConsumables(self, fast=False): method = url = rc = self.__request__(method, url) events = [self.pin.event(, )] self.pin.send(events, fast=fast) events = [self.pin.event(, )] self.pin.send(events, fast=fast) events = [self.pin.event(, )] self.pin.send(events, fast=fast) return [itemParse(i) for i in rc.get(, ())]
Return all consumables from club.
14,578
def send_rpc(self, address, rpc_id, call_payload, timeout=3.0): if not self.connected: raise HardwareError("Cannot send an RPC if we are not in a connected state") if timeout is None: timeout = 3.0 status = -1 payload = b recording = None if self.connection_interrupted: self._try_reconnect() if self._record is not None: recording = _RecordedRPC(self.connection_string, address, rpc_id, call_payload) recording.start() try: payload = self._loop.run_coroutine(self.adapter.send_rpc(0, address, rpc_id, call_payload, timeout)) status, payload = pack_rpc_response(payload, None) except VALID_RPC_EXCEPTIONS as exc: status, payload = pack_rpc_response(payload, exc) if self._record is not None: recording.finish(status, payload) self._recording.append(recording) if self.connection_interrupted: self._try_reconnect() return unpack_rpc_response(status, payload, rpc_id, address)
Send an rpc to our connected device. The device must already be connected and the rpc interface open. This method will synchronously send an RPC and wait for the response. Any RPC errors will be raised as exceptions and if there were no errors, the RPC's response payload will be returned as a binary bytearray. See :meth:`AbstractDeviceAdapter.send_rpc` for documentation of the possible exceptions that can be raised here. Args: address (int): The tile address containing the RPC rpc_id (int): The ID of the RPC that we wish to call. call_payload (bytes): The payload containing encoded arguments for the RPC. timeout (float): The maximum number of seconds to wait for the RPC to finish. Defaults to 3s. Returns: bytearray: The RPC's response payload.
14,579
def create(cls, community, record, user=None, expires_at=None, notify=True):
    if expires_at and expires_at < datetime.utcnow():
        raise InclusionRequestExpiryTimeError(
            community=community, record=record)
    if community.has_record(record):
        raise InclusionRequestObsoleteError(
            community=community, record=record)
    try:
        with db.session.begin_nested():
            obj = cls(
                id_community=community.id,
                id_record=record.id,
                user=user,
                expires_at=expires_at
            )
            db.session.add(obj)
    except (IntegrityError, FlushError):
        raise InclusionRequestExistsError(
            community=community, record=record)
    inclusion_request_created.send(
        current_app._get_current_object(),
        request=obj,
        notify=notify
    )
    return obj
Create a record inclusion request to a community.

:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't be resolved anymore.
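A usage sketch under the assumption that a Community object and a record API object already exist inside a running Flask/Invenio application context; the one-week expiry is an illustrative value.

from datetime import datetime, timedelta

# `community` and `record` are assumed to exist already.
request = InclusionRequest.create(
    community=community,
    record=record,
    user=None,                                    # anonymous request
    expires_at=datetime.utcnow() + timedelta(days=7),
    notify=True,
)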
14,580
def _parse_extra(self, fp):
    comment = ''
    section = ''

    fp.seek(0)

    for line in fp:
        line = line.rstrip()

        if not line:
            if comment:
                comment += '\n'
            continue

        if line.startswith('#'):         # comment line; prefix char assumed, literal lost
            comment += line + '\n'
            continue

        if line.startswith('['):         # section header; bracket chars assumed, literals lost
            section = line.strip('[]')
            self._add_dot_key(section)
            if comment:
                self._comments[section] = comment.rstrip()

        elif CONFIG_KEY_RE.match(line):  # key = value line; '=' separator assumed
            key = line.split('=', 1)[0].strip()
            self._add_dot_key(section, key)
            if comment:
                self._comments[(section, key)] = comment.rstrip()

        comment = ''

    if comment:
        self._comments[self.LAST_COMMENT_KEY] = comment
Parse and store the config comments and create maps for dot notation lookup
14,581
def plotfft(s, fmax, doplot=False):
    fs = abs(np.fft.fft(s))
    # np.linspace needs an integer sample count in Python 3
    f = np.linspace(0, fmax / 2, len(s) // 2)
    if doplot:
        pass  # plotting intentionally left out here
    return (f[1:len(s) // 2].copy(), fs[1:len(s) // 2].copy())
This function computes the FFT of a signal, returning the frequency values and their magnitudes.

Parameters
----------
s: array-like
    the input signal.
fmax: int
    the sampling frequency.
doplot: boolean
    a variable to indicate whether the plot is done or not.

Returns
-------
f: array-like
    the frequency values (xx axis)
fs: array-like
    the amplitude of the frequency values (yy axis)
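A self-contained usage sketch: a 5 Hz sine sampled at 100 Hz should produce a spectral peak near 5 Hz in the returned arrays.

import numpy as np

fs_hz = 100                        # sampling frequency
t = np.arange(0, 2, 1.0 / fs_hz)   # 2 seconds of samples
signal = np.sin(2 * np.pi * 5 * t)

f, mag = plotfft(signal, fs_hz)
print(f[np.argmax(mag)])           # expected to be close to 5.0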
14,582
def _publish(self, msg):
    connection = self._connection.clone()
    publish = connection.ensure(self.producer, self.producer.publish,
                                errback=self.__error_callback,
                                max_retries=MQ_PRODUCER_MAX_RETRIES)
    publish(json.dumps(msg),
            exchange=self._exchange,
            routing_key=self._routing_key,
            declare=[self._queue])
    # Log format string reconstructed; the original literal was lost.
    logging.debug('Message published: %s', msg)
Publish, handling retries, a message in the queue.

:param msg: Object which represents the message to be sent in the queue. Note that this object should be serializable in the configured format (by default JSON).
14,583
def cli_form(self, *args):
    # The keyword, separator, and dict-key literals were lost in extraction;
    # 'all', ':' and 'form' are reconstructions based on the docstring.
    if args[0] == 'all':
        for schema in schemastore:
            self.log(schema, ':', schemastore[schema]['form'], pretty=True)
    else:
        self.log(schemastore[args[0]]['form'], pretty=True)
Display a schemata's form definition
14,584
def run(self):
    try:
        super().run()
    except Exception as e:
        print(trace_info())
    finally:
        if not self.no_ack:
            self.ch.basic_ack(delivery_tag=self.method.delivery_tag)
Start the thread.

:return:
14,585
def release(self, resource):
    with self.releaser:
        resource.claimed = False
        self.releaser.notify_all()
release(resource)

Returns a resource to the pool. Most of the time you will want to use :meth:`transaction`, but if you use :meth:`acquire`, you must release the acquired resource back to the pool when finished. Failure to do so could result in deadlock.

:param resource: Resource
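A hedged sketch of the acquire/release discipline the docstring describes; `pool`, its `acquire()` method, and `do_work()` are assumed from the surrounding class and are not shown in this entry.

# Assumes `pool` is an instance of the surrounding pool class.
resource = pool.acquire()
try:
    resource.do_work()        # hypothetical use of the resource
finally:
    # Always return the resource, otherwise other waiters may deadlock.
    pool.release(resource)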
14,586
def extent_string_to_array(extent_text):
    # Separators assumed from the docstring example: strip spaces, split on commas
    # (the original string literals were lost).
    coordinates = extent_text.replace(' ', '').split(',')
    count = len(coordinates)
    if count != 4:
        # Error text reconstructed; the original literal was lost.
        message = ('Extent should have exactly 4 coordinates, got %s' % count)
        LOGGER.error(message)
        return None
    try:
        coordinates = [float(i) for i in coordinates]
    except ValueError as e:
        message = str(e)
        LOGGER.error(message)
        return None
    return coordinates
Convert an extent string to an array.

.. versionadded: 2.2.0

:param extent_text: String representing an extent e.g. 109.829170982, -8.13333290561, 111.005344795, -7.49226294379
:type extent_text: str

:returns: A list of floats, or None
:rtype: list, None
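A usage sketch built directly on the docstring's example string; it assumes the module-level LOGGER the function requires is configured.

extent_text = "109.829170982, -8.13333290561, 111.005344795, -7.49226294379"
extent = extent_string_to_array(extent_text)
print(extent)
# [109.829170982, -8.13333290561, 111.005344795, -7.49226294379]

print(extent_string_to_array("1, 2, 3"))   # wrong number of coordinates -> None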
14,587
def columns(self):
    # Dictionary key assumed to be 'name' (the docstring says names are returned);
    # the original string literal was lost.
    res = [col['name'] for col in self.column_definitions]
    res.extend([col['name'] for col in self.foreign_key_definitions])
    return res
Return names of all the addressable columns (including foreign keys) referenced in user supplied model
14,588
def set_back_led_output(self, value):
    return self.write(request.SetBackLEDOutput(self.seq, value))
value can be between 0x00 and 0xFF
14,589
def setting_ctx(num_gpus):
    if num_gpus > 0:
        ctx = [mx.gpu(i) for i in range(num_gpus)]
    else:
        ctx = [mx.cpu()]
    return ctx
Description : set the MXNet device context (GPUs when num_gpus > 0, otherwise CPU)
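A small usage sketch assuming MXNet is importable as `mx`, as the function above requires.

import mxnet as mx

ctx = setting_ctx(num_gpus=2)   # -> [mx.gpu(0), mx.gpu(1)]
print(ctx)

ctx = setting_ctx(num_gpus=0)   # -> [mx.cpu()]
print(ctx)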
14,590
def _convert_from_thrift_endpoint(self, thrift_endpoint):
    ipv4 = None
    ipv6 = None
    # struct format strings reconstructed (originals lost): reinterpret the signed
    # thrift short as an unsigned port, and the signed i32 as a packed IPv4 address.
    port = struct.unpack('H', struct.pack('h', thrift_endpoint.port))[0]
    if thrift_endpoint.ipv4 != 0:
        ipv4 = socket.inet_ntop(
            socket.AF_INET,
            struct.pack('!i', thrift_endpoint.ipv4),
        )
    if thrift_endpoint.ipv6:
        ipv6 = socket.inet_ntop(socket.AF_INET6, thrift_endpoint.ipv6)

    return Endpoint(
        service_name=thrift_endpoint.service_name,
        ipv4=ipv4,
        ipv6=ipv6,
        port=port,
    )
Accepts a thrift decoded endpoint and converts it to an Endpoint.

:param thrift_endpoint: thrift encoded endpoint
:type thrift_endpoint: thrift endpoint

:returns: decoded endpoint
:rtype: Endpoint
14,591
def get_authority(config, metrics, rrset_channel, **kwargs):
    builder = authority.GCEAuthorityBuilder(
        config, metrics, rrset_channel, **kwargs)
    return builder.build_authority()
Get a GCEAuthority client.

A factory function that validates configuration and creates a proper GCEAuthority.

Args:
    config (dict): GCEAuthority related configuration.
    metrics (obj): :interface:`IMetricRelay` implementation.
    rrset_channel (asyncio.Queue): Queue used for sending messages to the reconciler plugin.
    kw (dict): Additional keyword arguments to pass to the Authority.

Returns:
    A :class:`GCEAuthority` instance.
14,592
def slots_class_sealer(fields, defaults):
    class __slots_meta__(type):
        def __new__(mcs, name, bases, namespace):
            if "__slots__" not in namespace:
                namespace["__slots__"] = fields
            return type.__new__(mcs, name, bases, namespace)

    class __slots_base__(_with_metaclass(__slots_meta__, object)):
        __slots__ = ()

        def __init__(self, *args, **kwargs):
            pass

    return class_sealer(fields, defaults, base=__slots_base__)
This sealer makes a container class that uses ``__slots__`` (it uses :func:`class_sealer` internally). The resulting class has a metaclass that forcibly sets ``__slots__`` on subclasses.
14,593
def process_gene_interaction(self, limit):
    # Path components assumed; the original string literals were lost.
    raw = '/'.join((self.rawdir, self.files['gene_interaction']['file']))

    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("Processing gene interaction associations")
    line_counter = 0

    with gzip.open(raw, 'rb') as csvfile:
        filereader = csv.reader(
            io.TextIOWrapper(csvfile, newline=""),
            delimiter='\t', quotechar='"')
        for row in filereader:
            line_counter += 1
            # NOTE: the original lines that skipped headers and unpacked
            # interaction_id, interaction_type_id and summary from the row
            # were lost in extraction and are not reconstructed here.
            num_interactors = (len(row) - 5) / 3
            if num_interactors != 2:
                LOG.info(
                    "Skipping interactions with !=2 participants:\n %s",
                    str(row))
                continue

            # CURIE prefix and test-id key assumed; original literals lost.
            gene_a_id = 'WormBase:' + row[5]
            gene_b_id = 'WormBase:' + row[8]

            if self.test_mode \
                    and gene_a_id not in self.test_ids['gene'] \
                    and gene_b_id not in self.test_ids['gene']:
                continue

            assoc = InteractionAssoc(
                graph, self.name, gene_a_id, gene_b_id, interaction_type_id)
            assoc.set_association_id(interaction_id)
            assoc.add_association_to_graph()
            assoc_id = assoc.get_association_id()
            model.addDescription(assoc_id, summary)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
The gene interaction file includes identified interactions that are between two or more gene (products). In the case of interactions with >2 genes, this requires creating groups of genes that are involved in the interaction.

From the wormbase help list: In the example WBInteraction000007779 it would likely be misleading to suggest that lin-12 interacts with (suppresses in this case) smo-1 ALONE or that lin-12 suppresses let-60 ALONE; the observation in the paper (see Table V in PMID:15990876) was that a lin-12 allele (heterozygous lin-12(n941/+)) could suppress the "multivulva" phenotype induced synthetically by simultaneous perturbation of BOTH smo-1 (by RNAi) AND let-60 (by the n2021 allele). So this is necessarily a three-gene interaction.

Therefore, we can create groups of genes based on their "status" of Effector | Effected.

Status: IN PROGRESS

:param limit:
:return:
14,594
def _read_unquote(ctx: ReaderContext) -> LispForm:
    start = ctx.reader.advance()
    assert start == "~"

    with ctx.unquoted():
        next_char = ctx.reader.peek()
        if next_char == "@":
            ctx.reader.advance()
            next_form = _read_next_consuming_comment(ctx)
            return llist.l(_UNQUOTE_SPLICING, next_form)
        else:
            next_form = _read_next_consuming_comment(ctx)
            return llist.l(_UNQUOTE, next_form)
Read an unquoted form and handle any special logic of unquoting.

Unquoted forms can take two, well... forms: `~form` is read as `(unquote form)` and any nested forms are read literally and passed along to the compiler untouched. `~@form` is read as `(unquote-splicing form)` which tells the compiler to splice in the contents of a sequential form such as a list or vector into the final compiled form.

This helps macro writers create longer forms such as function calls, function bodies, or data structures with the contents of another collection they have.
14,595
def mv_normal_cov_like(x, mu, C):
    if len(np.shape(x)) > 1:
        # Sum the log-likelihood over the rows of a 2-d input.
        return np.sum([flib.cov_mvnorm(r, mu, C) for r in x])
    else:
        return flib.cov_mvnorm(x, mu, C)
Multivariate normal log-likelihood parameterized by a covariance matrix.

.. math::
    f(x \mid \mu, C) = \frac{1}{(2\pi)^{k/2}|C|^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}

:Parameters:
  - `x` : (n,k)
  - `mu` : (k) Location parameter.
  - `C` : (k,k) Positive definite covariance matrix.

.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
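A hedged usage sketch; it assumes PyMC 2 is installed so that `flib` is available, and cross-checks the result against an equivalent SciPy computation for a 2-dimensional example.

import numpy as np
from scipy import stats

x = np.array([0.5, -0.2])
mu = np.array([0.0, 0.0])
C = np.array([[1.0, 0.3],
              [0.3, 2.0]])

# Value computed by the function above (requires PyMC 2's flib).
ll = mv_normal_cov_like(x, mu, C)

# Cross-check with SciPy's multivariate normal log-pdf.
print(ll, stats.multivariate_normal.logpdf(x, mean=mu, cov=C))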
14,596
def reset(self):
    for name in self.__dict__:
        if name.startswith("_"):
            continue
        attr = getattr(self, name)
        setattr(self, name, attr and attr.__class__())
Reset all fields of this object to class defaults
14,597
def probe(self, ipaddr=None):
    if ipaddr is None:
        ipaddr = self._broadcast_addr
    cmd = {"payloadtype": PayloadType.GET, "target": ipaddr}
    self._send_command(cmd)
Probe given address for bulb.
14,598
def check_packed_data(self, ds):
    ret_val = []
    for name, var in ds.variables.items():
        # Attribute names inferred from the check description; original literals lost.
        add_offset = getattr(var, 'add_offset', None)
        scale_factor = getattr(var, 'scale_factor', None)
        if not (add_offset or scale_factor):
            continue

        valid = True
        reasoning = []

        # NOTE: the original type-consistency checks that set `valid` and append
        # messages to `reasoning`, as well as the construction of `result` from
        # them, were lost in extraction; the call below is only a placeholder
        # with an assumed signature.
        result = Result(valid, reasoning)

        ret_val.append(result)
    return ret_val
8.1 Simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and add_offset. After the data values of a variable have been read, they are to be multiplied by the scale_factor, and have add_offset added to them. The units of a variable should be representative of the unpacked data.

If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset attributes are of a different data type from the variable (containing the packed data) then the unpacked data should match the type of these attributes, which must both be of type float or both be of type double. An additional restriction in this case is that the variable containing the packed data must be of type byte, short or int. It is not advised to unpack an int into a float as there is a potential precision loss.

When data to be packed contains missing values the attributes that indicate missing values (_FillValue, valid_min, valid_max, valid_range) must be of the same data type as the packed data.

:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
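A small numeric sketch of the unpacking rule the check enforces: packed values are multiplied by scale_factor and shifted by add_offset; the values are illustrative.

import numpy as np

packed = np.array([0, 100, 200], dtype=np.int16)   # packed short values
scale_factor = np.float64(0.01)
add_offset = np.float64(20.0)

# CF unpacking rule: unpacked = packed * scale_factor + add_offset
unpacked = packed * scale_factor + add_offset
print(unpacked)   # [20.  21.  22.]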
14,599
def get_last_content(request, page_id):
    # Query parameter names inferred from the local variable names; original literals lost.
    content_type = request.GET.get('content_type')
    language_id = request.GET.get('language_id')
    page = get_object_or_404(Page, pk=page_id)
    placeholders = get_placeholders(page.get_template())
    _template = template.loader.get_template(page.get_template())
    for placeholder in placeholders:
        if placeholder.name == content_type:
            # Context key names assumed; original literals lost.
            context = RequestContext(request, {
                'current_page': page,
                'lang': language_id
            })
            with context.bind_template(_template.template):
                content = placeholder.render(context)
            return HttpResponse(content)
    raise Http404
Get the latest content for a particular type
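A hedged sketch of calling the view with Django's RequestFactory; the page id and the query parameter names mirror the reconstruction above and may differ in the real project.

from django.test import RequestFactory

rf = RequestFactory()
request = rf.get("/pages/last-content/", {
    "content_type": "body",     # placeholder placeholder-name, assumed
    "language_id": "en",
})
response = get_last_content(request, page_id=1)
print(response.status_code, response.content)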