Dataset sample. Columns: an unnamed int64 row index (values 0 to 389k), code (string, lengths 26 to 79.6k characters), and docstring (string, lengths 1 to 46.9k characters). Rows 4,600 through 4,684 follow.
def load(kls, url, getter=None, parser=None, url_load_hook=None,
         sep=consts.private.SCOPE_SEPARATOR, prim=None, mime_codec=None,
         resolver=None):
    logger.info('load with [%s]' % url)  # original log text lost in extraction; message assumed
    url = utils.normalize_url(url)
    app = kls(url, url_load_hook=url_load_hook, sep=sep, prim=prim,
              mime_codec=mime_codec, resolver=resolver)
    app.__raw, app.__version = app.load_obj(url, getter=getter, parser=parser)
    if app.__version not in ['1.2', '2.0']:  # version literals assumed from supported Swagger specs
        raise NotImplementedError('Unsupported Swagger version: {0}'.format(app.__version))
    p = six.moves.urllib.parse.urlparse(url)
    if p.scheme:
        app.schemes.append(p.scheme)
    return app
load json as a raw App :param str url: url or path of Swagger API definition :param getter: customized Getter :type getter: subclass/instance of Getter :param parser: the parser to parse the loaded json. :type parser: pyswagger.base.Context :param dict app_cache: the cache shared by related App :param func url_load_hook: hook to patch the url to load json :param str sep: scope separator used in this App :param prim pyswagger.primitives.Primitive: factory for primitives in Swagger :param mime_codec pyswagger.primitives.MimeCodec: MIME codec :param resolver pyswagger.resolve.Resolver: customized resolver used as default when none is provided when resolving :return: the created App object :rtype: App :raises ValueError: if url is wrong :raises NotImplementedError: the swagger version is not supported.
4,601
def muted(*streams):
    devnull = open(os.devnull, 'w')
    try:
        old_streams = [os.dup(s.fileno()) for s in streams]
        for s in streams:
            os.dup2(devnull.fileno(), s.fileno())
        yield
    finally:
        # restore the original file descriptors
        for o, n in zip(old_streams, streams):
            os.dup2(o, n.fileno())
        devnull.close()
A context manager to redirect stdout and/or stderr to /dev/null. Examples: with muted(sys.stdout): ... with muted(sys.stderr): ... with muted(sys.stdout, sys.stderr): ...
4,602
def _handle_input_request(self, msg):
    self.log.debug("input: %s", msg.get('content', ''))
    if self._hidden:
        raise RuntimeError('Request for raw input during hidden execution.')  # message assumed
    # flush pending output before prompting
    self.kernel_manager.sub_channel.flush()

    def callback(line):
        self.kernel_manager.stdin_channel.input(line)

    if self._reading:
        self.log.debug("Got second input request, assuming first was interrupted.")
        self._reading = False
    self._readline(msg['content']['prompt'], callback=callback)
Handle requests for raw_input.
4,603
def tokenize(args):
    if args.profile and not Path(args.profile).exists():
        raise ParserError('--profile must point to an existing file')  # message lost; assumed
    _write(args, Tokenizer(profile=args.profile)(_read(args), column=args.mapping))
Tokenize a string (passed as argument or read from stdin) segments [--profile=PATH/TO/PROFILE] tokenize [STRING]
4,604
def _populate_random_tournament_row_col(n, r, row, col):
    k = 0
    for i in range(n):
        for j in range(i+1, n):
            if r[k] < 0.5:
                row[k], col[k] = i, j
            else:
                row[k], col[k] = j, i
            k += 1
Populate ndarrays `row` and `col` with directed edge indices determined by random numbers in `r` for a tournament graph with n nodes, which has num_edges = n * (n-1) // 2 edges. Parameters ---------- n : scalar(int) Number of nodes. r : ndarray(float, ndim=1) ndarray of length num_edges containing random numbers in [0, 1). row, col : ndarray(int, ndim=1) ndarrays of length num_edges to be modified in place.
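A minimal usage sketch; the scipy sparse-matrix step is illustrative, not part of the original API:

import numpy as np
from scipy import sparse

n = 4
num_edges = n * (n - 1) // 2
r = np.random.random(num_edges)
row = np.empty(num_edges, dtype=int)
col = np.empty(num_edges, dtype=int)
_populate_random_tournament_row_col(n, r, row, col)
# row[k] -> col[k] is the direction chosen for the k-th pair (i, j)
adj = sparse.coo_matrix((np.ones(num_edges), (row, col)), shape=(n, n))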
4,605
def render_template(template_name_or_list, **context):
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
                   context, ctx.app)
Renders a template from the template folder with the given context. :param template_name_or_list: the name of the template to be rendered, or an iterable of template names; the first one that exists will be rendered :param context: the variables that should be available in the context of the template.
4,606
def _coarsenImage(image, f):
    from skimage.morphology import square
    from skimage.filters import rank
    from skimage.transform._warps import rescale
    selem = square(f)
    arri = rank.mean(image, selem=selem)
    return rescale(arri, 1 / f, order=0)
Seems to be a more precise (but slower) way to down-scale an image.
4,607
def get_or_new_from_json_dict_with_types(data, cls_map, type_key='type'):  # default key assumed
    if isinstance(data, tuple(cls_map.values())):
        return data
    elif isinstance(data, dict):
        type_val = data[type_key]
        if type_val in cls_map:
            return cls_map[type_val].new_from_json_dict(data)
    return None
Get `cls` object w/ deserialization from json by using type key hint if needed. If data is an instance of one of the classes in cls_map, return data. Else if data is an instance of dict, create an instance from the dict. Else, return None. :param data: dict (or already-deserialized object) to convert :param cls_map: mapping from type-key value to class :param type_key: key in data whose value selects the class :rtype: object :return: deserialized object or None
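A minimal usage sketch; the TextMessage class, its fields, and the 'type' key are illustrative assumptions, not part of the original module:

class TextMessage(object):
    @classmethod
    def new_from_json_dict(cls, data):
        obj = cls()
        obj.text = data.get('text')
        return obj

cls_map = {'text': TextMessage}
msg = get_or_new_from_json_dict_with_types(
    {'type': 'text', 'text': 'hello'}, cls_map, type_key='type')
# msg is a TextMessage; an unrecognised 'type' value would yield None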
4,608
def gen_multi_data(n=5000):
    X, y = toy_classification(return_X_y=True, n=10000)
    lgam = LogisticGAM(s(0) + s(1) + s(2) + s(3) + s(4) + f(5))
    lgam.fit(X, y)
    plt.figure()
    for i, term in enumerate(lgam.terms):
        if term.isintercept:
            continue
        plt.plot(lgam.partial_dependence(term=i))
    plt.savefig('multi_data_partial_dependence.png', dpi=300)  # filename lost; assumed
    plt.figure()
    plt.plot(lgam.logs_['deviance'])  # key assumed from pyGAM's fit log
    plt.savefig('multi_data_deviance.png', dpi=300)  # filename lost; assumed
multivariate Logistic problem
4,609
def _releaseConnection(self, dbConn, cursor):
    self._logger.debug("Releasing connection")
    cursor.close()
    dbConn.close()
    return
Release database connection and cursor; passed as a callback to ConnectionWrapper
4,610
def run(self, node, expr=None, lineno=None, with_raise=True):
    if time.time() - self.start_time > self.max_time:
        raise RuntimeError(ERR_MAX_TIME.format(self.max_time))
    out = None
    if len(self.error) > 0:
        return out
    if node is None:
        return out
    if isinstance(node, str):
        node = self.parse(node)
    if lineno is not None:
        self.lineno = lineno
    if expr is not None:
        self.expr = expr
    try:
        handler = self.node_handlers[node.__class__.__name__.lower()]
    except KeyError:
        return self.unimplemented(node)
    try:
        ret = handler(node)
        if isinstance(ret, enumerate):
            ret = list(ret)
        return ret
    except:
        if with_raise:
            self.raise_exception(node, expr=expr)
Execute parsed Ast representation for an expression.
4,611
def jinja_filter_param_value_str(value, str_quote_style="", bool_is_str=False):
    if (type(value) == bool) and not bool_is_str:
        # Return literals lost in extraction; '1'/'0' assumed from the
        # docstring's "Booleans are represented as 0/1" rule.
        if value == True:
            return '1'
        else:
            return '0'
    elif type(value) == str or ((type(value) == bool) and bool_is_str):
        return str_quote_style + str(value) + str_quote_style
    else:
        return str(value)
Convert a parameter value to string suitable to be passed to an EDA tool Rules: - Booleans are represented as 0/1 or "true"/"false" depending on the bool_is_str argument - Strings are either passed through or enclosed in the characters specified in str_quote_style (e.g. '"' or '\\"') - Everything else (including int, float, etc.) are converted using the str() function.
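One way to register the filter in a Jinja2 environment; the filter name 'param_value_str' is an assumption for illustration:

from jinja2 import Environment

env = Environment()
env.filters['param_value_str'] = jinja_filter_param_value_str
tmpl = env.from_string("parameter F = {{ f | param_value_str('\"') }}")
print(tmpl.render(f='top.v'))              # parameter F = "top.v"
print(jinja_filter_param_value_str(True))  # '1' (with the reconstruction above)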
4,612
def create_subtask(self, cor, name=None, stop_timeout=1.0):
    if self.stopped:
        raise InternalError("Cannot add a subtask to a parent that is already stopped")
    subtask = BackgroundTask(cor, name, loop=self._loop, stop_timeout=stop_timeout)
    self.add_subtask(subtask)
    return subtask
Create and add a subtask from a coroutine. This function will create a BackgroundTask and then call self.add_subtask() on it. Args: cor (coroutine): The coroutine that should be wrapped in a background task. name (str): An optional name for the task. stop_timeout (float): The maximum time to wait for this subtask to die after stopping it. Returns: BackgroundTask: The created subtask.
4,613
def read_string(self, where, max_length=None, force=False):
    s = io.BytesIO()
    while True:
        c = self.read_int(where, 8, force)
        if issymbolic(c) or c == 0:
            break
        if max_length is not None:
            if max_length == 0:
                break
            max_length = max_length - 1
        s.write(Operators.CHR(c))
        where += 1
    return s.getvalue().decode()
Read a NUL-terminated concrete buffer from memory. Stops reading at first symbolic byte. :param int where: Address to read string from :param int max_length: The size in bytes to cap the string at, or None [default] for no limit. :param force: whether to ignore memory permissions :return: string read :rtype: str
4,614
def kde_statsmodels_u(data, grid, **kwargs):
    kde = KDEUnivariate(data)
    kde.fit(**kwargs)
    return kde.evaluate(grid)
Univariate Kernel Density Estimation with Statsmodels Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x 1` dimensions, representing n points. grid : numpy.array Data points at which the density will be estimated. It has `m x 1` dimensions, representing m points. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions
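A minimal usage sketch (assumes `from statsmodels.nonparametric.kde import KDEUnivariate` is in scope, as the function requires):

import numpy as np

data = np.random.normal(size=500)
grid = np.linspace(-4, 4, 200)
density = kde_statsmodels_u(data, grid)  # default Gaussian kernel and bandwidth
# density[i] is the estimated pdf at grid[i]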
4,615
def get_commands(self, source=None):
    commands = []
    for key, value in self:
        if (source is None) or (key in source):
            # Format string lost in extraction; reconstructed from the
            # docstring's example output below.
            commands.append('.. |%s| replace:: %s' % (key, value))
    return '\n'.join(commands)
Return a string containing multiple `reStructuredText` replacements with the substitutions currently defined. Some examples based on the subpackage |optiontools|: >>> from hydpy.core.autodoctools import Substituter >>> substituter = Substituter() >>> from hydpy.core import optiontools >>> substituter.add_module(optiontools) When calling |Substituter.get_commands| with the `source` argument, the complete `short2long` and `medium2long` mappings are translated into replacement commands (only a few of them are shown): >>> print(substituter.get_commands()) .. |Options.autocompile| replace:: \ :const:`~hydpy.core.optiontools.Options.autocompile` .. |Options.checkseries| replace:: \ :const:`~hydpy.core.optiontools.Options.checkseries` ... .. |optiontools.Options.warntrim| replace:: \ :const:`~hydpy.core.optiontools.Options.warntrim` .. |optiontools.Options| replace:: \ :class:`~hydpy.core.optiontools.Options` Through passing a string (usually the source code of a file to be documented), only the replacement commands relevant for this string are translated: >>> from hydpy.core import objecttools >>> import inspect >>> source = inspect.getsource(objecttools) >>> print(substituter.get_commands(source)) .. |Options.reprdigits| replace:: \ :const:`~hydpy.core.optiontools.Options.reprdigits`
4,616
def clean_image(self):
    self.instance.user = self.user
    data = self.cleaned_data.get('image')  # field name assumed from the method's name
    return data
It seems like in Django 1.5 something has changed. When Django tries to validate the form, it checks if the generated filename fits into max_length. But at this point, self.instance.user is not yet set, so our filename generation function cannot create the new file path because it needs the user id. Setting self.instance.user at this point seems to work as a workaround.
4,617
def update_definition_properties(self, document, project, definition_id): route_values = {} if project is not None: route_values[] = self._serialize.url(, project, ) if definition_id is not None: route_values[] = self._serialize.url(, definition_id, ) content = self._serialize.body(document, ) response = self._send(http_method=, location_id=, version=, route_values=route_values, content=content, media_type=) return self._deserialize(, response)
UpdateDefinitionProperties. [Preview API] Updates properties for a definition. :param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.build.models.[JsonPatchOperation]>` document: A json-patch document describing the properties to update. :param str project: Project ID or project name :param int definition_id: The ID of the definition. :rtype: :class:`<object> <azure.devops.v5_0.build.models.object>`
4,618
def set_attribute_xsi_type(self, el, **kw):
    if kw.get('typed', self.typed):  # key assumed from the 'typed' default
        namespaceURI, typeName = kw.get('type', _get_xsitype(self))  # key assumed
        if namespaceURI and typeName:
            self.logger.debug("attribute: (%s, %s)", namespaceURI, typeName)
            el.setAttributeType(namespaceURI, typeName)
if typed, set the xsi:type attribute Parameters: el -- MessageInterface representing the element
4,619
def complete_definition(subj: Node, source_graph: Graph,
                        target_graph: Optional[Graph]=None) -> PrettyGraph:
    if target_graph is None:
        target_graph = PrettyGraph()
    for p, o in source_graph.predicate_objects(subj):
        target_graph.add((subj, p, o))
        if isinstance(o, BNode):
            complete_definition(o, source_graph, target_graph)
    return target_graph
Return the transitive closure of subject. :param subj: URI or BNode for subject :param source_graph: Graph containing definition :param target_graph: return graph (for recursion) :return: target_graph
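A small usage sketch with rdflib (PrettyGraph is assumed to be the Graph subclass from the same module):

from rdflib import Graph, Namespace

EX = Namespace('http://example.org/')
g = Graph()
g.parse(data='''
@prefix ex: <http://example.org/> .
ex:thing ex:prop [ ex:nested "value" ] .
''', format='turtle')

closure = complete_definition(EX.thing, g)
# closure holds ex:thing's triples plus those of the nested blank node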
4,620
def root_is_purelib(name, wheeldir):
    name_folded = name.replace("-", "_")
    for item in os.listdir(wheeldir):
        match = dist_info_re.match(item)
        if match and match.group('name') == name_folded:  # group name assumed
            # 'Root-Is-Purelib' lives in the dist-info WHEEL metadata file
            with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
                for line in wheel:
                    line = line.lower().rstrip()
                    if line == "root-is-purelib: true":
                        return True
    return False
Return True if the extracted wheel in wheeldir should go into purelib.
4,621
def put(self, measurementId):
    json = request.get_json()
    try:
        start = self._calculateStartTime(json)
    except ValueError:
        return 'invalid startTime', 400  # error text lost; assumed
    duration = json['duration'] if 'duration' in json else 10
    if start is None:
        return 'no start time', 400  # error text lost; assumed
    else:
        scheduled, message = self._measurementController.schedule(
            measurementId, duration, start, description=json.get('description'))
        return message, 200 if scheduled else 400
Initiates a new measurement. Accepts a json payload with the following attributes: * duration: in seconds * startTime OR delay: a date in YMD_HMS format or a delay in seconds * description: some free text information about the measurement :return:
4,622
def teal(theTask, parent=None, loadOnly=False, returnAs="dict",
         canExecute=True, strict=False, errorsToTerm=False,
         autoClose=True, defaults=False):
    if loadOnly:
        obj = None
        try:
            obj = cfgpars.getObjectFromTaskArg(theTask, strict, defaults)
        except Exception as re:
            # an errorsToTerm branch appears to have been lost in extraction
            popUpErr(parent=parent, message=re.message, title="Bad Parameters")
        return obj
    # NOTE: the lines creating the GUI dialog (dlg = ...) were lost in extraction
    if returnAs is None:
        return
    if returnAs == "dict":
        if dlg is None or dlg.canceled():
            return None
        else:
            return dlg.getTaskParsObj()
    if dlg is None or dlg.canceled():
        return -1
    if dlg.executed():
        return 1
    return 0
Start the GUI session, or simply load a task's ConfigObj.
4,623
def _parse_envi(meta):
    def parsevec(s):
        # dtype/separator literals lost; comma-separated floats assumed
        return np.fromstring(s.strip(), dtype='float', sep=',')

    def default(s):
        return s.strip()

    # keys assumed: ENVI stores band wavelengths and FWHM as vectors
    parse = {'wavelength': parsevec, 'fwhm': parsevec}
    parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
    return parsed_meta
Parse ENVI metadata into Python data structures. See the link for information on the ENVI header file format: http://www.harrisgeospatial.com/docs/enviheaderfiles.html Parameters ---------- meta : dict Dictionary of keys and str values to parse, as returned by the rasterio tags(ns='ENVI') call. Returns ------- parsed_meta : dict Dictionary containing the original keys and the parsed values
4,624
def get_hops(self, start, end=None, forward=True):
    if forward:
        return list(self._iterbfs(start=start, end=end, forward=True))
    else:
        return list(self._iterbfs(start=start, end=end, forward=False))
Computes the hop distance to all nodes centered around a specified node. First order neighbours are at hop 1, their neighbours are at hop 2 etc. Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of the forward parameter. If the distance between all neighbouring nodes is 1 the hop number corresponds to the shortest distance between the nodes. :param start: the starting node :param end: ending node (optional). When not specified will search the whole graph. :param forward: directionality parameter (optional). If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}. :return: returns a list of tuples where each tuple contains the node and the hop. Typical usage:: >>> print graph.get_hops(1, 8) >>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)] # node 1 is at 0 hops # node 2 is at 1 hop # ... # node 8 is at 5 hops
4,625
def write_observation_zone(self, **kw):
    # Asserted key names reconstructed from the parameter list in the docstring
    assert 'type' in kw
    if kw['type'] == ObservationZoneType.LINE:
        assert 'length' in kw
    elif kw['type'] == ObservationZoneType.CYLINDER:
        assert 'radius' in kw
    elif kw['type'] == ObservationZoneType.SECTOR:
        assert 'radius' in kw
        assert 'start_radial' in kw
        assert 'end_radial' in kw
    elif kw['type'] == ObservationZoneType.SYMMETRIC_QUADRANT:
        assert 'radius' in kw
    elif kw['type'] == ObservationZoneType.CUSTOM_KEYHOLE:
        assert 'radius' in kw
        assert 'inner_radius' in kw
        assert 'angle' in kw
    self.write_tag('ObservationZone', **kw)
Write an observation zone declaration to the file:: writer.write_observation_zone( type=ObservationZoneType.CYLINDER, radius=30000, ) # <ObservationZone type="Cylinder" radius="30000"/> The required parameters depend on the type parameter. Different observation zone types require different parameters. :param type: observation zone type (one of the constants in :class:`~aerofiles.xcsoar.constants.ObservationZoneType`) :param length: length of the line (only used with type :const:`~aerofiles.xcsoar.constants.ObservationZoneType.LINE`) :param radius: (outer) radius of the observation zone (used with types :const:`~aerofiles.xcsoar.constants.ObservationZoneType.CYLINDER`, :const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`, :const:`~aerofiles.xcsoar.constants.ObservationZoneType.SYMMETRIC_QUADRANT` and :const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`) :param inner_radius: inner radius of the observation zone (only used with type :const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`) :param angle: angle of the observation zone (only used with type :const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`) :param start_radial: start radial of the observation zone (only used with type :const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`) :param end_radial: end radial of the observation zone (only used with type :const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
4,626
def _call_scope(self, scope, *args, **kwargs):
    query = self.get_query()
    original_where_count = len(query.wheres)
    result = getattr(self._model, scope)(self, *args, **kwargs)
    if self._should_nest_wheres_for_scope(query, original_where_count):
        self._nest_wheres_for_scope(
            query, [0, original_where_count, len(query.wheres)]
        )
    return result or self
Call the given model scope. :param scope: The scope to call :type scope: str
4,627
def chisquare(observe, expect, error, ddof, verbose=True):
    chisq = 0
    error = error.flatten()
    observe = observe.flatten()
    expect = expect.flatten()
    for i, el in enumerate(observe):
        chisq = chisq + _np.power((el - expect[i]) / error[i], 2)
    red_chisq = chisq / (len(observe) - ddof)
    if verbose:
        print('Reduced chi-square: {}'.format(red_chisq))  # message lost; assumed
    return red_chisq
Finds the reduced chi square difference of *observe* and *expect* with a given *error* and *ddof* degrees of freedom. *verbose* flag determines if the reduced chi square is printed to the terminal.
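A worked example (assumes numpy is imported as `_np` in the module, as the function body requires):

import numpy as _np

observe = _np.array([10., 12., 9.])
expect = _np.array([10., 10., 10.])
error = _np.array([1., 1., 1.])
# chi^2 = 0^2 + 2^2 + (-1)^2 = 5; reduced by (3 - 1) dof -> 2.5
red = chisquare(observe, expect, error, ddof=1, verbose=False)
assert abs(red - 2.5) < 1e-12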
4,628
def process_exception(self, e, uuid, routing_key, body, tb=None):
    msg = e.message if hasattr(e, "message") else str(e)
    exception_type = str(e.__class__)
    exception_name = str(e.__class__.__name__)
    print "Sending exception %s: %s for UUID %s." % (
        exception_name, msg, uuid
    )
    self.sendMessage(
        self.output_exchange,
        routing_key,
        str(body),
        properties=pika.BasicProperties(
            content_type="application/text",
            delivery_mode=2,
            headers={
                "exception": msg,
                "exception_type": exception_type,
                "exception_name": exception_name,
                "traceback": tb,
                "UUID": uuid
            }
        )
    )
Callback called when exception was raised. This method serializes the exception and sends it over AMQP back to caller. Args: e (obj): Instance of the exception. uuid (str): UUID of the message that caused the exception to raise. routing_key (str): Which routing key was used. body (str): Body of the exception - the longer text. tb (str, default None): Traceback (stack trace) of the exception.
4,629
def get_bounding_box(points):
    # Points are assumed to be dicts with 'x' and 'y' keys; the original
    # subscription literals were lost in extraction.
    assert len(points) > 0, "At least one point has to be given."
    min_x, max_x = points[0]['x'], points[0]['x']
    min_y, max_y = points[0]['y'], points[0]['y']
    for point in points:
        min_x, max_x = min(min_x, point['x']), max(max_x, point['x'])
        min_y, max_y = min(min_y, point['y']), max(max_y, point['y'])
    p1 = Point(min_x, min_y)
    p2 = Point(max_x, max_y)
    return BoundingBox(p1, p2)
Get the bounding box of a list of points. Parameters ---------- points : list of points Returns ------- BoundingBox
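A minimal usage sketch; dict-style points with 'x'/'y' keys match the reconstruction above, and Point/BoundingBox are assumed to come from the same module:

points = [{'x': 0, 'y': 1}, {'x': 2, 'y': -1}, {'x': 1, 'y': 3}]
bb = get_bounding_box(points)
# bb spans Point(0, -1) to Point(2, 3)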
4,630
def ising_simulated_annealing(h, J, beta_range=None, num_sweeps=1000):
    if beta_range is None:
        beta_init = .1
        sigmas = {v: abs(h[v]) for v in h}
        for u, v in J:
            sigmas[u] += abs(J[(u, v)])
            sigmas[v] += abs(J[(u, v)])
        if sigmas:
            beta_final = 2. * max(itervalues(sigmas))
        else:
            beta_final = 0.0
    else:
        # Error-message literals lost in extraction; texts assumed
        if not isinstance(beta_range, (tuple, list)):
            raise TypeError("'beta_range' should be a tuple of length 2")
        if any(not isinstance(b, (int, float)) for b in beta_range):
            raise TypeError("values in 'beta_range' should be numeric")
        if any(b <= 0 for b in beta_range):
            raise ValueError("beta values in 'beta_range' should be positive")
        if len(beta_range) != 2:
            raise ValueError("'beta_range' should be a tuple of length 2")
        beta_init, beta_final = beta_range
    if not isinstance(num_sweeps, int):
        raise TypeError("'num_sweeps' should be a positive int")
    if num_sweeps <= 0:
        raise ValueError("'num_sweeps' should be a positive int")

    # linear beta schedule
    betas = [beta_init + i * (beta_final - beta_init) / (num_sweeps - 1.)
             for i in range(num_sweeps)]

    # build the adjacency structure and colour it so that same-colour
    # spins can be updated together
    adj = {n: set() for n in h}
    for n0, n1 in J:
        adj[n0].add(n1)
        adj[n1].add(n0)
    __, colors = greedy_coloring(adj)

    spins = {v: random.choice((-1, 1)) for v in h}
    for beta in betas:
        energy_diff_h = {v: -2 * spins[v] * h[v] for v in h}
        for color in colors:
            nodes = colors[color]
            energy_diff_J = {}
            for v0 in nodes:
                ediff = 0
                for v1 in adj[v0]:
                    if (v0, v1) in J:
                        ediff += spins[v0] * spins[v1] * J[(v0, v1)]
                    if (v1, v0) in J:
                        ediff += spins[v0] * spins[v1] * J[(v1, v0)]
                energy_diff_J[v0] = -2. * ediff
            for v in nodes:
                logp = math.log(random.uniform(0, 1))
                if logp < -1. * beta * (energy_diff_h[v] + energy_diff_J[v]):
                    spins[v] *= -1
    return spins, ising_energy(spins, h, J)
Tries to find the spins that minimize the given Ising problem. Args: h (dict): A dictionary of the linear biases in the Ising problem. Should be of the form {v: bias, ...} for each variable v in the Ising problem. J (dict): A dictionary of the quadratic biases in the Ising problem. Should be a dict of the form {(u, v): bias, ...} for each edge (u, v) in the Ising problem. If J[(u, v)] and J[(v, u)] exist then the biases are added. beta_range (tuple, optional): A 2-tuple defining the beginning and end of the beta schedule (beta is the inverse temperature). The schedule is applied linearly in beta. Default is chosen based on the total bias associated with each node. num_sweeps (int, optional): The number of sweeps or steps. Default is 1000. Returns: dict: A sample as a dictionary of spins. float: The energy of the returned sample. Raises: TypeError: If the values in `beta_range` are not numeric. TypeError: If `num_sweeps` is not an int. TypeError: If `beta_range` is not a tuple. ValueError: If the values in `beta_range` are not positive. ValueError: If `beta_range` is not a 2-tuple. ValueError: If `num_sweeps` is not positive. https://en.wikipedia.org/wiki/Simulated_annealing
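A small usage sketch (assumes the module's helpers greedy_coloring, ising_energy and itervalues are in scope, as the function requires):

# Two-spin toy problem: the negative coupling favours aligned spins.
h = {0: -0.5, 1: 0.0}
J = {(0, 1): -1.0}
spins, energy = ising_simulated_annealing(h, J, num_sweeps=500)
# e.g. spins == {0: 1, 1: 1}; energy is the Ising energy of that sample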
4,631
def description(self, description):
    self._data['description'] = description  # key assumed from the setter's name
    request = self._base_request
    request['description'] = description
    return self._tc_requests.update(request, owner=self.owner)
Updates the security label's description. Args: description: the new description text
4,632
def authors(self): out = [] order = auth = namedtuple(, order) for author in self._citeInfoMatrix.get(): author = {k.split(":", 1)[-1]: v for k, v in author.items()} new = auth(name=author.get(), id=author.get(), surname=author.get(), initials=author.get(), url=author.get()) out.append(new) return out or None
A list of namedtuples storing author information, where each namedtuple corresponds to one author. The information in each namedtuple is (name surname initials id url). All entries are strings.
4,633
def refresh(self, conditional=False):
    headers = {}
    if conditional:
        # standard HTTP conditional-request headers, per the docstring
        if self.last_modified:
            headers['If-Modified-Since'] = self.last_modified
        elif self.etag:
            headers['If-None-Match'] = self.etag
    headers = headers or None
    json = self._json(self._get(self._api, headers=headers), 200)
    if json is not None:
        self.__init__(json, self._session)
    return self
Re-retrieve the information for this object and returns the refreshed instance. :param bool conditional: If True, then we will search for a stored header ('Last-Modified', or 'ETag') on the object and send that as described in the `Conditional Requests`_ section of the docs :returns: self The reasoning for the return value is the following example: :: repos = [r.refresh() for r in g.iter_repos('kennethreitz')] Without the return value, that would be an array of ``None``'s and you would otherwise have to do: :: repos = [r for r in g.iter_repos('kennethreitz')] [r.refresh() for r in repos] Which is really an anti-pattern. .. versionchanged:: 0.5 .. _Conditional Requests: http://developer.github.com/v3/#conditional-requests
4,634
def release(self, shortname): url = % shortname releases = yield self._get(url) if not releases: raise ReleaseNotFoundException( % shortname) release = Release.fromDict(releases[0]) release.connection = self defer.returnValue(release)
Get a specific release by its shortname. :param shortname: str, eg. "ceph-3-0" :returns: deferred that when fired returns a Release (Munch, dict-like) object representing this release. :raises: ReleaseNotFoundException if this release does not exist.
4,635
def make_hidden(self, request, queryset): queryset.update(status=HIDDEN) EntryPublishedVectorBuilder().cache_flush() self.message_user( request, _())
Set entries selected as hidden.
4,636
def remove(self, uuid, project=None): uuid = uuid.split()[0] with self._lock: run([, , .format(**locals()), , uuid, ])
Remove a task from Taskwarrior uuid -- the UUID of the task project -- not used
4,637
def build_template(self, template, template_file, package):
    try:
        from Cheetah.Compiler import Compiler
    except ImportError:
        self.announce("unable to import Cheetah.Compiler, build failed")
        raise
    else:
        comp = Compiler(file=template_file, moduleName=template)

    conf_fn = DEFAULT_CONFIG
    if exists(conf_fn):
        with open(conf_fn, "rt") as config:
            comp.updateSettingsFromConfigFileObj(config)

    comp.setShBang("")
    comp.addModuleHeader("pylint: disable=C,W,R,F")

    outfd = join(self.build_lib, *package.split("."))
    outfn = join(outfd, template + ".py")

    if not exists(outfd):
        makedirs(outfd)

    if newer(template_file, outfn):
        self.announce("compiling %s -> %s" % (template_file, outfd), 2)
        with open(outfn, "w") as output:
            output.write(str(comp))
Compile the Cheetah template in src into a Python file in build
4,638
def histogram(self, counts, bin_edges, linestyle='solid'):  # default lost; assumed
    if len(bin_edges) - 1 != len(counts):
        raise RuntimeError(
            'length of bin_edges should be length of counts + 1')  # message lost; assumed
    x = []
    y = []
    if self.use_radians:
        circle = 2 * np.pi
    else:
        circle = 360.
    step = circle / 1800.
    for i in range(len(bin_edges) - 1):
        for bin_edge in np.arange(bin_edges[i], bin_edges[i + 1], step=step):
            x.append(bin_edge)
            y.append(counts[i])
        x.append(bin_edges[i + 1])
        y.append(counts[i])
    # wrap around if the histogram spans the full circle
    if bin_edges[-1] % circle == bin_edges[0] % circle:
        x.append(bin_edges[0])
        y.append(counts[0])
    self.plot(x, y, mark=None, linestyle=linestyle)
Plot a polar histogram. The user needs to supply the histogram. This method only plots the results. You can use NumPy's histogram function. :param counts: array containing the count values. :param bin_edges: array containing the bin edges in degrees (or radians). :param linestyle: the line style used to connect the data points. May be None, or any line style accepted by TikZ (e.g. solid, dashed, dotted, thick, or even combinations like "red,thick,dashed"). Example:: >>> plot = artist.PolarPlot() >>> x = np.random.uniform(0, 360, size=1000) >>> n, bins = np.histogram(x, bins=np.linspace(0, 360, 37)) >>> plot.histogram(n, bins)
4,639
def markowitz_portfolio(cov_mat, exp_rets, target_ret, allow_short=False,
                        market_neutral=False):
    if not isinstance(cov_mat, pd.DataFrame):
        raise ValueError("Covariance matrix is not a DataFrame")
    if not isinstance(exp_rets, pd.Series):
        raise ValueError("Expected returns is not a Series")
    if not isinstance(target_ret, float):
        raise ValueError("Target return is not a float")
    if not cov_mat.index.equals(exp_rets.index):
        raise ValueError("Indices do not match")
    if market_neutral and not allow_short:
        warnings.warn("A market neutral portfolio implies shorting")
        allow_short = True

    n = len(cov_mat)
    P = opt.matrix(cov_mat.values)
    q = opt.matrix(0.0, (n, 1))

    # Inequality constraints Gx <= h: meet the target return and,
    # for long-only portfolios, keep weights non-negative.
    if not allow_short:
        G = opt.matrix(np.vstack((-exp_rets.values, -np.identity(n))))
        h = opt.matrix(np.vstack((-target_ret, +np.zeros((n, 1)))))
    else:
        G = opt.matrix(-exp_rets.values).T
        h = opt.matrix(-target_ret)

    # Equality constraint Ax = b: weights sum to one (or zero if market neutral).
    A = opt.matrix(1.0, (1, n))
    if not market_neutral:
        b = opt.matrix(1.0)
    else:
        b = opt.matrix(0.0)

    optsolvers.options['show_progress'] = False  # standard cvxopt option key
    sol = optsolvers.qp(P, q, G, h, A, b)

    if sol['status'] != 'optimal':  # standard cvxopt result keys
        warnings.warn("Convergence problem")

    weights = pd.Series(sol['x'], index=cov_mat.index)
    return weights
Computes a Markowitz portfolio. Parameters ---------- cov_mat: pandas.DataFrame Covariance matrix of asset returns. exp_rets: pandas.Series Expected asset returns (often historical returns). target_ret: float Target return of portfolio. allow_short: bool, optional If 'False' construct a long-only portfolio. If 'True' allow shorting, i.e. negative weights. market_neutral: bool, optional If 'False' sum of weights equals one. If 'True' sum of weights equal zero, i.e. create a market neutral portfolio (implies allow_short=True). Returns ------- weights: pandas.Series Optimal asset weights.
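A minimal usage sketch with toy numbers (requires cvxopt, which the module imports as `opt`/`optsolvers`):

import numpy as np
import pandas as pd

assets = ['A', 'B', 'C']
cov_mat = pd.DataFrame(np.diag([0.04, 0.09, 0.16]), index=assets, columns=assets)
exp_rets = pd.Series([0.05, 0.08, 0.12], index=assets)
weights = markowitz_portfolio(cov_mat, exp_rets, target_ret=0.07)
# weights is a pd.Series over assets, summing to 1 (long-only by default)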
4,640
def scan_file(path):
    path = os.path.abspath(path)
    if settings.USE_CLAMD:
        return clamd.scan_file(path)
    else:
        return clamscan.scan_file(path)
Scan `path` for viruses using ``clamd`` or ``clamscan`` (depends on :attr:`settings.USE_CLAMD`). Args: path (str): Relative or absolute path of file/directory you need to scan. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. Raises: ValueError: When the server is not running. AssertionError: When the internal file doesn't exist.
4,641
def _get_line_offset(self): nlines = int(self.mda[]) loff = np.float32(self.mda[]) if self.is_segmented: segment_number = self.mda[] - 1 loff -= (self.mda[] - segment_number - 1) * nlines elif self.area_id in (NORTH_HEMIS, SOUTH_HEMIS): loff = nlines - loff elif self.area_id == UNKNOWN_AREA: logger.error() return loff
Get line offset for the current segment Read line offset from the file and adapt it to the current segment or half disk scan so that y(l) ~ l - loff because this is what get_geostationary_area_extent() expects.
4,642
def page_not_found(request, template_name="errors/404.html"):
    context = {
        "STATIC_URL": settings.STATIC_URL,
        "request_path": request.path,
    }
    t = get_template(template_name)
    return HttpResponseNotFound(t.render(context, request))
Mimics Django's 404 handler but with a different template path.
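To use it, point Django's handler404 at the view in the root URLconf (the module path here is illustrative):

# urls.py
handler404 = 'myapp.views.page_not_found'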
4,643
def non_rotational_device(self, name, controller_port, device, non_rotational):
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    if not isinstance(controller_port, baseinteger):
        raise TypeError("controller_port can only be an instance of type baseinteger")
    if not isinstance(device, baseinteger):
        raise TypeError("device can only be an instance of type baseinteger")
    if not isinstance(non_rotational, bool):
        raise TypeError("non_rotational can only be an instance of type bool")
    self._call("nonRotationalDevice",
               in_p=[name, controller_port, device, non_rotational])
Sets a flag in the device information which indicates that the medium is not based on rotational technology, i.e. that the access times are more or less independent of the position on the medium. This may or may not be supported by a particular drive, and is silently ignored in the latter case. At the moment only hard disks (which is a misnomer in this context) accept this setting. Changing the setting while the VM is running is forbidden. The device must already exist; see :py:func:`IMachine.attach_device` for how to attach a new device. The @a controllerPort and @a device parameters specify the device slot and have the same meaning as with :py:func:`IMachine.attach_device` . in name of type str Name of the storage controller. in controller_port of type int Storage controller port. in device of type int Device slot in the given port. in non_rotational of type bool New value for the non-rotational device flag. raises :class:`OleErrorInvalidarg` SATA device, SATA port, IDE port or IDE slot out of range. raises :class:`VBoxErrorInvalidObjectState` Attempt to modify an unregistered virtual machine. raises :class:`VBoxErrorInvalidVmState` Invalid machine state.
4,644
def alliance(self) -> Union[EveAllianceInfo, None]:
    if self.alliance_id is None:
        return None
    return EveAllianceInfo.objects.get(alliance_id=self.alliance_id)
Pseudo foreign key from alliance_id to EveAllianceInfo :raises: EveAllianceInfo.DoesNotExist :return: EveAllianceInfo or None
4,645
def find(key):
    docs = list(collection.find({KEY_FIELD: key}))
    if not docs:
        return None
    pickled_value = docs[0][VALUE_FIELD]
    return pickle.loads(pickled_value)
Return the value associated with a key. If there is no value with the given key, returns ``None``.
4,646
def _tf_repeat(self, a, repeats):
    if len(a.get_shape()) != 1:
        raise AssertionError("This is not a 1D Tensor")
    a = tf.expand_dims(a, -1)
    a = tf.tile(a, [1, repeats])
    a = self.tf_flatten(a)
    return a
Tensorflow version of np.repeat for 1D
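The tile-then-flatten trick above can be sanity-checked against np.repeat on 1-D input:

import numpy as np

a = np.array([1, 2, 3])
tiled = np.tile(a[:, None], (1, 2)).reshape(-1)
assert (tiled == np.repeat(a, 2)).all()   # [1, 1, 2, 2, 3, 3]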
4,647
def file_and_line(self):
    ret = "%s:%d" % (self.source_file.path, self.lineno)
    if self.from_source_file:
        ret += " (%s:%d)" % (self.from_source_file.path, self.from_lineno)
    return ret
Return the filename and line number where this rule originally appears, in the form "foo.scss:3". Used for error messages.
4,648
def dump(voevent, file, pretty_print=True, xml_declaration=True): file.write(dumps(voevent, pretty_print, xml_declaration))
Writes the voevent to the file object. e.g.:: with open('/tmp/myvoevent.xml','wb') as f: voeventparse.dump(v, f) Args: voevent(:class:`Voevent`): Root node of the VOevent etree. file (io.IOBase): An open (binary mode) file object for writing. pretty_print(bool): See :func:`dumps` xml_declaration(bool): See :func:`dumps`
4,649
def _get_branches(self, closed=False):
    if self._empty:
        return {}

    def _branchtags(localrepo):
        bt = {}
        bt_closed = {}
        for bn, heads in localrepo.branchmap().iteritems():
            tip = heads[-1]
            # 'close' key assumed: mercurial marks closed branch heads
            # in the changelog extra dict
            if 'close' in localrepo.changelog.read(tip)[5]:
                bt_closed[bn] = tip
            else:
                bt[bn] = tip
        if closed:
            bt.update(bt_closed)
        return bt

    sortkey = lambda ctx: ctx[0]
    _branches = [(safe_unicode(n), hex(h),)
                 for n, h in _branchtags(self._repo).items()]
    return OrderedDict(sorted(_branches, key=sortkey, reverse=False))
Gets branches for this repository. Returns only non-closed branches by default. :param closed: return also closed branches for mercurial
4,650
def data(self):
    if not self._data:
        self._data = self.content_parser(self.content)
    return self._data
A :class:`dict` of data parsed from :attr:`.content`.
4,651
def collect_fields(node):
    fields = set()
    for leaf in node:
        if leaf.get('kind', None) == "Field":  # key assumed from GraphQL AST convention
            fields.add(leaf["name"]["value"])
        if leaf.get("selection_set", None):
            fields = fields.union(collect_fields(leaf["selection_set"]["selections"]))
    return fields
Get all the unique field names that are eligible for optimization Requested a function like this be added to the ``info`` object upstream in graphene_django: https://github.com/graphql-python/graphene-django/issues/230
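A hand-built fragment in the dict shape the helper expects (shape inferred from the function body above, not from an official API):

selections = [
    {'kind': 'Field', 'name': {'value': 'id'}},
    {'kind': 'Field', 'name': {'value': 'author'},
     'selection_set': {'selections': [
         {'kind': 'Field', 'name': {'value': 'name'}},
     ]}},
]
print(collect_fields(selections))  # {'id', 'author', 'name'}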
4,652
def invariant(self):
    assert isinstance(self.description, str)
    assert isinstance(self.singleNodeOnly, bool)
    assert isinstance(self.inputs, dict)
    assert isinstance(self.outputs, dict)
    assert isinstance(self.parameters, dict)
    assert isinstance(self.commands, dict)

    hasDefaultInput = False
    for k, v in self.inputs.items():
        assert isinstance(k, str)
        assert isinstance(v, InputSpec)
        v.invariant()
        if v.isDefaultInput:
            assert not hasDefaultInput
            hasDefaultInput = True

    hasDefaultOutput = False
    for k, v in self.outputs.items():
        assert isinstance(k, str)
        assert isinstance(v, OutputSpec)
        v.invariant()
        if v.isDefaultOutput:
            assert not hasDefaultOutput
            hasDefaultOutput = True

    for k, v in self.parameters.items():
        assert isinstance(k, str)
        assert isinstance(v, ParameterSpec)
        v.invariant()

    for k, v in self.commands.items():
        assert isinstance(k, str)
        assert isinstance(v, CommandSpec)
        v.invariant()
Verify the validity of the node spec object. The type of each sub-object is verified and then the validity of each node spec item is verified by calling its invariant() method. It also makes sure that there is at most one default input and one default output.
4,653
def query_mxrecords(self):
    import dns.resolver
    logging.info('Resolving MX records for %s' % self.domain)  # message lost; assumed
    answers = dns.resolver.query(self.domain, 'MX')
    addresses = [answer.exchange.to_text() for answer in answers]
    logging.info('Found {} MX records: {}'.format(
        len(addresses), ', '.join(addresses)))  # message lost; assumed
    return addresses
Looks up for the MX DNS records of the recipient SMTP server
4,654
def update_message_type(self, message_type):
    self._validate_uuid(message_type.message_type_id)
    url = "/notification/v1/message-type/{}".format(
        message_type.message_type_id)
    response = NWS_DAO().putURL(
        url, self._write_headers(), self._json_body(
            message_type.json_data()))
    if response.status != 204:
        raise DataFailureException(url, response.status, response.data)
    return response.status
Update an existing message type :param message_type: the updated message type that the client wants to persist
4,655
def get_projection_on_elements(self, structure):
    dico = {}
    for spin in self.data.keys():
        dico[spin] = [[defaultdict(float) for i in range(self.nkpoints)]
                      for j in range(self.nbands)]
    for iat in range(self.nions):
        name = structure.species[iat].symbol
        for spin, d in self.data.items():
            for k, b in itertools.product(range(self.nkpoints), range(self.nbands)):
                dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
    return dico
Method returning a dictionary of projections on elements. Args: structure (Structure): Input structure. Returns: a dictionary of the form {Spin.up: [k index][b index]{Element: values}}
4,656
def inorder(self, funct, stopOn=None):
    if stopOn is None:
        for i in self.children:
            i.inorder(funct)
    else:
        for i in self.children:
            if i.inorder(funct) == stopOn:
                return stopOn
    return funct(self)
Iterates in order, calling the function with the current node. If stopOn is set to True or False, it will stop on true or false.
4,657
def p_file_chksum_1(self, p): try: if six.PY2: value = p[2].decode(encoding=) else: value = p[2] self.builder.set_file_chksum(self.document, value) except OrderError: self.order_error(, , p.lineno(1)) except CardinalityError: self.more_than_one_error(, p.lineno(1))
file_chksum : FILE_CHKSUM CHKSUM
4,658
def wait(self, timeout=None):
    logger = logging.getLogger(__name__)
    if int(self.max_sleep_interval) < int(self._min_sleep_interval):
        self.max_sleep_interval = int(self._min_sleep_interval)
    t0 = time.time()
    sleep_seconds = min(5, self.max_sleep_interval)
    status = self.status
    prev_status = status
    while status < COMPLETED:
        logger.debug("sleep for %d seconds", sleep_seconds)
        time.sleep(sleep_seconds)
        # exponential backoff, capped at max_sleep_interval
        if 2 * sleep_seconds <= self.max_sleep_interval:
            sleep_seconds *= 2
        if timeout is not None:
            if int(time.time() - t0) > int(timeout):
                return
        status = self.status
        if status != prev_status:
            sleep_seconds = min(5, self.max_sleep_interval)
            prev_status = status
Wait until the result is available or until roughly timeout seconds pass.
4,659
def _extract_ips(self, data):
    result = []
    for region in data.items():
        for interface in region[1]:
            result.append(interface['addr'])  # key restored from the docstring example
    return result
Extract ip addresses from openstack structure { 'pl-krk-2-int-301-c2-int-1': [ { 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:29:f1:bb', 'version': 4, 'addr': '10.185.138.36', 'OS-EXT-IPS:type': 'fixed' } ] } :arg data: dict :returns: list
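A quick check against the structure from the docstring:

data = {
    'pl-krk-2-int-301-c2-int-1': [
        {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:29:f1:bb',
         'version': 4,
         'addr': '10.185.138.36',
         'OS-EXT-IPS:type': 'fixed'}
    ]
}
# the method never touches self, so: _extract_ips(None, data) -> ['10.185.138.36']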
4,660
def _set_show_vcs(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_vcs.show_vcs, is_leaf=True, yang_name="show-vcs", rest_name="show-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "rpc", : , }) self.__show_vcs = t if hasattr(self, ): self._set()
Setter method for show_vcs, mapped from YANG variable /brocade_vcs_rpc/show_vcs (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_vcs is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_vcs() directly.
4,661
def Woldesemayat_Ghajar(x, rhol, rhog, sigma, m, D, P, angle=0, g=g):
    vgs = m*x/(rhog*pi/4*D**2)
    vls = m*(1-x)/(rhol*pi/4*D**2)
    first = vgs*(1 + (vls/vgs)**((rhog/rhol)**0.1))
    second = 2.9*((g*D*sigma*(1 + cos(radians(angle)))*(rhol-rhog))/rhol**2)**0.25
    third = (1.22 + 1.22*sin(radians(angle)))**(101325./P)
    return vgs/(first + second*third)
r'''Calculates void fraction in two-phase flow according to the model of [1]_. .. math:: \alpha = \frac{v_{gs}}{v_{gs}\left(1 + \left(\frac{v_{ls}}{v_{gs}} \right)^{\left(\frac{\rho_g}{\rho_l}\right)^{0.1}}\right) + 2.9\left[\frac{gD\sigma(1+\cos\theta)(\rho_l-\rho_g)} {\rho_l^2}\right]^{0.25}(1.22 + 1.22\sin\theta)^{\frac{P}{P_{atm}}}} .. math:: v_{gs} = \frac{mx}{\rho_g \frac{\pi}{4}D^2} .. math:: v_{ls} = \frac{m(1-x)}{\rho_l \frac{\pi}{4}D^2} Parameters ---------- x : float Quality at the specific tube interval [] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] sigma : float Surface tension of liquid [N/m] m : float Mass flow rate of both phases, [kg/s] D : float Diameter of the channel, [m] P : float Pressure of the fluid, [Pa] angle : float Angle of the channel with respect to the horizontal (vertical = 90), [degrees] g : float, optional Acceleration due to gravity, [m/s^2] Returns ------- alpha : float Void fraction (area of gas / total area of channel), [-] Notes ----- Strongly recommended. Examples -------- >>> Woldesemayat_Ghajar(0.4, 800., 2.5, sigma=0.2, m=1, D=0.3, P=1E6, angle=45) 0.7640815513429202 References ---------- .. [1] Woldesemayat, Melkamu A., and Afshin J. Ghajar. "Comparison of Void Fraction Correlations for Different Flow Patterns in Horizontal and Upward Inclined Pipes." International Journal of Multiphase Flow 33, no. 4 (April 2007): 347-370. doi:10.1016/j.ijmultiphaseflow.2006.09.004.
4,662
def entrez(args): p = OptionParser(entrez.__doc__) allowed_databases = {"fasta": ["genome", "nuccore", "nucgss", "protein", "nucest"], "asn.1": ["genome", "nuccore", "nucgss", "protein", "gene"], "xml": ["genome", "nuccore", "nucgss", "nucest", "gene"], "gb": ["genome", "nuccore", "nucgss"], "est": ["nucest"], "gss": ["nucgss"], "acc": ["nuccore"], } valid_formats = tuple(allowed_databases.keys()) valid_databases = ("genome", "nuccore", "nucest", "nucgss", "protein", "gene") p.add_option("--noversion", dest="noversion", default=False, action="store_true", help="Remove trailing accession versions") p.add_option("--format", default="fasta", choices=valid_formats, help="download format [default: %default]") p.add_option("--database", default="nuccore", choices=valid_databases, help="search database [default: %default]") p.add_option("--retmax", default=1000000, type="int", help="how many results to return [default: %default]") p.add_option("--skipcheck", default=False, action="store_true", help="turn off prompt to check file existence [default: %default]") p.add_option("--batchsize", default=500, type="int", help="download the results in batch for speed-up [default: %default]") p.set_outdir(outdir=None) p.add_option("--outprefix", default="out", help="output file name prefix [default: %default]") p.set_email() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) filename, = args if op.exists(filename): pf = filename.rsplit(".", 1)[0] list_of_terms = [row.strip() for row in open(filename)] if opts.noversion: list_of_terms = [x.rsplit(".", 1)[0] for x in list_of_terms] else: pf = filename list_of_terms = [filename.strip()] fmt = opts.format database = opts.database batchsize = opts.batchsize assert database in allowed_databases[fmt], \ "For output format , allowed databases are: {1}".\ format(fmt, allowed_databases[fmt]) assert batchsize >= 1, "batchsize must >= 1" if " " in pf: pf = opts.outprefix outfile = "{0}.{1}".format(pf, fmt) outdir = opts.outdir if outdir: mkdir(outdir) if not outdir: fw = must_open(outfile, "w", checkexists=True, skipcheck=opts.skipcheck) if fw is None: return seen = set() totalsize = 0 for id, size, term, handle in batch_entrez(list_of_terms, retmax=opts.retmax, rettype=fmt, db=database, batchsize=batchsize, email=opts.email): if outdir: outfile = urljoin(outdir, "{0}.{1}".format(term, fmt)) fw = must_open(outfile, "w", checkexists=True, skipcheck=opts.skipcheck) if fw is None: continue rec = handle.read() if id in seen: logging.error("Duplicate key ({0}) found".format(rec)) continue totalsize += size print(rec, file=fw) print(file=fw) seen.add(id) if seen: print("A total of {0} {1} records downloaded.". format(totalsize, fmt.upper()), file=sys.stderr) return outfile
%prog entrez <filename|term> `filename` contains a list of terms to search. Or just one term. If the results are small in size, e.g. "--format=acc", use "--batchsize=100" to speed the download.
4,663
async def _auth_plain(self, username, password):
    mechanism = "PLAIN"
    credentials = "\0{}\0{}".format(username, password)
    encoded_credentials = SMTP.b64enc(credentials)
    try:
        code, message = await self.do_cmd(
            "AUTH", mechanism, encoded_credentials, success=(235, 503)
        )
    except SMTPCommandFailedError as e:
        raise SMTPAuthenticationError(e.code, e.message, mechanism)
    return code, message
Performs an authentication attempt using the PLAIN mechanism. Protocol: 1. Format the username and password in a suitable way ; 2. The formatted string is base64-encoded ; 3. The string 'AUTH PLAIN' and a space character are prepended to the base64-encoded username and password and sent to the server ; 4. If the server replies with a 235 return code, user is authenticated. Args: username (str): Identifier of the user trying to authenticate. password (str): Password for the user. Raises: ConnectionResetError: If the connection with the server is unexpectedly lost. SMTPAuthenticationError: If the authentication attempt fails. Returns: (int, str): A (code, message) 2-tuple containing the server response.
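The credential formatting in steps 1-2 can be checked directly with the standard library:

import base64

username, password = 'alice', 'wonderland'
credentials = '\0{}\0{}'.format(username, password)
print(base64.b64encode(credentials.encode('ascii')))
# b'AGFsaWNlAHdvbmRlcmxhbmQ='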
4,664
def readExcel(usr_path=""):
    global cwd, files
    start = clock()
    files[".xls"] = []
    __read(usr_path, ".xls")
    end = clock()
    logger_benchmark.info(log_benchmark("readExcel", start, end))
    return cwd
Read Excel file(s) Enter a file path, directory path, or leave args blank to trigger gui. :param str usr_path: Path to file / directory (optional) :return str cwd: Current working directory
4,665
def get_relationship_dicts(self):
    if not self.relationships:
        return None
    for goid, goobj in self.go2obj.items():
        for reltyp, relset in goobj.relationship.items():
            relfwd_goids = set(o.id for o in relset)
            print("CountRelativesInit RELLLLS", goid, goobj.id, reltyp, relfwd_goids)
Given GO DAG relationships, return summaries per GO ID.
4,666
def _to_query_json(self):
    # Key names assumed from BigQuery's CSV import options
    return {
        'quote': self._quote,
        'fieldDelimiter': self._delimiter,
        'encoding': self._encoding.upper(),
        'skipLeadingRows': self._skip_leading_rows,
        'allowQuotedNewlines': self._allow_quoted_newlines,
        'allowJaggedRows': self._allow_jagged_rows
    }
Return the options as a dictionary to be used as JSON in a query job.
4,667
def _get_section(name, source):
    # The original regex literal was lost; the pattern below is assumed
    # (it matches a "<name>:" header plus its indented block, as in docopt).
    pattern = re.compile(
        '^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
        re.IGNORECASE | re.MULTILINE)
    usage = None
    for section in pattern.findall(source):
        usage = _merge_section(usage, section.strip())
    return usage
Extract the named section from the source. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A string containing only the requested section. If the section appears multiple times, each instance will be merged into a single section.
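A usage sketch under the assumed pattern above (also assumes the module's _merge_section helper is in scope):

source = '''Tool description.

Usage:
  tool [options] <file>

Options:
  -h --help  Show help.
'''
print(_get_section('usage', source))
# Usage:
#   tool [options] <file>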
4,668
def create_from_file_extension(cls, file_extension):
    ext = os.path.splitext(file_extension)[1]
    if typepy.is_null_string(ext):
        file_extension = file_extension
    else:
        file_extension = ext
    file_extension = file_extension.lstrip(".").lower()
    for table_format in TableFormat:
        if file_extension not in table_format.file_extensions:
            continue
        if table_format.format_attribute & FormatAttr.SECONDARY_EXT:
            continue
        return table_format.writer_class()
    raise WriterNotFoundError(
        "\n".join(
            [
                "{:s} (unknown file extension).".format(file_extension),
                "",
                "acceptable file extensions are: {}.".format(", ".join(cls.get_extensions())),
            ]
        )
    )
Create a table writer class instance from a file extension. Supported file extensions are as follows: ================== =================================== Extension Writer Class ================== =================================== ``".csv"`` :py:class:`~.CsvTableWriter` ``".htm"`` :py:class:`~.HtmlTableWriter` ``".html"`` :py:class:`~.HtmlTableWriter` ``".js"`` :py:class:`~.JavaScriptTableWriter` ``".json"`` :py:class:`~.JsonTableWriter` ``".jsonl"`` :py:class:`~.JsonLinesTableWriter` ``".ltsv"`` :py:class:`~.LtsvTableWriter` ``".ldjson"`` :py:class:`~.JsonLinesTableWriter` ``".md"`` :py:class:`~.MarkdownTableWriter` ``".ndjson"`` :py:class:`~.JsonLinesTableWriter` ``".py"`` :py:class:`~.PythonCodeTableWriter` ``".rst"`` :py:class:`~.RstGridTableWriter` ``".tsv"`` :py:class:`~.TsvTableWriter` ``".xls"`` :py:class:`~.ExcelXlsTableWriter` ``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter` ``".sqlite"`` :py:class:`~.SqliteTableWriter` ``".sqlite3"`` :py:class:`~.SqliteTableWriter` ``".toml"`` :py:class:`~.TomlTableWriter` ================== =================================== :param str file_extension: File extension string (case insensitive). :return: Writer instance that coincides with the ``file_extension``. :rtype: :py:class:`~pytablewriter.writer._table_writer.TableWriterInterface` :raises pytablewriter.WriterNotFoundError: |WriterNotFoundError_desc| the file extension.
4,669
def send(self, message, binary=None):
    # Default reconstructed as None: the body tests "binary is None",
    # so the original default was presumably None, not False.
    if binary is None:
        binary = not isinstance(message, six.string_types)
    opcode = self.OPCODE_BINARY if binary else self.OPCODE_TEXT
    try:
        self.send_frame(message, opcode)
    except WebSocketError:
        raise WebSocketError("Socket is dead")
Send a frame over the websocket with message as its payload
4,670
def stem(self, word):
    word = normalize('NFC', text_type(word.lower()))  # normalization form assumed
    word = word.translate(self._umlauts)
    wlen = len(word) - 1
    # Suffix literals reconstructed to reproduce the docstring examples
    if wlen > 3:
        if wlen > 5:
            if word[-3:] == 'nen':
                return word[:-3]
        if wlen > 4:
            if word[-2:] in {'en', 'se', 'es', 'er'}:
                return word[:-2]
        if word[-1] in {'e', 'n', 'r', 's'}:
            return word[:-1]
    return word
Return CLEF German stem. Parameters ---------- word : str The word to stem Returns ------- str Word stem Examples -------- >>> stmr = CLEFGerman() >>> stmr.stem('lesen') 'lese' >>> stmr.stem('graues') 'grau' >>> stmr.stem('buchstabieren') 'buchstabier'
4,671
def get_message(self, set_slave_ok, sock_info, use_cmd=False): if set_slave_ok: flags = self.flags | 4 else: flags = self.flags ns = self.namespace() spec = self.spec if use_cmd: spec = self.as_command(sock_info)[0] if sock_info.op_msg_enabled: request_id, msg, size, _ = _op_msg( 0, spec, self.db, self.read_preference, set_slave_ok, False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size ns = _UJOIN % (self.db, "$cmd") ntoreturn = -1 else: ntoreturn = self.batch_size == 1 and 2 or self.batch_size if self.limit: if ntoreturn: ntoreturn = min(self.limit, ntoreturn) else: ntoreturn = self.limit if sock_info.is_mongos: spec = _maybe_add_read_preference(spec, self.read_preference) return query(flags, ns, self.ntoskip, ntoreturn, spec, None if use_cmd else self.fields, self.codec_options, ctx=sock_info.compression_context)
Get a query message, possibly setting the slaveOk bit.
4,672
def indexbox(message="Shall I continue?", title="", choices=["Yes", "No"]):
    reply = buttonbox(message, title, choices)
    index = -1
    for choice in choices:
        index = index + 1
        if reply == choice:
            return index
Original doc: Display a buttonbox with the specified choices. Return the index of the choice selected.
4,673
async def submit_batches(self, request):
    timer_ctx = self._post_batches_total_time.time()
    self._post_batches_count.inc()
    # Header/content literals below were lost in extraction; values assumed
    # from the docstring ("body: octet-stream BatchList ...").
    if request.headers['Content-Type'] != 'application/octet-stream':
        LOGGER.debug(
            'Submission headers had wrong Content-Type: %s',
            request.headers['Content-Type'])
        self._post_batches_error.inc()
        raise errors.SubmissionWrongContentType()
    body = await request.read()
    if not body:
        LOGGER.debug('Submission contained an empty body')
        self._post_batches_error.inc()
        raise errors.NoBatchesSubmitted()
    try:
        batch_list = BatchList()
        batch_list.ParseFromString(body)
    except DecodeError:
        LOGGER.debug('Submission body could not be decoded: %s', body)
        self._post_batches_error.inc()
        raise errors.BadProtobufSubmitted()
    error_traps = [error_handlers.BatchInvalidTrap,
                   error_handlers.BatchQueueFullTrap]
    validator_query = client_batch_submit_pb2.ClientBatchSubmitRequest(
        batches=batch_list.batches)
    with self._post_batches_validator_time.time():
        await self._query_validator(
            Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_batch_submit_pb2.ClientBatchSubmitResponse,
            validator_query, error_traps)
    id_string = ','.join(b.header_signature for b in batch_list.batches)
    status = 202
    link = self._build_url(request, path='/batch_statuses', id=id_string)  # path assumed
    retval = self._wrap_response(
        request, metadata={'link': link}, status=status)
    timer_ctx.stop()
    return retval
Accepts a binary encoded BatchList and submits it to the validator. Request: body: octet-stream BatchList of one or more Batches Response: status: - 202: Batches submitted and pending link: /batches or /batch_statuses link for submitted batches
4,674
def deploy_master_contract(self, deployer_account=None, deployer_private_key=None) -> str:
    assert deployer_account or deployer_private_key
    deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key)
    safe_contract = self.get_contract()
    tx = safe_contract.constructor().buildTransaction({'from': deployer_address})
    tx_hash = self.ethereum_client.send_unsigned_transaction(
        tx, private_key=deployer_private_key, public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
    assert tx_receipt.status
    contract_address = tx_receipt.contractAddress

    # Initialize the master copy with two dummy owners and threshold 2
    master_safe = self.get_contract(contract_address)
    tx = master_safe.functions.setup(
        ["0x0000000000000000000000000000000000000002",
         "0x0000000000000000000000000000000000000003"],
        2,             # threshold
        NULL_ADDRESS,  # to
        b'',           # data (byte literal restored as empty)
        NULL_ADDRESS,  # payment token
        0,             # payment
        NULL_ADDRESS   # payment receiver
    ).buildTransaction({'from': deployer_address})
    tx_hash = self.ethereum_client.send_unsigned_transaction(
        tx, private_key=deployer_private_key, public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
    assert tx_receipt.status
    logger.info("Deployed and initialized Safe Master Contract=%s by %s",
                contract_address, deployer_address)
    return contract_address
Deploy master contract. Takes deployer_account (if unlocked in the node) or the deployer private key :param deployer_account: Unlocked ethereum account :param deployer_private_key: Private key of an ethereum account :return: deployed contract address
4,675
def soaproot(self, node):
    root = node.getAttribute('root', ns=soapenc)  # attribute name from the docstring
    if root is None:
        return True
    else:
        return root.value == '1'
Get whether the specified I{node} is a soap encoded root. This is determined by examining @soapenc:root='1'. The node is considered to be a root when the attribute is not specified. @param node: A node to evaluate. @type node: L{Element} @return: True if a soap encoded root. @rtype: bool
4,676
def _build_session(self, name, start_info, end_info):
    assert start_info is not None
    result = api_pb2.Session(
        name=name,
        start_time_secs=start_info.start_time_secs,
        model_uri=start_info.model_uri,
        metric_values=self._build_session_metric_values(name),
        monitor_url=start_info.monitor_url)
    if end_info is not None:
        result.status = end_info.status
        result.end_time_secs = end_info.end_time_secs
    return result
Builds a session object.
4,677
def expire_soon(self, seconds):
    if self.expiration_time:
        return self.expiration_time < int(time.time()) + int(seconds)
    else:
        return False
Returns ``True`` if credentials expire sooner than specified. :param int seconds: Number of seconds. :returns: ``True`` if credentials expire sooner than specified, else ``False``.
4,678
def show_corrections(self, status=None, nids=None):
    nrows, ncols = get_terminal_size()
    count = 0
    for task in self.iflat_tasks(status=status, nids=nids):
        if task.num_corrections == 0:
            continue
        count += 1
        print(make_banner(str(task), width=ncols, mark="="))
        for corr in task.corrections:
            pprint(corr)
    if not count:
        print("No correction found.")
    return count
Show the corrections applied to the flow at run-time. Args: status: if not None, only the tasks with this status are selected. nids: optional list of node identifiers used to filter the tasks. Return: The number of corrections found.
4,679
def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
    data_shapes = {k: v.shape for k, v in arg_dict.items()}
    self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
    internal_sym = self.sym.get_internals()[sym_name]
    data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
                   for k, v in self.data_shapes.items()
                   if k in internal_sym.list_arguments()}
    params = {k: v for k, v in self.params.items()
              if k in internal_sym.list_arguments()}
    aux_states = {k: v for k, v in self.aux_states.items()
                  if k in internal_sym.list_auxiliary_states()}
    exe = internal_sym.bind(ctx=self.ctx,
                            args=dict(params, **data_inputs),
                            args_grad=None,
                            grad_req='null',  # literal assumed: no gradients needed
                            aux_states=aux_states,
                            shared_exec=self.exe)
    for k, v in arg_dict.items():
        exe.arg_dict[k][:] = v
    exe.forward(is_train=False)
    assert 1 == len(exe.outputs)
    for output in exe.outputs:
        output.wait_to_read()
    return exe.outputs[0]
View the internal symbols using the forward function.

:param sym_name: name of the internal symbol to compute
:param bucket_kwargs: keyword arguments used to select/switch the bucket
:param arg_dict: input arrays keyed by argument name
:return: the output NDArray of the internal symbol
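Usage sketch for inspecting an internal layer; `net`, the symbol name 'fc1_output', and the input name 'data' are hypothetical placeholders for this example.

import numpy as np

# Forward up to an internal symbol and read back its activations.
activations = net.compute_internal('fc1_output', data=np.random.rand(1, 3, 32, 32))
print(activations.shape)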
4,680
def _shutdown(self, manual):
    if self._ssl is None:
        return

    while True:
        result = libssl.SSL_shutdown(self._ssl)
        if result >= 0:
            break
        error = libssl.SSL_get_error(self._ssl, result)
        if error == LibsslConst.SSL_ERROR_WANT_READ:
            if self._raw_read() != b'':
                continue
            else:
                break
        elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
            self._raw_write()
            continue
        else:
            handle_openssl_error(0, TLSError)

    if manual:
        self._local_closed = True

    libssl.SSL_free(self._ssl)
    self._ssl = None
    self._rbio = None
    self._wbio = None

    try:
        self._socket.shutdown(socket_.SHUT_RDWR)
    except (socket_.error):
        pass
Shuts down the TLS session and then shuts down the underlying socket :param manual: A boolean if the connection was manually shutdown
4,681
def get_url_shortener():
    try:
        backend_module = import_module(URL_SHORTENER_BACKEND)
        backend = getattr(backend_module, 'backend')
    except (ImportError, AttributeError):
        warnings.warn('%s backend cannot be imported' % URL_SHORTENER_BACKEND,
                      RuntimeWarning)
        backend = default_backend
    except ImproperlyConfigured as e:
        warnings.warn(str(e), RuntimeWarning)
        backend = default_backend
    return backend
Return the selected URL shortener backend.
4,682
def register_model(model_name, dataset_name, model_func): model_map = _get_model_map(dataset_name) if model_name in model_map: raise ValueError("Model \"%s\" is already registered for dataset" "\"%s\"" % (model_name, dataset_name)) model_map[model_name] = model_func
Register a new model that can be obtained with `get_model_config`.
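Registration sketch; the builder function, its signature, and the name pair are assumptions for illustration, since `get_model_config` is defined elsewhere.

# Hypothetical registration of a builder under ('resnet50', 'imagenet').
def build_resnet50(config):  # placeholder builder for this sketch
    raise NotImplementedError

register_model('resnet50', 'imagenet', build_resnet50)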
4,683
def match(self, environ):
    verb = environ['REQUEST_METHOD'].upper()
    path = environ['PATH_INFO'] or '/'
    target = None
    if verb == 'HEAD':
        methods = ['PROXY', verb, 'GET', 'ANY']
    else:
        methods = ['PROXY', verb, 'ANY']

    for method in methods:
        if method in self.static and path in self.static[method]:
            target, getargs = self.static[method][path]
            return target, getargs(path) if getargs else {}
        elif method in self.dyna_regexes:
            for combined, rules in self.dyna_regexes[method]:
                match = combined(path)
                if match:
                    target, getargs = rules[match.lastindex - 1]
                    return target, getargs(path) if getargs else {}

    # No matching route found. Collect alternative methods for a 405 response.
    allowed = set([])
    nocheck = set(methods)
    for method in set(self.static) - nocheck:
        if path in self.static[method]:
            allowed.add(verb)
    for method in set(self.dyna_regexes) - allowed - nocheck:
        for combined, rules in self.dyna_regexes[method]:
            match = combined(path)
            if match:
                allowed.add(method)
    if allowed:
        allow_header = ",".join(sorted(allowed))
        raise HTTPError(405, "Method not allowed.", Allow=allow_header)

    # No matching route and no alternative method found.
    raise HTTPError(404, "Not found: " + repr(path))
Return a (target, url_args) tuple or raise HTTPError(400/404/405).
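Dispatch sketch; `router` stands in for an instance with routes already installed, and the environ keys are the standard WSGI ones.

environ = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/hello/world'}
try:
    target, url_args = router.match(environ)
    body = target(**url_args)
except HTTPError as e:
    body = e  # carries 404, or 405 plus an Allow header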
4,684
def step(self, action): input_1 = self._make_input(action[:7], self.env._right_hand_quat) if self.env.mujoco_robot.name == "sawyer": velocities = self.controller.get_control(**input_1) low_action = np.concatenate([velocities, action[7:]]) elif self.env.mujoco_robot.name == "baxter": input_2 = self._make_input(action[7:14], self.env._left_hand_quat) velocities = self.controller.get_control(input_1, input_2) low_action = np.concatenate([velocities, action[14:]]) else: raise Exception( "Only Sawyer and Baxter robot environments are supported for IK " "control currently." ) for i in range(self.action_repeat): ret = self.env.step(low_action) velocities = self.controller.get_control() if self.env.mujoco_robot.name == "sawyer": low_action = np.concatenate([velocities, action[7:]]) else: low_action = np.concatenate([velocities, action[14:]]) return ret
Move the end effector(s) according to the input control.

Args:
    action (numpy array): The array should have the corresponding elements.
        0-2: The desired change in end effector position in x, y, and z.
        3-6: The desired change in orientation, expressed as a (x, y, z, w) quaternion.
            Note that this quaternion encodes a relative rotation with respect to the
            current gripper orientation. If the current rotation is r, this corresponds
            to a quaternion d such that r * d will be the new rotation.
        *: Controls for gripper actuation.

        Note: When wrapping around a Baxter environment, the indices 0-6 indicate the
            right hand. Indices 7-13 indicate the left hand, and the rest (*) are the
            gripper inputs (first right, then left).
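Action-layout sketch for a Sawyer arm (8 values: 3 position deltas, 4 quaternion components, 1 gripper input); `ik_wrapper` and the gripper sign convention are assumptions.

import numpy as np

# Nudge the end effector 1 cm along +x, keep the orientation (identity
# quaternion), and actuate the gripper.
action = np.array([0.01, 0.0, 0.0,        # dx, dy, dz
                   0.0, 0.0, 0.0, 1.0,    # (x, y, z, w) relative rotation
                   -1.0])                 # gripper input (sign assumed)
obs, reward, done, info = ik_wrapper.step(action)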
4,685
def transform_to_chomsky_normal_form(grammar, inplace=False): if inplace is False: grammar = copy(grammar) fill = TerminalsFilling(grammar) to_process = Queue() for r in grammar.rules: to_process.put(r) while not to_process.empty(): rule = to_process.get() if len(rule.right) > 2: grammar.rules.remove(rule) created_nonterm = type("ChomskyGroup[" + rule.__name__ + "]", (ChomskyGroupNonterminal,), ChomskyGroupNonterminal.__dict__.copy()) created_nonterm.group = rule.right[1:] created_left_rule = type("ChomskySplit[" + rule.__name__ + "]", (ChomskySplitRule,), ChomskySplitRule.__dict__.copy()) created_left_rule.rule = ([rule.fromSymbol], [rule.right[0], created_nonterm]) created_left_rule.from_rule = rule created_right_rule = type("ChomskyRest[" + rule.__name__ + "]", (ChomskyRestRule,), ChomskyRestRule.__dict__.copy()) created_right_rule.rule = ([created_nonterm], rule.right[1:]) created_right_rule.from_rule = rule grammar.nonterminals.add(created_nonterm) grammar.rules.add(created_left_rule, created_right_rule) to_process.put(created_left_rule) to_process.put(created_right_rule) elif len(rule.right) == 2: if rule.right[0] in grammar.terminals: grammar.rules.remove(rule) symb = fill.get(rule.right[0]) created = type("ChomskyLeft[" + rule.__name__ + "]", (ChomskyTerminalReplaceRule,), ChomskyTerminalReplaceRule.__dict__.copy()) created.rule = ([rule.fromSymbol], [symb, rule.right[1]]) created.from_rule = rule created.replaced_index = 0 grammar.rules.add(created) to_process.put(created) elif rule.right[1] in grammar.terminals: grammar.rules.remove(rule) symb = fill.get(rule.right[1]) created = type("ChomskyRight[" + rule.__name__ + "]", (ChomskyTerminalReplaceRule,), ChomskyTerminalReplaceRule.__dict__.copy()) created.rule = ([rule.fromSymbol], [rule.right[0], symb]) created.from_rule = rule created.replaced_index = 1 grammar.rules.add(created) to_process.put(created) return grammar
Transform grammar to Chomsky Normal Form. :param grammar: Grammar to transform. :param inplace: True if transformation should be performed in place. False by default. :return: Grammar in Chomsky Normal Form.
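Usage sketch, assuming `g` is a grammar object from the same library; after the transform every rule's right side has at most two symbols.

# Transform a copy; `g` itself is left untouched because inplace=False.
g_cnf = transform_to_chomsky_normal_form(g, inplace=False)
assert all(len(rule.right) <= 2 for rule in g_cnf.rules)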
4,686
def exploit(self):
    search = ServiceSearch()
    host_search = HostSearch()
    services = search.get_services(tags=['MS17-010'])
    services = [service for service in services]
    if len(services) == 0:
        print_error("No services found that are vulnerable for MS17-010")
        return

    if self.auto:
        print_success("Found {} services vulnerable for MS17-010".format(len(services)))
        for service in services:
            print_success("Exploiting " + str(service.address))
            host = host_search.id_to_object(str(service.address))
            system_os = ''
            if host.os:
                system_os = host.os
            else:
                system_os = self.detect_os(str(service.address))
                host.os = system_os
                host.save()
            text = self.exploit_single(str(service.address), system_os)
            print_notification(text)
    else:
        service_list = []
        for service in services:
            host = host_search.id_to_object(str(service.address))
            system_os = ''
            if host.os:
                system_os = host.os
            else:
                system_os = self.detect_os(str(service.address))
                host.os = system_os
                host.save()
            service_list.append({'ip': service.address, 'os': system_os,
                                 'string': "{ip} ({os}) {hostname}".format(
                                     ip=service.address, os=system_os,
                                     hostname=host.hostname)})
        draw_interface(service_list, self.callback, "Exploiting {ip} with OS: {os}")
Starts the exploiting phase; you should run setup before running this function.
If auto is set, this function will fire the exploit at all systems. Otherwise a
curses interface is shown.
4,687
def get_file_url(self):
    if self.has_file_url():
        return self._get_asset_content(
            Id(self.my_osid_object._my_map['fileId']['assetId']),
            self.my_osid_object._my_map['fileId']['assetContentTypeId']).get_url()
    raise IllegalState()
stub
4,688
def parse_emails(values): emails = [] if isinstance(values, str): values = [values] for value in values: matches = re_emails.findall(value) emails.extend([match[2] for match in matches]) return emails
Take a string or list of strings and try to extract all the emails
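Example call; the exact matches depend on the `re_emails` pattern defined elsewhere in the module.

parse_emails('Contact support@example.com or sales@example.com')
# expected: ['support@example.com', 'sales@example.com']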
4,689
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
    def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
        exner = pok * np.exp(-ka * iter_log_p)
        t = a * iter_log_p + b
        # Newton-Raphson iteration
        f = isentlevs_nd - t * exner
        fp = exner * (ka * t - a)
        return iter_log_p - (f / fp)

    # Pop keyword arguments
    tmpk_out = kwargs.pop('tmpk_out', False)
    max_iters = kwargs.pop('max_iters', 50)
    eps = kwargs.pop('eps', 1e-6)
    axis = kwargs.pop('axis', 0)
    bottom_up_search = kwargs.pop('bottom_up_search', True)

    ndim = temperature.ndim
    pres = pressure.to('hPa')
    temperature = temperature.to('kelvin')

    slices = [np.newaxis] * ndim
    slices[axis] = slice(None)
    slices = tuple(slices)
    pres = np.broadcast_to(pres[slices], temperature.shape) * pres.units

    # Sort input data in decreasing pressure order
    sort_pres = np.argsort(pres.m, axis=axis)
    sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
    sorter = broadcast_indices(pres, sort_pres, ndim, axis)
    levs = pres[sorter]
    tmpk = temperature[sorter]

    theta_levels = np.asanyarray(theta_levels.to('kelvin')).reshape(-1)
    isentlevels = theta_levels[np.argsort(theta_levels)]

    # Make the desired isentropic levels the same shape as the temperature grid
    shape = list(temperature.shape)
    shape[axis] = isentlevels.size
    isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)

    # Exponent in Poisson's equation (dimensionless)
    ka = mpconsts.kappa.m_as('dimensionless')

    # Potential temperature on the same grid as the input data
    pres_theta = potential_temperature(levs, tmpk)

    log_p = np.log(levs.m)
    pok = mpconsts.P0 ** ka

    above, below, good = find_bounding_indices(pres_theta.m, theta_levels, axis,
                                               from_below=bottom_up_search)

    a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
    b = tmpk.m[above] - a * log_p[above]

    # First guess for the iterative solver
    isentprs = 0.5 * (log_p[above] + log_p[below])
    good &= ~np.isnan(a)

    log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
                                  args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
                                  xtol=eps, maxiter=max_iters)
    isentprs[good] = np.exp(log_p_solved)
    isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan

    ret = [isentprs * units.hPa]
    if tmpk_out:
        ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)

    if args:
        others = interpolate_1d(isentlevels, pres_theta.m,
                                *(arr[sorter] for arr in args), axis=axis)
        if len(args) > 1:
            ret.extend(others)
        else:
            ret.append(others)

    return ret
r"""Interpolate data in isobaric coordinates to isentropic coordinates. Parameters ---------- theta_levels : array One-dimensional array of desired theta surfaces pressure : array One-dimensional array of pressure levels temperature : array Array of temperature args : array, optional Any additional variables will be interpolated to each isentropic level. Returns ------- list List with pressure at each isentropic level, followed by each additional argument interpolated to isentropic coordinates. Other Parameters ---------------- axis : int, optional The axis corresponding to the vertical in the temperature array, defaults to 0. tmpk_out : bool, optional If true, will calculate temperature and output as the last item in the output list. Defaults to False. max_iters : int, optional The maximum number of iterations to use in calculation, defaults to 50. eps : float, optional The desired absolute error in the calculated value, defaults to 1e-6. bottom_up_search : bool, optional Controls whether to search for theta levels bottom-up, or top-down. Defaults to True, which is bottom-up search. Notes ----- Input variable arrays must have the same number of vertical levels as the pressure levels array. Pressure is calculated on isentropic surfaces by assuming that temperature varies linearly with the natural log of pressure. Linear interpolation is then used in the vertical to find the pressure at each isentropic level. Interpolation method from [Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will be linearly interpolated to the new isentropic levels. See Also -------- potential_temperature
4,690
def getOperationName(self, ps, action):
    method = self.root.get(_get_element_nsuri_name(ps.body_root)) or \
        self.soapAction.get(action)
    if method is None:
        raise UnknownRequestException, \
            'failed to map request to a method: action(%s), root%s' \
            % (action, _get_element_nsuri_name(ps.body_root))
    return method
Returns operation name. action -- soapAction value
4,691
def showGridColumns( self ): delegate = self.itemDelegate() if ( isinstance(delegate, XTreeWidgetDelegate) ): return delegate.showGridColumns() return False
Returns whether or not this delegate should draw columns when \ rendering the grid. :return <bool>
4,692
def start(self, context):
    self.config['alias'] = self.alias

    safe_config = dict(self.config)
    del safe_config['host']

    log.info("Connecting MongoEngine database layer.", extra=dict(
        uri = redact_uri(self.config['host']),
        config = safe_config,
    ))

    self.connection = connect(**self.config)
Initialize the database connection.
4,693
def get_name(self, name_case=DdlParseBase.NAME_CASE.original): if name_case == self.NAME_CASE.lower: return self._name.lower() elif name_case == self.NAME_CASE.upper: return self._name.upper() else: return self._name
Get Name converted case :param name_case: name case type * DdlParse.NAME_CASE.original : Return to no convert * DdlParse.NAME_CASE.lower : Return to lower * DdlParse.NAME_CASE.upper : Return to upper :return: name
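Usage sketch; `col` stands in for any parsed object exposing this method, and the enum access path follows the docstring's `DdlParse.NAME_CASE` naming.

col.get_name()                                    # original casing
col.get_name(name_case=DdlParse.NAME_CASE.lower)  # forced lower case
col.get_name(name_case=DdlParse.NAME_CASE.upper)  # forced upper case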
4,694
def _create_latent_variables(self):
    self.latent_variables.add_z('Vol Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))

    for p_term in range(self.p):
        self.latent_variables.add_z('p(' + str(p_term+1) + ')', fam.Normal(0, 0.5, transform='logit'), fam.Normal(0, 3))
        if p_term == 0:
            self.latent_variables.z_list[-1].start = 3.00
        else:
            self.latent_variables.z_list[-1].start = -4.00

    for q_term in range(self.q):
        self.latent_variables.add_z('q(' + str(q_term+1) + ')', fam.Normal(0, 0.5, transform='logit'), fam.Normal(0, 3))
        if q_term == 0:
            self.latent_variables.z_list[-1].start = -1.50
        else:
            self.latent_variables.z_list[-1].start = -4.00

    self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3))
    self.latent_variables.add_z('Returns Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
    self.latent_variables.add_z('GARCH-M', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))

    self.latent_variables.z_list[-3].start = 2.0
Creates model latent variables Returns ---------- None (changes model attributes)
4,695
def __setParentSymbol(self, value):
    errors = []
    if not isinstance(value, str) or not value.split():
        errors.append('Parent symbol must be a non-empty string')
    else:
        self.__parentSymbol = value
    if errors:
        view.Tli.showErrors('parentSymbol', errors)
self.__parentSymbol variable setter
4,696
def assure_migrations_table_setup(db): from mig.models import MigrationData if not MigrationData.__table__.exists(db.bind): MigrationData.metadata.create_all( db.bind, tables=[MigrationData.__table__])
Make sure the migrations table is set up in the database.
4,697
async def drop_model_tables(models, **drop_table_kwargs): for m in reversed(sort_models_topologically(models)): await m.drop_table(**drop_table_kwargs)
Drop tables for all given models (in the right order).
4,698
def is_revision_chain_placeholder(pid): return d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter( pid__did=pid ).exists()
For replicas, the PIDs referenced in revision chains are reserved for use by other replicas.
4,699
def p_expr_EQ_expr(p):
    p[0] = make_binary(p.lineno(2), 'EQ', p[1], p[3], lambda x, y: x == y)
expr : expr EQ expr
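A standalone sketch of the same PLY convention: the grammar rule lives in the function docstring and `p` indexes the matched symbols. The sibling 'NE' token and rule below are assumptions for illustration; `make_binary` presumably builds an AST node from the operator tag plus a Python-level evaluator for constant folding.

def p_expr_NE_expr(p):
    """expr : expr NE expr"""
    # Same shape as the EQ rule above, with the inequality comparator.
    p[0] = make_binary(p.lineno(2), 'NE', p[1], p[3], lambda x, y: x != y)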