Dataset preview. Columns: text (string, lengths 89 to 104k), code_tokens (list of tokens), avg_line_len (float64, range 7.91 to 980), score (float64, range 0 to 630).
Each row below shows the text field followed by its avg_line_len and score.
def is_optional(self):
    """
    Returns whether the parameter is optional or required

    :return: Return True if optional, False if required
    """
    return (('optional' in self.attributes
             and bool(self.attributes['optional'].strip()))
            and ('minValue' in self.attributes
                 and self.attributes['minValue'] == 0))
avg_line_len: 40.3 | score: 21.7
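A minimal usage sketch for is_optional, wrapping the method above in a hypothetical stand-in class that carries the `attributes` dict the snippet expects (the field names come from the snippet; everything else is assumed):

import types

class Param:
    def __init__(self, attributes):
        self.attributes = attributes

    is_optional = is_optional  # reuse the function from the row above

print(Param({'optional': 'true', 'minValue': 0}).is_optional())  # True
print(Param({'optional': '', 'minValue': 0}).is_optional())      # False: empty flag string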
def add_plot(fn: Path, cm, ax, alpha=1):
    """Astrometry.net makes file ".new" with the image and the WCS SIP 2-D polynomial fit
    coefficients in the FITS header

    We use DECL as "x" and RA as "y".
    pcolormesh() is used as it handles arbitrary pixel shapes.
    Note that pcolormesh() cannot tolerate NaN in X or Y (NaN in C is OK).
    This is handled in https://github.com/scivision/pcolormesh_nan.py.
    """
    with fits.open(fn, mode='readonly', memmap=False) as f:
        img = f[0].data
        yPix, xPix = f[0].shape[-2:]
        x, y = np.meshgrid(range(xPix), range(yPix))  # pixel indices to find RA/dec of
        xy = np.column_stack((x.ravel(order='C'), y.ravel(order='C')))
        radec = wcs.WCS(f[0].header).all_pix2world(xy, 0)
        ra = radec[:, 0].reshape((yPix, xPix), order='C')
        dec = radec[:, 1].reshape((yPix, xPix), order='C')
        ax.set_title(fn.name)
        ax.pcolormesh(ra, dec, img, alpha=alpha, cmap=cm, norm=LogNorm())
        ax.set_ylabel('Right Ascension [deg.]')
        ax.set_xlabel('Declination [deg.]')
avg_line_len: 41.04 | score: 21.52
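A minimal driver for add_plot, assuming add_plot is in scope, an Astrometry.net ".new" file on disk (the file name below is a placeholder), and the imports the snippet relies on:

from pathlib import Path

import numpy as np
from astropy.io import fits
from astropy import wcs
from matplotlib.colors import LogNorm
from matplotlib.pyplot import figure, show

fg = figure()
ax = fg.gca()
add_plot(Path('solved_field.new'), cm='gray', ax=ax)  # hypothetical file name
show()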
def create(cls, name, user=None, network_element=None, domain_name=None,
           zone=None, executable=None):
    """
    Create a match expression

    :param str name: name of match expression
    :param str user: name of user or user group
    :param Element network_element: valid network element type, i.e. host, network, etc
    :param DomainName domain_name: domain name network element
    :param Zone zone: zone to use
    :param str executable: name of executable or group
    :raises ElementNotFound: specified object does not exist
    :return: instance with meta
    :rtype: MatchExpression
    """
    ref_list = []
    if user:
        pass  # user references are accepted but not added to ref_list here
    if network_element:
        ref_list.append(network_element.href)
    if domain_name:
        ref_list.append(domain_name.href)
    if zone:
        ref_list.append(zone.href)
    if executable:
        pass  # executable references are accepted but not added to ref_list here
    json = {'name': name, 'ref': ref_list}
    return ElementCreator(cls, json)
avg_line_len: 33.612903 | score: 16.645161
def fetch_url(self, url):
    """
    Retrieves the given url to the prefix

    Args:
        url(str): Url to retrieve

    Returns:
        str: path to the downloaded file
    """
    url_path = urlparse.urlsplit(url).path
    dst_path = os.path.basename(url_path)
    dst_path = self.paths.prefixed(dst_path)
    with LogTask('Downloading %s' % url):
        urllib.urlretrieve(url=os.path.expandvars(url), filename=dst_path)
    return dst_path
avg_line_len: 28.647059 | score: 16.294118
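The snippet uses Python 2 module names (urlparse and urllib.urlretrieve). A rough Python 3 sketch of the same download logic, with the LogTask logging and the self.paths.prefixed() prefix handling replaced by a plain directory argument (the function name is hypothetical):

import os
from urllib.parse import urlsplit
from urllib.request import urlretrieve

def fetch_url_py3(url, prefix_dir='.'):
    # Derive the destination file name from the URL's path component.
    dst_path = os.path.join(prefix_dir, os.path.basename(urlsplit(url).path))
    urlretrieve(url=os.path.expandvars(url), filename=dst_path)
    return dst_path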
def _update_explicit_bucket_count(a_float, dist):
    """Adds `a_float` to `dist`, updating its explicit buckets.

    Args:
      a_float (float): a new value
      dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        the Distribution being updated

    Raises:
      ValueError: if `dist` does not already have explicit buckets defined
      ValueError: if there are not enough bucket count fields in `dist`
    """
    buckets = dist.explicitBuckets
    if buckets is None:
        raise ValueError(_BAD_UNSET_BUCKETS % (u'explicit buckets'))
    bucket_counts = dist.bucketCounts
    bounds = buckets.bounds
    if len(bucket_counts) < len(bounds) + 1:
        raise ValueError(_BAD_LOW_BUCKET_COUNT)
    bucket_counts[bisect.bisect(bounds, a_float)] += 1
avg_line_len: 38.8 | score: 18
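The core of the update is bisect.bisect(bounds, a_float), which maps a value to its bucket index: values strictly below bounds[0] land in bucket 0, and values at or above bounds[-1] land in the overflow bucket. A self-contained illustration with plain lists:

import bisect

bounds = [1.0, 5.0, 10.0]      # 3 boundaries -> 4 buckets
bucket_counts = [0, 0, 0, 0]

for value in (0.5, 7.0, 42.0):
    bucket_counts[bisect.bisect(bounds, value)] += 1

print(bucket_counts)  # [1, 0, 1, 1]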
def get_latex(self):
    """Bibliographic entry in LaTeX format."""
    if len(self.authors) > 1:
        authors = _list_authors(self.authors)
    else:
        a = self.authors
        authors = ' '.join([a.given_name, a.surname])
    if self.volume and self.issueIdentifier:
        volissue = '\\textbf{{{}({})}}'.format(self.volume, self.issueIdentifier)
    elif self.volume:
        volissue = '\\textbf{{{}}}'.format(self.volume)
    else:
        volissue = 'no volume'
    pages = _parse_pages(self)
    s = '{auth}, \\textit{{{title}}}, {jour}, {vol}, {pages} ({year}).'.format(
        auth=authors, title=self.title, jour=self.publicationName,
        vol=volissue, pages=pages, year=self.coverDate[:4])
    if self.doi is not None:
        s += ' \\href{{https://doi.org/{0}}}{{doi:{0}}}, '.format(self.doi)
    s += '\\href{{{0}}}{{scopus:{1}}}.'.format(self.scopus_link, self.eid)
    return s
avg_line_len: 46.52381 | score: 20.52381
def preview(request):
    """
    Render preview page.

    :returns: A rendered preview
    """
    if settings.MARKDOWN_PROTECT_PREVIEW:
        user = getattr(request, 'user', None)
        if not user or not user.is_staff:
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(request.get_full_path())

    return render(
        request, settings.MARKDOWN_PREVIEW_TEMPLATE, dict(
            content=request.POST.get('data', 'No content posted'),
            css=settings.MARKDOWN_STYLE
        ))
avg_line_len: 31.411765 | score: 17.588235
def get_fragment(self, gp, **kwargs):
    """
    Return a complete fragment for a given gp.

    :param gp: A graph pattern
    :return:
    """
    collector = FragmentCollector(self.__host, gp)
    return collector.get_fragment(**kwargs)
avg_line_len: 32.5 | score: 8
def get_instance(self, payload):
    """
    Build an instance of FunctionVersionInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionInstance
    :rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionInstance
    """
    return FunctionVersionInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        function_sid=self._solution['function_sid'],
    )
avg_line_len: 37.866667 | score: 22.266667
def format_color(text, color, use_color_setting):
    """Format text with color.

    Args:
        text - Text to be formatted with color if `use_color`
        color - The color start string
        use_color_setting - Whether or not to color
    """
    if not use_color_setting:
        return text
    else:
        return '{}{}{}'.format(color, text, NORMAL)
avg_line_len: 29.416667 | score: 15.666667
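Here `color` and NORMAL are expected to be ANSI escape sequences. A small self-contained sketch; the particular escape values are assumptions, not taken from the source:

NORMAL = '\033[m'   # ANSI reset; assumed value
RED = '\033[41m'    # ANSI red background; placeholder color start string

def format_color(text, color, use_color_setting):
    if not use_color_setting:
        return text
    else:
        return '{}{}{}'.format(color, text, NORMAL)

print(format_color('Failed', RED, True))   # highlighted on ANSI terminals
print(format_color('Failed', RED, False))  # plain text, no escape codes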
def run_commands(commands, settings):
    """
    Runs the commands supplied as an argument
    It will exit the program if the commands return a non-zero code

    Args:
        the commands to run
        The settings dictionary
    """
    sprint = settings["sprint"]
    quiet = settings["quiet"]
    error = settings["error"]
    enhanced_errors = True
    the_shell = None
    if settings["no_enhanced_errors"]:
        enhanced_errors = False
    if "shell" in settings:
        the_shell = settings["shell"]
    windows_p = sys.platform == "win32"

    STDOUT = None
    STDERR = None
    if quiet:
        STDOUT = PIPE
        STDERR = PIPE

    commands = commands.rstrip()
    sprint("About to run commands '{}'".format(commands), level="verbose")
    if not quiet:
        sprint(commands)

    if the_shell:
        tmp = shlex.split(the_shell)
        the_shell = tmp[0]
        tmp = tmp[1:]
        if enhanced_errors and not windows_p:
            tmp.append("-e")
        tmp.append(commands)
        commands = tmp
    else:
        if enhanced_errors and not windows_p:
            commands = ["-e", commands]

    p = Popen(commands, shell=True, stdout=STDOUT, stderr=STDERR,
              executable=the_shell)
    out, err = p.communicate()
    if p.returncode:
        if quiet:
            error(err.decode(locale.getpreferredencoding()))
        error("Command failed to run")
        sys.exit(1)
avg_line_len: 26.461538 | score: 16.384615
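A minimal sketch of the settings dictionary this function expects, assuming run_commands and its imports (sys, shlex, locale, subprocess.Popen and PIPE) are in scope; the logging callbacks here are stand-ins for whatever the real callers supply:

settings = {
    "sprint": lambda msg, level=None: print(msg),  # stand-in logger
    "error": lambda msg: print(msg),               # stand-in error reporter
    "quiet": False,
    "no_enhanced_errors": True,  # keep `commands` a plain string for this demo
}

run_commands("echo hello", settings)  # prints the command, then runs it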
def p_invoke(p):
    """
    invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
           | INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
    """
    priority = None
    if len(p) > 5:
        priority = int(p[8])
    p[0] = Trigger(p[2], p[4], priority)
avg_line_len: 32.222222 | score: 17.111111
def q_vector(u, v, temperature, pressure, dx, dy, static_stability=1):
    r"""Calculate Q-vector at a given pressure level using the u, v winds and temperature.

    .. math:: \vec{Q} = (Q_1, Q_2)
                      = - \frac{R}{\sigma p}\left(
                            \frac{\partial \vec{v}_g}{\partial x} \cdot \nabla_p T,
                            \frac{\partial \vec{v}_g}{\partial y} \cdot \nabla_p T
                        \right)

    This formula follows equation 5.7.55 from [Bluestein1992]_, and can be used with the
    below form of the quasigeostrophic omega equation to assess vertical motion
    ([Bluestein1992]_ equation 5.7.54):

    .. math:: \left( \nabla_p^2 + \frac{f_0^2}{\sigma} \frac{\partial^2}{\partial p^2}
                  \right) \omega =
              - 2 \nabla_p \cdot \vec{Q} -
                  \frac{R}{\sigma p} \beta \frac{\partial T}{\partial x}.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind (geostrophic in QG-theory)
    v : (M, N) ndarray
        y component of the wind (geostrophic in QG-theory)
    temperature : (M, N) ndarray
        Array of temperature at pressure level
    pressure : `pint.Quantity`
        Pressure at level
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    static_stability : `pint.Quantity`, optional
        The static stability at the pressure level. Defaults to 1 if not given to calculate
        the Q-vector without factoring in static stability.

    Returns
    -------
    tuple of (M, N) ndarrays
        The components of the Q-vector in the u- and v-directions respectively

    See Also
    --------
    static_stability

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading
    dimensions of (x, y) or trailing dimensions of (y, x), depending on the value of
    ``dim_order``.

    """
    dudy, dudx = gradient(u, deltas=(dy, dx), axes=(-2, -1))
    dvdy, dvdx = gradient(v, deltas=(dy, dx), axes=(-2, -1))
    dtempdy, dtempdx = gradient(temperature, deltas=(dy, dx), axes=(-2, -1))

    q1 = -mpconsts.Rd / (pressure * static_stability) * (dudx * dtempdx + dvdx * dtempdy)
    q2 = -mpconsts.Rd / (pressure * static_stability) * (dudy * dtempdx + dvdy * dtempdy)

    return q1.to_base_units(), q2.to_base_units()
avg_line_len: 41.918033 | score: 27.147541
def script(name,
           source=None,
           template=None,
           onlyif=None,
           unless=None,
           creates=None,
           cwd=None,
           runas=None,
           shell=None,
           env=None,
           stateful=False,
           umask=None,
           timeout=None,
           use_vt=False,
           output_loglevel='debug',
           hide_output=False,
           defaults=None,
           context=None,
           success_retcodes=None,
           success_stdout=None,
           success_stderr=None,
           **kwargs):
    '''
    Download a script and execute it with specified arguments.

    source
        The location of the script to download. If the file is located on the
        master in the directory named spam, and is called eggs, the source
        string is salt://spam/eggs

    template
        If this setting is applied then the named templating engine will be
        used to render the downloaded file. Currently jinja, mako, and wempy
        are supported

    name
        Either "cmd arg1 arg2 arg3..." (cmd is not used) or a source
        "salt://...".

    onlyif
        Run the named command only if the command passed to the ``onlyif``
        option returns true

    unless
        Run the named command only if the command passed to the ``unless``
        option returns false

    cwd
        The current working directory to execute the command in, defaults to
        /root

    runas
        The name of the user to run the command as

    shell
        The shell to use for execution. The default is set in grains['shell']

    env
        A list of environment variables to be set prior to execution.
        Example:

        .. code-block:: yaml

            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.

        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':

        .. code-block:: yaml

            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"

        One can still use the existing $PATH by using a bit of Jinja:

        .. code-block:: jinja

            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}

            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}

    saltenv : ``base``
        The Salt environment to use

    umask
        The umask (in octal) to use when running the command.

    stateful
        The command being executed is expected to return data about executing
        a state. For more information, see the :ref:`stateful-argument`
        section.

    timeout
        If the command has not terminated after timeout seconds, send the
        subprocess sigterm, and if sigterm is ignored, follow up with sigkill

    args
        String of command line args to pass to the script. Only used if no
        args are specified as part of the `name` argument. To pass a string
        containing spaces in YAML, you will need to doubly-quote it:
        "arg1 'arg two' arg3"

    creates
        Only run if the file specified by ``creates`` does not exist. If you
        specify a list of files then this state will only run if **any** of
        the files does not exist.

        .. versionadded:: 2014.7.0

    use_vt
        Use VT utils (saltstack) to stream the command output more
        interactively to the console and the logs. This is experimental.

    context
        .. versionadded:: 2016.3.0

        Overrides default context variables passed to the template.

    defaults
        .. versionadded:: 2016.3.0

        Default context passed to the template.

    output_loglevel : debug
        Control the loglevel at which the output from the command is logged to
        the minion log.

        .. note::
            The command being run will still be logged at the ``debug``
            loglevel regardless, unless ``quiet`` is used for this value.

    hide_output : False
        Suppress stdout and stderr in the state's results.

        .. note::
            This is separate from ``output_loglevel``, which only handles how
            Salt logs to the minion log.

        .. versionadded:: 2018.3.0

    success_retcodes: This parameter allows a list of
        non-zero return codes that should be considered a success. If the
        return code returned from the run matches any in the provided list,
        the return code will be overridden with zero.

        .. versionadded:: 2019.2.0

    success_stdout: This parameter allows a list of
        strings that when found in standard out should be considered a
        success. If stdout returned from the run matches any in the provided
        list, the return code will be overridden with zero.

        .. versionadded:: Neon

    success_stderr: This parameter allows a list of
        strings that when found in standard error should be considered a
        success. If stderr returned from the run matches any in the provided
        list, the return code will be overridden with zero.

        .. versionadded:: Neon
    '''
    test_name = None
    if not isinstance(stateful, list):
        stateful = stateful is True
    elif isinstance(stateful, list) and 'test_name' in stateful[0]:
        test_name = stateful[0]['test_name']
    if __opts__['test'] and test_name:
        name = test_name

    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, (list, dict)):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        return ret

    if context and not isinstance(context, dict):
        ret['comment'] = ('Invalidly-formatted \'context\' parameter. Must '
                          'be formed as a dict.')
        return ret
    if defaults and not isinstance(defaults, dict):
        ret['comment'] = ('Invalidly-formatted \'defaults\' parameter. Must '
                          'be formed as a dict.')
        return ret

    tmpctx = defaults if defaults else {}
    if context:
        tmpctx.update(context)

    cmd_kwargs = copy.deepcopy(kwargs)
    cmd_kwargs.update({'runas': runas,
                       'shell': shell or __grains__['shell'],
                       'env': env,
                       'onlyif': onlyif,
                       'unless': unless,
                       'cwd': cwd,
                       'template': template,
                       'umask': umask,
                       'timeout': timeout,
                       'output_loglevel': output_loglevel,
                       'hide_output': hide_output,
                       'use_vt': use_vt,
                       'context': tmpctx,
                       'saltenv': __env__,
                       'success_retcodes': success_retcodes,
                       'success_stdout': success_stdout,
                       'success_stderr': success_stderr})

    run_check_cmd_kwargs = {
        'cwd': cwd,
        'runas': runas,
        'shell': shell or __grains__['shell']
    }

    # Change the source to be the name arg if it is not specified
    if source is None:
        source = name

    # If script args present split from name and define args
    if not cmd_kwargs.get('args', None) and len(name.split()) > 1:
        cmd_kwargs.update({'args': name.split(' ', 1)[1]})

    cret = mod_run_check(run_check_cmd_kwargs, onlyif, unless, creates)
    if isinstance(cret, dict):
        ret.update(cret)
        return ret

    if __opts__['test'] and not test_name:
        ret['result'] = None
        ret['comment'] = 'Command \'{0}\' would have been ' \
                         'executed'.format(name)
        return _reinterpreted_state(ret) if stateful else ret

    if cwd and not os.path.isdir(cwd):
        ret['comment'] = (
            'Desired working directory "{0}" '
            'is not available'
        ).format(cwd)
        return ret

    # Wow, we passed the test, run this sucker!
    try:
        cmd_all = __salt__['cmd.script'](source, python_shell=True, **cmd_kwargs)
    except (CommandExecutionError, SaltRenderError, IOError) as err:
        ret['comment'] = six.text_type(err)
        return ret

    ret['changes'] = cmd_all
    if kwargs.get('retcode', False):
        ret['result'] = not bool(cmd_all)
    else:
        ret['result'] = not bool(cmd_all['retcode'])
    if ret.get('changes', {}).get('cache_error'):
        ret['comment'] = 'Unable to cache script {0} from saltenv ' \
                         '\'{1}\''.format(source, __env__)
    else:
        ret['comment'] = 'Command \'{0}\' run'.format(name)
    if stateful:
        ret = _reinterpreted_state(ret)
    if __opts__['test'] and cmd_all['retcode'] == 0 and ret['changes']:
        ret['result'] = None
    return ret
avg_line_len: 32.726316 | score: 23.533333
def _add_public_adder(self):
    """
    Add a public ``add_x()`` method to the parent element class.
    """
    def add_child(obj):
        private_add_method = getattr(obj, self._add_method_name)
        child = private_add_method()
        return child

    add_child.__doc__ = (
        'Add a new ``<%s>`` child element unconditionally, inserted in t'
        'he correct sequence.' % self._nsptagname
    )
    self._add_to_class(self._public_add_method_name, add_child)
avg_line_len: 36.571429 | score: 18.285714
def _request_login(self, login, password):
    """Sends Login request"""
    return self._request_internal("Login",
                                  login=login,
                                  password=password)
avg_line_len: 45.4 | score: 6.8
def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs):
    """Obtain an array, creating if it doesn't exist. Other `kwargs` are as per
    :func:`zarr.hierarchy.Group.create_dataset`.

    Parameters
    ----------
    name : string
        Array name.
    shape : int or tuple of ints
        Array shape.
    dtype : string or dtype, optional
        NumPy dtype.
    exact : bool, optional
        If True, require `dtype` to match exactly. If false, require
        `dtype` can be cast from array dtype.

    """
    return self._write_op(self._require_dataset_nosync, name, shape=shape,
                          dtype=dtype, exact=exact, **kwargs)
avg_line_len: 35.95 | score: 20.2
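A hedged usage sketch with zarr, showing the create-or-reuse behavior: the first call creates the array, the second returns the existing one backed by the same storage.

import zarr

group = zarr.group()  # in-memory hierarchy
a = group.require_dataset('data', shape=(100,), dtype='f8')  # created on first call
b = group.require_dataset('data', shape=(100,), dtype='f8')  # returns the existing array
a[:] = 1.0
print(b[0])  # 1.0, since both names point at the same stored data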
def remove(self):
    """Remove a target

    Raises a ``RuntimeError`` if the target does not exist.
    """
    session = client.get_client().create_session()
    if not self._base_query(session).count() > 0:
        session.close()
        raise RuntimeError("Target does not exist, name={:s}, params={:s}"
                           "".format(self.name, self.params))
    # remove the target from the database
    self._base_query(session).delete()
    session.commit()
    session.close()
avg_line_len: 31.058824 | score: 20.529412
def split_path(path):
    "convenience routine for splitting a path into a list of components."
    if isinstance(path, (tuple, list)):
        result = path # assume already split
    elif path == "/":
        result = []
    else:
        if not path.startswith("/") or path.endswith("/"):
            raise DBusError(DBUS.ERROR_INVALID_ARGS, "invalid path %s" % repr(path))
        #end if
        result = path.split("/")[1:]
    #end if
    return \
        result
avg_line_len: 32.928571 | score: 21.357143
def dilate_obs(self, dilation_radius):
    """
    Use a dilation filter to grow positive observation areas by a specified number of grid points

    :param dilation_radius: Number of times to dilate the grid.
    :return:
    """
    for s in self.size_thresholds:
        self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape)
        for t in range(self.dilated_obs[s].shape[0]):
            self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variable][t] >= s,
                                                   iterations=dilation_radius)] = 1
avg_line_len: 50.545455 | score: 28.181818
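The core trick is scipy.ndimage.binary_dilation with `iterations` controlling how far positive areas grow. A standalone illustration on a small grid:

import numpy as np
from scipy.ndimage import binary_dilation

field = np.zeros((5, 5))
field[2, 2] = 3.0  # one positive observation

mask = binary_dilation(field >= 1.0, iterations=1)
dilated = np.zeros_like(field)
dilated[mask] = 1  # the single cell grown by one grid point in each direction
print(dilated)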
def build_api_error(response, blob=None):
    """Helper method for creating errors and attaching HTTP response/request
    details to them.
    """
    try:
        blob = blob or response.json()
    except json.JSONDecodeError:
        blob = {}
    error_list = blob.get('errors', None)
    error = (error_list[0] if error_list else {})
    if error:
        error_id = error.get('id', '')
        error_message = error.get('message', '')
    else:
        # In the case of an OAuth-specific error, the response data is the error
        # blob, and the keys are slightly different. See
        # https://developers.coinbase.com/api/v2#error-response
        error_id = blob.get('error')
        error_message = blob.get('error_description')
    error_class = (
        _error_id_to_class.get(error_id, None) or
        _status_code_to_class.get(response.status_code, APIError))
    return error_class(response, error_id, error_message, error_list)
avg_line_len: 40.347826 | score: 15
def create_return_line_item(cls, return_line_item, **kwargs):
    """Create ReturnLineItem

    Create a new ReturnLineItem
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.create_return_line_item(return_line_item, async=True)
    >>> result = thread.get()

    :param async bool
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to create (required)
    :return: ReturnLineItem
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
    else:
        (data) = cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
        return data
avg_line_len: 44.333333 | score: 22.190476
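Since Python 3.7, `async` is a reserved word, so the `async=True` call style shown in the docstring is a syntax error on modern interpreters. The flag can still be passed by unpacking a dict; a sketch, assuming an `api` object exposing this method:

item = api.create_return_line_item(return_line_item)  # synchronous call

# asynchronous call without writing the reserved word as a keyword argument
thread = api.create_return_line_item(return_line_item, **{'async': True})
result = thread.get()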
def get_stop_words(self, language, fail_safe=False):
    """
    Returns a StopWord object initialized with the stop words collection
    requested by ``language``.
    If the requested language is not available a StopWordError is raised.
    If ``fail_safe`` is set to True, an empty StopWord object is returned.
    """
    try:
        language = self.language_codes[language]
    except KeyError:
        pass

    collection = self.LOADED_LANGUAGES_CACHE.get(language)
    if collection is None:
        try:
            collection = self._get_stop_words(language)
            self.LOADED_LANGUAGES_CACHE[language] = collection
        except StopWordError as error:
            if not fail_safe:
                raise error
            collection = []

    stop_words = StopWord(language, collection)
    return stop_words
avg_line_len: 35.56 | score: 19.32
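A hedged usage sketch, assuming a `factory` instance exposing this method; with fail_safe=True an unknown language yields an empty StopWord instead of raising:

stop_words = factory.get_stop_words('en')                       # loads and caches the collection
cached = factory.get_stop_words('en')                           # served from LOADED_LANGUAGES_CACHE
empty = factory.get_stop_words('no-such-lang', fail_safe=True)  # empty StopWord, no StopWordError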
def xor(key, data):
    """
    Perform cyclical exclusive or operations on ``data``.

    The ``key`` can be an integer *(0 <= key < 256)* or a byte sequence. If
    the key is smaller than the provided ``data``, the ``key`` will be
    repeated.

    Args:
        key(int or bytes): The key to xor ``data`` with.
        data(bytes): The data to perform the xor operation on.

    Returns:
        bytes: The result of the exclusive or operation.

    Examples:
        >>> from pwny import *
        >>> xor(5, b'ABCD')
        b'DGFA'
        >>> xor(5, b'DGFA')
        b'ABCD'
        >>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        b'15-=51)19=%5=9!)!%=-%!9!)-'
        >>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
        b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    """
    if type(key) is int:
        key = six.int2byte(key)
    key_len = len(key)

    return b''.join(
        six.int2byte(c ^ six.indexbytes(key, i % key_len))
        for i, c in enumerate(six.iterbytes(data))
    )
avg_line_len: 27.6 | score: 21.714286
def clinsig_query(self, query, mongo_query):
    """ Add clinsig filter values to the mongo query object

        Args:
            query(dict): a dictionary of query filters specified by the users
            mongo_query(dict): the query that is going to be submitted to the database

        Returns:
            clinsig_query(dict): a dictionary with clinsig key-values
    """
    LOG.debug('clinsig is a query parameter')
    trusted_revision_level = ['mult', 'single', 'exp', 'guideline']
    rank = []
    str_rank = []
    clnsig_query = {}

    for item in query['clinsig']:
        rank.append(int(item))
        # search for human readable clinsig values in newer cases
        rank.append(CLINSIG_MAP[int(item)])
        str_rank.append(CLINSIG_MAP[int(item)])

    if query.get('clinsig_confident_always_returned') == True:
        LOG.debug("add CLINSIG filter with trusted_revision_level")
        clnsig_query = {
            "clnsig": {
                '$elemMatch': {
                    '$or': [
                        {
                            '$and': [
                                {'value': {'$in': rank}},
                                {'revstat': {'$in': trusted_revision_level}}
                            ]
                        },
                        {
                            '$and': [
                                {'value': re.compile('|'.join(str_rank))},
                                {'revstat': re.compile('|'.join(trusted_revision_level))}
                            ]
                        }
                    ]
                }
            }
        }
    else:
        LOG.debug("add CLINSIG filter for rank: %s" %
                  ', '.join(str(query['clinsig'])))
        clnsig_query = {
            "clnsig": {
                '$elemMatch': {
                    '$or': [
                        {'value': {'$in': rank}},
                        {'value': re.compile('|'.join(str_rank))}
                    ]
                }
            }
        }
    return clnsig_query
avg_line_len: 39.206349 | score: 19.68254
def get_way(self, way_id, resolve_missing=False):
    """
    Get a way by its ID.

    :param way_id: The way ID
    :type way_id: Integer
    :param resolve_missing: Query the Overpass API if the way is missing in the result set.
    :return: The way
    :rtype: overpy.Way
    :raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
    :raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
    """
    ways = self.get_ways(way_id=way_id)
    if len(ways) == 0:
        if resolve_missing is False:
            raise exception.DataIncomplete("Resolve missing way is disabled")

        query = ("\n"
                 "[out:json];\n"
                 "way({way_id});\n"
                 "out body;\n")
        query = query.format(way_id=way_id)
        tmp_result = self.api.query(query)
        self.expand(tmp_result)

        ways = self.get_ways(way_id=way_id)

    if len(ways) == 0:
        raise exception.DataIncomplete("Unable to resolve requested way")

    return ways[0]
avg_line_len: 35 | score: 20.235294
def prepare(self):
    """Method to check if the impact function can be run.

    :return: A tuple with the status of the IF and an error message if
        needed.
        The status is PREPARE_SUCCESS if everything was fine.
        The status is PREPARE_FAILED_BAD_INPUT if the client should fix
        something.
        The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client
        should fix the analysis extent.
        The status is PREPARE_FAILED_BAD_CODE if something went wrong
        from the code.
    :rtype: (int, m.Message)
    """
    self._provenance_ready = False

    # save layer reference before preparing.
    # used to display it in maps
    original_exposure = self.exposure
    original_hazard = self.hazard
    original_aggregation = self.aggregation

    try:
        if not self.exposure:
            message = generate_input_error_message(
                tr('The exposure layer is compulsory'),
                m.Paragraph(tr(
                    'The impact function needs an exposure layer to run. '
                    'You must provide it.'))
            )
            return PREPARE_FAILED_BAD_INPUT, message

        status, message = check_input_layer(self.exposure, 'exposure')
        if status != PREPARE_SUCCESS:
            return status, message

        if not self.hazard:
            message = generate_input_error_message(
                tr('The hazard layer is compulsory'),
                m.Paragraph(tr(
                    'The impact function needs a hazard layer to run. '
                    'You must provide it.'))
            )
            return PREPARE_FAILED_BAD_INPUT, message

        status, message = check_input_layer(self.hazard, 'hazard')
        if status != PREPARE_SUCCESS:
            return status, message

        if self.aggregation:
            if self._requested_extent:
                message = generate_input_error_message(
                    tr('Error with the requested extent'),
                    m.Paragraph(tr(
                        'Requested Extent must be null when an '
                        'aggregation is provided.'))
                )
                return PREPARE_FAILED_BAD_INPUT, message

            if self._crs:
                message = generate_input_error_message(
                    tr('Error with the requested extent'),
                    m.Paragraph(tr(
                        'Requested Extent CRS must be null when an '
                        'aggregation is provided.'))
                )
                return PREPARE_FAILED_BAD_INPUT, message

            if self.use_exposure_view_only:
                message = generate_input_error_message(
                    tr('Error with the requested extent'),
                    m.Paragraph(tr(
                        'Use exposure view only can not be set to True if '
                        'you use an aggregation layer.'))
                )
                return PREPARE_FAILED_BAD_INPUT, message

            status, message = check_input_layer(
                self.aggregation, 'aggregation')
            aggregation_source = full_layer_uri(self.aggregation)
            aggregation_keywords = copy_layer_keywords(
                self.aggregation.keywords)
            if status != PREPARE_SUCCESS:
                return status, message
        else:
            aggregation_source = None
            aggregation_keywords = None
            if not self._crs:
                message = generate_input_error_message(
                    tr('Error with the requested CRS'),
                    m.Paragraph(tr(
                        'CRS must be set when you don\'t use an '
                        'aggregation layer. It will be used for the '
                        'analysis CRS.'))
                )
                return PREPARE_FAILED_BAD_INPUT, message

        if self.requested_extent and self.use_exposure_view_only:
            message = generate_input_error_message(
                tr('Error with the requested extent'),
                m.Paragraph(tr(
                    'Requested Extent must be null when you use the '
                    'exposure view only.'))
            )
            return PREPARE_FAILED_BAD_INPUT, message

        # We need to check if the hazard is OK to run on the exposure.
        hazard_keywords = self.hazard.keywords
        exposure_key = self.exposure.keywords['exposure']
        if not active_thresholds_value_maps(hazard_keywords, exposure_key):
            warning_heading = m.Heading(
                tr('Incompatible exposure/hazard'), **WARNING_STYLE)
            warning_message = tr(
                'The hazard layer is not set up for this kind of '
                'exposure. In InaSAFE, you need to define keywords in the '
                'hazard layer for each exposure type that you want to use '
                'with the hazard.')
            suggestion_heading = m.Heading(
                tr('Suggestion'), **SUGGESTION_STYLE)
            suggestion = tr(
                'Please select the hazard layer in the legend and then '
                'run the keyword wizard to define the needed keywords for '
                '{exposure_type} exposure.').format(
                    exposure_type=exposure_key)

            message = m.Message()
            message.add(warning_heading)
            message.add(warning_message)
            message.add(suggestion_heading)
            message.add(suggestion)
            return PREPARE_FAILED_BAD_INPUT, message

        status, message = self._compute_analysis_extent()
        if status != PREPARE_SUCCESS:
            return status, message

        # Set the name
        hazard_name = get_name(self.hazard.keywords.get('hazard'))
        exposure_name = get_name(self.exposure.keywords.get('exposure'))
        hazard_geometry_name = get_name(geometry_type(self.hazard))
        exposure_geometry_name = get_name(geometry_type(self.exposure))
        self._name = tr(
            '{hazard_type} {hazard_geometry} On {exposure_type} '
            '{exposure_geometry}').format(
                hazard_type=hazard_name,
                hazard_geometry=hazard_geometry_name,
                exposure_type=exposure_name,
                exposure_geometry=exposure_geometry_name).title()

        # Set the title
        if self.exposure.keywords.get('exposure') == 'population':
            self._title = tr('need evacuation')
        else:
            self._title = tr('be affected')

        for pre_processor in pre_processors:
            if pre_processor['condition'](self):
                self._preprocessors.append(pre_processor)

    except Exception as e:
        if self.debug_mode:
            # We run in debug mode, we do not want to catch the exception.
            # You should download the First Aid plugin for instance.
            raise
        else:
            message = get_error_message(e)
            return PREPARE_FAILED_BAD_CODE, message
    else:
        # Everything was fine.
        self._is_ready = True
        set_provenance(
            self._provenance,
            provenance_exposure_layer,
            full_layer_uri(self.exposure))
        # reference to original layer being used
        set_provenance(
            self._provenance,
            provenance_exposure_layer_id,
            original_exposure.id())
        set_provenance(
            self._provenance,
            provenance_exposure_keywords,
            copy_layer_keywords(self.exposure.keywords))
        set_provenance(
            self._provenance,
            provenance_hazard_layer,
            full_layer_uri(self.hazard))
        # reference to original layer being used
        set_provenance(
            self._provenance,
            provenance_hazard_layer_id,
            original_hazard.id())
        set_provenance(
            self._provenance,
            provenance_hazard_keywords,
            copy_layer_keywords(self.hazard.keywords))
        # reference to original layer being used
        if original_aggregation:
            set_provenance(
                self._provenance,
                provenance_aggregation_layer_id,
                original_aggregation.id())
        else:
            set_provenance(
                self._provenance,
                provenance_aggregation_layer_id,
                None)
        set_provenance(
            self._provenance,
            provenance_aggregation_layer,
            aggregation_source)
        set_provenance(
            self._provenance,
            provenance_aggregation_keywords,
            aggregation_keywords)

        # Set output layer expected
        self._output_layer_expected = self._compute_output_layer_expected()
        return PREPARE_SUCCESS, None
42.659091
16.659091
def calculate_gradient(self, batch_info, device, model, rollout): """ Calculate loss of the supplied rollout """ evaluator = model.evaluate(rollout) batch_size = rollout.frames() dones_tensor = evaluator.get('rollout:dones') rewards_tensor = evaluator.get('rollout:rewards') assert dones_tensor.dtype == torch.float32 with torch.no_grad(): target_evaluator = self.target_model.evaluate(rollout) if self.double_dqn: # DOUBLE DQN # Histogram gets returned as logits initially, we need to exp it before projection target_value_histogram_for_all_actions = target_evaluator.get('model:q_dist_next').exp() model_value_histogram_for_all_actions = evaluator.get('model:q_dist_next').exp() atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms) selected_action_indices = ( (atoms_aligned * model_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1) ) # Select largest 'target' value based on action that 'model' selects next_value_histograms = ( target_value_histogram_for_all_actions[range(batch_size), selected_action_indices] ) else: # REGULAR DQN # Histogram gets returned as logits initially, we need to exp it before projection target_value_histogram_for_all_actions = target_evaluator.get('model:q_dist_next').exp() atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms) selected_action_indices = ( (atoms_aligned * target_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1) ) next_value_histograms = ( target_value_histogram_for_all_actions[range(batch_size), selected_action_indices] ) # HISTOGRAM PROJECTION CODE forward_steps = rollout.extra_data.get('forward_steps', 1) atoms_projected = ( rewards_tensor.unsqueeze(1) + (self.discount_factor ** forward_steps) * (1 - dones_tensor).unsqueeze(1) * self.support_atoms.unsqueeze(0) ) atoms_projected = atoms_projected.clamp(min=self.vmin, max=self.vmax) projection_indices = (atoms_projected - self.vmin) / self.atom_delta index_floor = projection_indices.floor().long() index_ceil = projection_indices.ceil().long() # Fix corner case when index_floor == index_ceil index_floor[(index_ceil > 0) * (index_floor == index_ceil)] -= 1 index_ceil[(index_floor < (self.num_atoms - 1)) * (index_floor == index_ceil)] += 1 value_histogram_projected = torch.zeros_like(next_value_histograms) # Following part will be a bit convoluted, in an effort to fully vectorize projection operation # Special offset index tensor offsets = ( torch.arange(0, batch_size * self.num_atoms, self.num_atoms) .unsqueeze(1) .expand(batch_size, self.num_atoms) .contiguous().view(-1).to(device) ) # Linearize all the buffers value_histogram_projected = value_histogram_projected.view(-1) index_ceil = index_ceil.view(-1) index_floor = index_floor.view(-1) projection_indices = projection_indices.view(-1) value_histogram_projected.index_add_( 0, index_floor+offsets, (next_value_histograms.view(-1) * (index_ceil.float() - projection_indices)) ) value_histogram_projected.index_add_( 0, index_ceil+offsets, (next_value_histograms.view(-1) * (projection_indices - index_floor.float())) ) value_histogram_projected = value_histogram_projected.reshape(next_value_histograms.shape) q_log_histogram_selected = evaluator.get('model:action:q_dist') # Cross-entropy loss as usual original_losses = -(value_histogram_projected * q_log_histogram_selected).sum(dim=1) if evaluator.is_provided('rollout:weights'): weights = evaluator.get('rollout:weights') else: weights = torch.ones_like(rewards_tensor) loss_value = torch.mean(weights * original_losses) loss_value.backward() with torch.no_grad(): mean_q_model = 
(self.support_atoms.unsqueeze(0) * torch.exp(q_log_histogram_selected)).sum(dim=1).mean() mean_q_target = (self.support_atoms.unsqueeze(0) * value_histogram_projected).sum(dim=1).mean() return { 'loss': loss_value.item(), # We need it to update priorities in the replay buffer: 'errors': original_losses.detach().cpu().numpy(), 'average_q_selected': mean_q_model.item(), 'average_q_target': mean_q_target.item() }
[ "def", "calculate_gradient", "(", "self", ",", "batch_info", ",", "device", ",", "model", ",", "rollout", ")", ":", "evaluator", "=", "model", ".", "evaluate", "(", "rollout", ")", "batch_size", "=", "rollout", ".", "frames", "(", ")", "dones_tensor", "=", "evaluator", ".", "get", "(", "'rollout:dones'", ")", "rewards_tensor", "=", "evaluator", ".", "get", "(", "'rollout:rewards'", ")", "assert", "dones_tensor", ".", "dtype", "==", "torch", ".", "float32", "with", "torch", ".", "no_grad", "(", ")", ":", "target_evaluator", "=", "self", ".", "target_model", ".", "evaluate", "(", "rollout", ")", "if", "self", ".", "double_dqn", ":", "# DOUBLE DQN", "# Histogram gets returned as logits initially, we need to exp it before projection", "target_value_histogram_for_all_actions", "=", "target_evaluator", ".", "get", "(", "'model:q_dist_next'", ")", ".", "exp", "(", ")", "model_value_histogram_for_all_actions", "=", "evaluator", ".", "get", "(", "'model:q_dist_next'", ")", ".", "exp", "(", ")", "atoms_aligned", "=", "self", ".", "support_atoms", ".", "view", "(", "1", ",", "1", ",", "self", ".", "num_atoms", ")", "selected_action_indices", "=", "(", "(", "atoms_aligned", "*", "model_value_histogram_for_all_actions", ")", ".", "sum", "(", "dim", "=", "-", "1", ")", ".", "argmax", "(", "dim", "=", "1", ")", ")", "# Select largest 'target' value based on action that 'model' selects", "next_value_histograms", "=", "(", "target_value_histogram_for_all_actions", "[", "range", "(", "batch_size", ")", ",", "selected_action_indices", "]", ")", "else", ":", "# REGULAR DQN", "# Histogram gets returned as logits initially, we need to exp it before projection", "target_value_histogram_for_all_actions", "=", "target_evaluator", ".", "get", "(", "'model:q_dist_next'", ")", ".", "exp", "(", ")", "atoms_aligned", "=", "self", ".", "support_atoms", ".", "view", "(", "1", ",", "1", ",", "self", ".", "num_atoms", ")", "selected_action_indices", "=", "(", "(", "atoms_aligned", "*", "target_value_histogram_for_all_actions", ")", ".", "sum", "(", "dim", "=", "-", "1", ")", ".", "argmax", "(", "dim", "=", "1", ")", ")", "next_value_histograms", "=", "(", "target_value_histogram_for_all_actions", "[", "range", "(", "batch_size", ")", ",", "selected_action_indices", "]", ")", "# HISTOGRAM PROJECTION CODE", "forward_steps", "=", "rollout", ".", "extra_data", ".", "get", "(", "'forward_steps'", ",", "1", ")", "atoms_projected", "=", "(", "rewards_tensor", ".", "unsqueeze", "(", "1", ")", "+", "(", "self", ".", "discount_factor", "**", "forward_steps", ")", "*", "(", "1", "-", "dones_tensor", ")", ".", "unsqueeze", "(", "1", ")", "*", "self", ".", "support_atoms", ".", "unsqueeze", "(", "0", ")", ")", "atoms_projected", "=", "atoms_projected", ".", "clamp", "(", "min", "=", "self", ".", "vmin", ",", "max", "=", "self", ".", "vmax", ")", "projection_indices", "=", "(", "atoms_projected", "-", "self", ".", "vmin", ")", "/", "self", ".", "atom_delta", "index_floor", "=", "projection_indices", ".", "floor", "(", ")", ".", "long", "(", ")", "index_ceil", "=", "projection_indices", ".", "ceil", "(", ")", ".", "long", "(", ")", "# Fix corner case when index_floor == index_ceil", "index_floor", "[", "(", "index_ceil", ">", "0", ")", "*", "(", "index_floor", "==", "index_ceil", ")", "]", "-=", "1", "index_ceil", "[", "(", "index_floor", "<", "(", "self", ".", "num_atoms", "-", "1", ")", ")", "*", "(", "index_floor", "==", "index_ceil", ")", "]", "+=", "1", "value_histogram_projected", "=", "torch", ".", "zeros_like", "(", 
"next_value_histograms", ")", "# Following part will be a bit convoluted, in an effort to fully vectorize projection operation", "# Special offset index tensor", "offsets", "=", "(", "torch", ".", "arange", "(", "0", ",", "batch_size", "*", "self", ".", "num_atoms", ",", "self", ".", "num_atoms", ")", ".", "unsqueeze", "(", "1", ")", ".", "expand", "(", "batch_size", ",", "self", ".", "num_atoms", ")", ".", "contiguous", "(", ")", ".", "view", "(", "-", "1", ")", ".", "to", "(", "device", ")", ")", "# Linearize all the buffers", "value_histogram_projected", "=", "value_histogram_projected", ".", "view", "(", "-", "1", ")", "index_ceil", "=", "index_ceil", ".", "view", "(", "-", "1", ")", "index_floor", "=", "index_floor", ".", "view", "(", "-", "1", ")", "projection_indices", "=", "projection_indices", ".", "view", "(", "-", "1", ")", "value_histogram_projected", ".", "index_add_", "(", "0", ",", "index_floor", "+", "offsets", ",", "(", "next_value_histograms", ".", "view", "(", "-", "1", ")", "*", "(", "index_ceil", ".", "float", "(", ")", "-", "projection_indices", ")", ")", ")", "value_histogram_projected", ".", "index_add_", "(", "0", ",", "index_ceil", "+", "offsets", ",", "(", "next_value_histograms", ".", "view", "(", "-", "1", ")", "*", "(", "projection_indices", "-", "index_floor", ".", "float", "(", ")", ")", ")", ")", "value_histogram_projected", "=", "value_histogram_projected", ".", "reshape", "(", "next_value_histograms", ".", "shape", ")", "q_log_histogram_selected", "=", "evaluator", ".", "get", "(", "'model:action:q_dist'", ")", "# Cross-entropy loss as usual", "original_losses", "=", "-", "(", "value_histogram_projected", "*", "q_log_histogram_selected", ")", ".", "sum", "(", "dim", "=", "1", ")", "if", "evaluator", ".", "is_provided", "(", "'rollout:weights'", ")", ":", "weights", "=", "evaluator", ".", "get", "(", "'rollout:weights'", ")", "else", ":", "weights", "=", "torch", ".", "ones_like", "(", "rewards_tensor", ")", "loss_value", "=", "torch", ".", "mean", "(", "weights", "*", "original_losses", ")", "loss_value", ".", "backward", "(", ")", "with", "torch", ".", "no_grad", "(", ")", ":", "mean_q_model", "=", "(", "self", ".", "support_atoms", ".", "unsqueeze", "(", "0", ")", "*", "torch", ".", "exp", "(", "q_log_histogram_selected", ")", ")", ".", "sum", "(", "dim", "=", "1", ")", ".", "mean", "(", ")", "mean_q_target", "=", "(", "self", ".", "support_atoms", ".", "unsqueeze", "(", "0", ")", "*", "value_histogram_projected", ")", ".", "sum", "(", "dim", "=", "1", ")", ".", "mean", "(", ")", "return", "{", "'loss'", ":", "loss_value", ".", "item", "(", ")", ",", "# We need it to update priorities in the replay buffer:", "'errors'", ":", "original_losses", ".", "detach", "(", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ",", "'average_q_selected'", ":", "mean_q_model", ".", "item", "(", ")", ",", "'average_q_target'", ":", "mean_q_target", ".", "item", "(", ")", "}" ]
42.361345
28.966387
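The histogram projection in the entry above is the core of categorical (C51) distributional DQN. As a sanity check, here is a minimal, self-contained NumPy sketch of the same projection for a single transition; every constant is illustrative and none are taken from the entry above.

import numpy as np

vmin, vmax, num_atoms = -10.0, 10.0, 51
support = np.linspace(vmin, vmax, num_atoms)
delta = (vmax - vmin) / (num_atoms - 1)

reward, done, gamma = 1.0, 0.0, 0.99
next_hist = np.full(num_atoms, 1.0 / num_atoms)  # stand-in target histogram

# Shift/scale the support by the Bellman update, then clamp to [vmin, vmax].
projected_atoms = np.clip(reward + gamma * (1.0 - done) * support, vmin, vmax)
b = (projected_atoms - vmin) / delta
lower, upper = np.floor(b).astype(int), np.ceil(b).astype(int)

# Distribute each atom's probability mass between its two neighbouring bins.
projected = np.zeros(num_atoms)
exact = lower == upper  # mass lands exactly on one bin
np.add.at(projected, lower[~exact], next_hist[~exact] * (upper[~exact] - b[~exact]))
np.add.at(projected, upper[~exact], next_hist[~exact] * (b[~exact] - lower[~exact]))
np.add.at(projected, lower[exact], next_hist[exact])
assert np.isclose(projected.sum(), 1.0)  # projection preserves total mass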
def lfriedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
    This function calculates the Friedman Chi-square test for repeated measures
    and returns the result, along with the associated probability value.
    It assumes 3 or more repeated measures. With only 3 levels, a minimum of
    10 subjects is required; four levels require at least 5 subjects per level.

    Usage:   lfriedmanchisquare(*args)
    Returns: chi-square statistic, associated p-value
    """
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels. Friedman test not appropriate.')
    n = len(args[0])
    data = pstat.abut(*tuple(args))
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    ssbn = 0
    for i in range(k):
        # sum the per-condition rank totals (not the raw scores in args[i])
        ssbn = ssbn + sum(row[i] for row in data)**2
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, chisqprob(chisq, k-1)
[ "def", "lfriedmanchisquare", "(", "*", "args", ")", ":", "k", "=", "len", "(", "args", ")", "if", "k", "<", "3", ":", "raise", "ValueError", "(", "'Less than 3 levels. Friedman test not appropriate.'", ")", "n", "=", "len", "(", "args", "[", "0", "]", ")", "data", "=", "pstat", ".", "abut", "(", "*", "tuple", "(", "args", ")", ")", "for", "i", "in", "range", "(", "len", "(", "data", ")", ")", ":", "data", "[", "i", "]", "=", "rankdata", "(", "data", "[", "i", "]", ")", "ssbn", "=", "0", "for", "i", "in", "range", "(", "k", ")", ":", "ssbn", "=", "ssbn", "+", "sum", "(", "args", "[", "i", "]", ")", "**", "2", "chisq", "=", "12.0", "/", "(", "k", "*", "n", "*", "(", "k", "+", "1", ")", ")", "*", "ssbn", "-", "3", "*", "n", "*", "(", "k", "+", "1", ")", "return", "chisq", ",", "chisqprob", "(", "chisq", ",", "k", "-", "1", ")" ]
36.416667
18.25
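For concreteness, here is a small, self-contained worked example of the same statistic computed from per-subject ranks. The scores are made up and ties are ignored for brevity.

# 4 subjects x 3 conditions of made-up scores.
samples = [[7, 9, 8], [6, 5, 7], [9, 7, 6], [8, 8, 9]]
n, k = len(samples), len(samples[0])

def ranks(row):
    """Rank one subject's scores across the k conditions (1 = smallest)."""
    order = sorted(range(k), key=lambda c: row[c])
    return [order.index(c) + 1 for c in range(k)]

ranked = [ranks(row) for row in samples]
ssbn = sum(sum(row[c] for row in ranked) ** 2 for c in range(k))
chisq = 12.0 / (k * n * (k + 1)) * ssbn - 3 * n * (k + 1)
print(chisq)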
def add(self, data):
    """
    Add incoming data to the buffer. If one or more full lines are found,
    print them (if desired) and pass to callback function.
    """
    data = self._decoder.decode(data)
    if not data:
        return
    self._buffer += data

    if "\n" in data:
        # split off everything up to the last newline; keep the rest buffered
        # (maxsplit=1 guarantees exactly two pieces even with several newlines)
        to_print, remainder = self._buffer.rsplit("\n", 1)
        if self._print:
            try:
                print(to_print)
            except UnicodeEncodeError:
                print(to_print.encode(sys.getdefaultencoding(), errors="replace"))
                if not hasattr(self, "_warned"):
                    logger.warning("output encoding error, characters replaced")
                    setattr(self, "_warned", True)
        if self._callback:
            self._callback(to_print)
        self._buffer = remainder
[ "def", "add", "(", "self", ",", "data", ")", ":", "data", "=", "self", ".", "_decoder", ".", "decode", "(", "data", ")", "if", "not", "data", ":", "return", "self", ".", "_buffer", "+=", "data", "if", "\"\\n\"", "in", "data", ":", "to_print", ",", "remainder", "=", "self", ".", "_buffer", ".", "rsplit", "(", "\"\\n\"", ")", "if", "self", ".", "_print", ":", "try", ":", "print", "(", "to_print", ")", "except", "UnicodeEncodeError", ":", "print", "(", "to_print", ".", "encode", "(", "sys", ".", "getdefaultencoding", "(", ")", ",", "errors", "=", "\"replace\"", ")", ")", "if", "not", "hasattr", "(", "self", ",", "\"_warned\"", ")", ":", "logger", ".", "warning", "(", "\"output encoding error, characters replaced\"", ")", "setattr", "(", "self", ",", "\"_warned\"", ",", "True", ")", "if", "self", ".", "_callback", ":", "self", ".", "_callback", "(", "to_print", ")", "self", ".", "_buffer", "=", "remainder" ]
39.909091
15.454545
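The same incremental-decode-and-split idea, as a standalone sketch over UTF-8 byte chunks (the chunks here are made up):

import codecs

decoder = codecs.getincrementaldecoder("utf-8")()
buffer = ""
for chunk in (b"hel", b"lo\nwor", b"ld\n"):
    buffer += decoder.decode(chunk)
    if "\n" in buffer:
        # everything before the last newline is complete; the tail stays buffered
        complete, _, buffer = buffer.rpartition("\n")
        print(complete)  # prints "hello", then "world"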
def validate(self, export): """ Validates an Export. :param Export export: :rtype: ExportValidationResponse """ target_url = self.client.get_url(self._URL_KEY, 'POST', 'validate') response_object = ExportValidationResponse() r = self.client.request('POST', target_url, json=export._serialize()) return response_object._deserialize(r.json())
[ "def", "validate", "(", "self", ",", "export", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "self", ".", "_URL_KEY", ",", "'POST'", ",", "'validate'", ")", "response_object", "=", "ExportValidationResponse", "(", ")", "r", "=", "self", ".", "client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "export", ".", "_serialize", "(", ")", ")", "return", "response_object", ".", "_deserialize", "(", "r", ".", "json", "(", ")", ")" ]
33.583333
17.75
def _source_info(): """ Get information from the user's code (two frames up) to leave breadcrumbs for file, line, class and function. """ ofi = inspect.getouterframes(inspect.currentframe())[2] try: calling_class = ofi[0].f_locals['self'].__class__ except KeyError: calling_class = None # Tuple of file,line,calling_class,function_name return ofi[1], ofi[2], calling_class, ofi[3]
[ "def", "_source_info", "(", ")", ":", "ofi", "=", "inspect", ".", "getouterframes", "(", "inspect", ".", "currentframe", "(", ")", ")", "[", "2", "]", "try", ":", "calling_class", "=", "ofi", "[", "0", "]", ".", "f_locals", "[", "'self'", "]", ".", "__class__", "except", "KeyError", ":", "calling_class", "=", "None", "# Tuple of file,line,calling_class,function_name", "return", "ofi", "[", "1", "]", ",", "ofi", "[", "2", "]", ",", "calling_class", ",", "ofi", "[", "3", "]" ]
35.083333
14.75
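A standalone sketch of the same frame inspection, using one frame up instead of two since there is no wrapper function here:

import inspect

def who_called_me():
    # index 1 = the immediate caller's frame record
    frame = inspect.getouterframes(inspect.currentframe())[1]
    return frame[1], frame[2], frame[3]  # filename, line number, function name

def caller():
    return who_called_me()

print(caller())  # (<this file>, <line of `return who_called_me()`>, 'caller')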
def remove_service_listener(self, listener):
    """Removes a listener from the set that is currently listening."""
    for browser in list(self.browsers):
        if browser.listener == listener:
            browser.cancel()
            # actually drop the cancelled browser from the collection;
            # `del browser` would only unbind the local name
            self.browsers.remove(browser)
[ "def", "remove_service_listener", "(", "self", ",", "listener", ")", ":", "for", "browser", "in", "self", ".", "browsers", ":", "if", "browser", ".", "listener", "==", "listener", ":", "browser", ".", "cancel", "(", ")", "del", "(", "browser", ")" ]
43.166667
5.166667
def forward(ctx, x, dutyCycles, k, boostStrength):
    """
    Use the boost strength to compute a boost factor for each unit represented
    in x. These factors are used to increase the impact of each unit to improve
    their chances of being chosen. This encourages participation of more columns
    in the learning process.

    The boosting function is a curve defined as:
    boostFactors = exp[ - boostStrength * (dutyCycle - targetDensity)]
    Intuitively this means that units that have been active (i.e. in the top-k)
    at the target activation level have a boost factor of 1, meaning their
    activity is not boosted. Columns whose duty cycle drops too much below that
    of their neighbors are boosted depending on how infrequently they have been
    active. Units that have been active more than the target activation level
    have a boost factor below 1, meaning their activity is suppressed and they
    are less likely to be in the top-k.

    Note that we do not transmit the boosted values. We only use boosting to
    determine the winning units.

    The target activation density for each unit is k / number of units. The
    boostFactor depends on the dutyCycle via an exponential function:

        boostFactor
            ^
            |
            |\
            | \
      1  _  |  \
            |    _
            |      _ _
            |          _ _ _ _
            +--------------------> dutyCycle
               |
          targetDensity

    :param ctx: Place where we can store information we will need to compute
                the gradients for the backward pass.
    :param x: Current activity of each unit.
    :param dutyCycles: The averaged duty cycle of each unit.
    :param k: The activity of the top k units will be allowed to remain, the
              rest are set to zero.
    :param boostStrength: A boost strength of 0.0 has no effect on x.
    :return: A tensor representing the activity of x after k-winner take all.
    """
    if boostStrength > 0.0:
        targetDensity = float(k) / x.size(1)
        boostFactors = torch.exp((targetDensity - dutyCycles) * boostStrength)
        boosted = x.detach() * boostFactors
    else:
        boosted = x.detach()

    # Take the boosted version of the input x, find the top k winners.
    # Compute an output that contains the values of x corresponding to the top k
    # boosted values
    res = torch.zeros_like(x)
    topk, indices = boosted.topk(k, sorted=False)
    for i in range(x.shape[0]):
        res[i, indices[i]] = x[i, indices[i]]

    ctx.save_for_backward(indices)
    return res
[ "def", "forward", "(", "ctx", ",", "x", ",", "dutyCycles", ",", "k", ",", "boostStrength", ")", ":", "if", "boostStrength", ">", "0.0", ":", "targetDensity", "=", "float", "(", "k", ")", "/", "x", ".", "size", "(", "1", ")", "boostFactors", "=", "torch", ".", "exp", "(", "(", "targetDensity", "-", "dutyCycles", ")", "*", "boostStrength", ")", "boosted", "=", "x", ".", "detach", "(", ")", "*", "boostFactors", "else", ":", "boosted", "=", "x", ".", "detach", "(", ")", "# Take the boosted version of the input x, find the top k winners.", "# Compute an output that contains the values of x corresponding to the top k", "# boosted values", "res", "=", "torch", ".", "zeros_like", "(", "x", ")", "topk", ",", "indices", "=", "boosted", ".", "topk", "(", "k", ",", "sorted", "=", "False", ")", "for", "i", "in", "range", "(", "x", ".", "shape", "[", "0", "]", ")", ":", "res", "[", "i", ",", "indices", "[", "i", "]", "]", "=", "x", "[", "i", ",", "indices", "[", "i", "]", "]", "ctx", ".", "save_for_backward", "(", "indices", ")", "return", "res" ]
35.808219
24.191781
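A minimal PyTorch sketch of boosted k-winner selection with illustrative values, vectorized with scatter_ rather than the per-row loop above:

import torch

x = torch.tensor([[0.2, 0.9, 0.4, 0.7, 0.1]])
duty_cycles = torch.tensor([0.5, 0.1, 0.2, 0.1, 0.1])
k, boost_strength = 2, 1.5

target_density = k / x.size(1)
boost = torch.exp((target_density - duty_cycles) * boost_strength)
_, winners = (x * boost).topk(k, dim=1)  # winners chosen on boosted values

out = torch.zeros_like(x)
out.scatter_(1, winners, x.gather(1, winners))
print(out)  # only the k boosted winners keep their original activations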
def context(src): """ Used to add the source_id to the error message. To be used as with context(src): operation_with(src) Typically the operation is filtering a source, that can fail for tricky geometries. """ try: yield except Exception: etype, err, tb = sys.exc_info() msg = 'An error occurred with source id=%s. Error: %s' msg %= (src.source_id, err) raise_(etype, msg, tb)
[ "def", "context", "(", "src", ")", ":", "try", ":", "yield", "except", "Exception", ":", "etype", ",", "err", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'An error occurred with source id=%s. Error: %s'", "msg", "%=", "(", "src", ".", "source_id", ",", "err", ")", "raise_", "(", "etype", ",", "msg", ",", "tb", ")" ]
26.058824
18.882353
def games(years, months=None, days=None, home=None, away=None): """Return a list of lists of games for multiple days. If home and away are the same team, it will return all games for that team. """ # put in data if months and days are not specified if months is None: months = list(range(1, 13)) if days is None: days = list(range(1, 32)) results = [] # check if lists, if not make lists # allows users to input either numbers or lists if not isinstance(years, list): years = [years] if not isinstance(months, list): months = [months] if not isinstance(days, list): days = [days] for i in years: for y in months: # get the days in a month daysinmonth = calendar.monthrange(i, y)[1] for x in days: if daysinmonth >= x: # use the day function to get data for each day in range game = day(i, y, x, home=home, away=away) if game: results.append(game) return results
[ "def", "games", "(", "years", ",", "months", "=", "None", ",", "days", "=", "None", ",", "home", "=", "None", ",", "away", "=", "None", ")", ":", "# put in data if months and days are not specified", "if", "months", "is", "None", ":", "months", "=", "list", "(", "range", "(", "1", ",", "13", ")", ")", "if", "days", "is", "None", ":", "days", "=", "list", "(", "range", "(", "1", ",", "32", ")", ")", "results", "=", "[", "]", "# check if lists, if not make lists", "# allows users to input either numbers or lists", "if", "not", "isinstance", "(", "years", ",", "list", ")", ":", "years", "=", "[", "years", "]", "if", "not", "isinstance", "(", "months", ",", "list", ")", ":", "months", "=", "[", "months", "]", "if", "not", "isinstance", "(", "days", ",", "list", ")", ":", "days", "=", "[", "days", "]", "for", "i", "in", "years", ":", "for", "y", "in", "months", ":", "# get the days in a month", "daysinmonth", "=", "calendar", ".", "monthrange", "(", "i", ",", "y", ")", "[", "1", "]", "for", "x", "in", "days", ":", "if", "daysinmonth", ">=", "x", ":", "# use the day function to get data for each day in range", "game", "=", "day", "(", "i", ",", "y", ",", "x", ",", "home", "=", "home", ",", "away", "=", "away", ")", "if", "game", ":", "results", ".", "append", "(", "game", ")", "return", "results" ]
35.766667
14.5
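Two stdlib pieces the function leans on, shown standalone: scalar-to-list normalization, and calendar.monthrange for the number of days in a month.

import calendar

def as_list(value):
    """Wrap scalars in a list so callers can pass either form."""
    return value if isinstance(value, list) else [value]

print(as_list(2015), as_list([6, 7]))   # [2015] [6, 7]
print(calendar.monthrange(2015, 2)[1])  # 28 days in February 2015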
def _handle_end_way(self): """ Handle closing way element """ self._result.append(Way(result=self._result, **self._curr)) self._curr = {}
[ "def", "_handle_end_way", "(", "self", ")", ":", "self", ".", "_result", ".", "append", "(", "Way", "(", "result", "=", "self", ".", "_result", ",", "*", "*", "self", ".", "_curr", ")", ")", "self", ".", "_curr", "=", "{", "}" ]
28.666667
10.666667
def read_zipfile(self, encoding='utf8'): """ READ FIRST FILE IN ZIP FILE :param encoding: :return: STRING """ from zipfile import ZipFile with ZipFile(self.abspath) as zipped: for num, zip_name in enumerate(zipped.namelist()): return zipped.open(zip_name).read().decode(encoding)
[ "def", "read_zipfile", "(", "self", ",", "encoding", "=", "'utf8'", ")", ":", "from", "zipfile", "import", "ZipFile", "with", "ZipFile", "(", "self", ".", "abspath", ")", "as", "zipped", ":", "for", "num", ",", "zip_name", "in", "enumerate", "(", "zipped", ".", "namelist", "(", ")", ")", ":", "return", "zipped", ".", "open", "(", "zip_name", ")", ".", "read", "(", ")", ".", "decode", "(", "encoding", ")" ]
35.4
9.8
def csv_to_list_of_dicts(lines: List[str], csvheader: str, quotechar: str = '"') -> List[Dict[str, str]]: """ Extracts data from a list of CSV lines (starting with a defined header line) embedded in a longer text block but ending with a blank line. Args: lines: CSV lines csvheader: CSV header line quotechar: ``quotechar`` parameter passed to :func:`csv.reader` Returns: list of dictionaries mapping fieldnames (from the header) to values """ data = [] # type: List[Dict[str, str]] # an empty line marks the end of the block csvlines = get_lines_from_to(lines, csvheader, [None])[1:] # ... remove the CSV header headerfields = csvheader.split(",") reader = csv.reader(csvlines, quotechar=quotechar) for fields in reader: row = {} # type: Dict[str, str] for f in range(len(headerfields)): row[headerfields[f]] = fields[f] data.append(row) return data
[ "def", "csv_to_list_of_dicts", "(", "lines", ":", "List", "[", "str", "]", ",", "csvheader", ":", "str", ",", "quotechar", ":", "str", "=", "'\"'", ")", "->", "List", "[", "Dict", "[", "str", ",", "str", "]", "]", ":", "data", "=", "[", "]", "# type: List[Dict[str, str]]", "# an empty line marks the end of the block", "csvlines", "=", "get_lines_from_to", "(", "lines", ",", "csvheader", ",", "[", "None", "]", ")", "[", "1", ":", "]", "# ... remove the CSV header", "headerfields", "=", "csvheader", ".", "split", "(", "\",\"", ")", "reader", "=", "csv", ".", "reader", "(", "csvlines", ",", "quotechar", "=", "quotechar", ")", "for", "fields", "in", "reader", ":", "row", "=", "{", "}", "# type: Dict[str, str]", "for", "f", "in", "range", "(", "len", "(", "headerfields", ")", ")", ":", "row", "[", "headerfields", "[", "f", "]", "]", "=", "fields", "[", "f", "]", "data", ".", "append", "(", "row", ")", "return", "data" ]
35.785714
17.214286
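The same header-keyed CSV parsing, reduced to a standalone demo with made-up data:

import csv

header = "name,age"
lines = ['alice,"30"', 'bob,41']
fields = header.split(",")
rows = [dict(zip(fields, rec)) for rec in csv.reader(lines, quotechar='"')]
print(rows)  # [{'name': 'alice', 'age': '30'}, {'name': 'bob', 'age': '41'}]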
def process_normal(_dict):
    """
    This method processes ``_dict`` into a dict of correctly typed values that
    can be passed to the class constructor. It is imported and called by the
    main csv uploader function.
    """
    cooked_dict = group_raw_to_formatted_string_dict(_dict)
    data_class = cooked_dict.pop('amaasclass', '')
    children_class_dict = direct_to_class(data_class)
    tasty_dict = dict()
    for cooked_key, cooked_value in cooked_dict.items():
        if cooked_key in CHILDREN_SIGNAL:
            processed_dict = {cooked_key: formatted_string_to_others(cooked_value, children_class_dict[cooked_key])}
        elif cooked_key == 'links':
            processed_dict = {cooked_key: formatted_string_to_links(cooked_value, children_class_dict[cooked_key])}
        else:
            processed_dict = {cooked_key: process_value_with_header(cooked_key, cooked_value)}
        tasty_dict.update(processed_dict)
    return tasty_dict
[ "def", "process_normal", "(", "_dict", ")", ":", "cooked_dict", "=", "group_raw_to_formatted_string_dict", "(", "_dict", ")", "data_class", "=", "cooked_dict", ".", "pop", "(", "'amaasclass'", ",", "''", ")", "children_class_dict", "=", "direct_to_class", "(", "data_class", ")", "tasty_dict", "=", "dict", "(", ")", "for", "cooked_key", ",", "cooked_value", "in", "cooked_dict", ".", "items", "(", ")", ":", "if", "cooked_key", "in", "CHILDREN_SIGNAL", ":", "processed_dict", "=", "{", "cooked_key", ":", "formatted_string_to_others", "(", "cooked_value", ",", "children_class_dict", "[", "cooked_key", "]", ")", "}", "elif", "cooked_key", "==", "'links'", ":", "processed_dict", "=", "{", "cooked_key", ":", "formatted_string_to_links", "(", "cooked_value", ",", "children_class_dict", "[", "cooked_key", "]", ")", "}", "else", ":", "processed_dict", "=", "{", "cooked_key", ":", "process_value_with_header", "(", "cooked_key", ",", "cooked_value", ")", "}", "tasty_dict", ".", "update", "(", "processed_dict", ")", "return", "tasty_dict" ]
50.722222
23.5
def sessionize(user_events, cutoff=defaults.CUTOFF):
    """
    Clusters user sessions from a sequence of user events. Note that `event`
    data will simply be returned in the case of a revert.

    This function serves as a convenience wrapper around calls to
    :class:`~mw.lib.sessions.Cache`'s :meth:`~mw.lib.sessions.Cache.process`
    method.

    :Parameters:
        user_events : iter( (user, timestamp, event) )
            an iterable over tuples of user, timestamp and event data.

            * user : `hashable`
            * timestamp : :class:`mw.Timestamp`
            * event : `mixed`
        cutoff : int
            the maximum time between events within a user session

    :Returns:
        an iterator over :class:`~mw.lib.sessions.Session`

    :Example:
        >>> import mwsessions
        >>>
        >>> user_events = [
        ...     ("Willy on wheels", 20150101000000, {'rev_id': 1}),
        ...     ("Walter", 20150101000001, {'rev_id': 2}),
        ...     ("Willy on wheels", 20150101000001, {'rev_id': 3}),
        ...     ("Walter", 100035, {'rev_id': 4}),
        ...     ("Willy on wheels", 103602, {'rev_id': 5})
        ... ]
        >>>
        >>> for user, events in mwsessions.sessionize(user_events):
        ...     (user, events)
        ...
        ('Willy on wheels', [{'rev_id': 1}, {'rev_id': 3}])
        ('Walter', [{'rev_id': 2}, {'rev_id': 4}])
        ('Willy on wheels', [{'rev_id': 5}])
    """
    # Construct the session manager
    sessionizer = Sessionizer(cutoff)

    # Apply the events
    for user, timestamp, event in user_events:
        for session in sessionizer.process(user, timestamp, event):
            yield session

    # Yield the left-overs
    for session in sessionizer.get_active_sessions():
        yield session
[ "def", "sessionize", "(", "user_events", ",", "cutoff", "=", "defaults", ".", "CUTOFF", ")", ":", "# Construct the session manager", "sessionizer", "=", "Sessionizer", "(", "cutoff", ")", "# Apply the events", "for", "user", ",", "timestamp", ",", "event", "in", "user_events", ":", "for", "session", "in", "sessionizer", ".", "process", "(", "user", ",", "timestamp", ",", "event", ")", ":", "yield", "session", "# Yield the left-overs", "for", "session", "in", "sessionizer", ".", "get_active_sessions", "(", ")", ":", "yield", "session" ]
31.285714
22.785714
def write_to_file(self, file_path: str) -> None: """ Serialize and write the data into a JSON file. """ data = self.encode() with open(file_path, "w") as f: json.dump(data, f, indent=1)
[ "def", "write_to_file", "(", "self", ",", "file_path", ":", "str", ")", "->", "None", ":", "data", "=", "self", ".", "encode", "(", ")", "with", "open", "(", "file_path", ",", "\"w\"", ")", "as", "f", ":", "json", ".", "dump", "(", "data", ",", "f", ",", "indent", "=", "1", ")" ]
43.4
4.2
def _whitelist_blacklist(self, os_name): ''' Determines if the OS should be ignored, depending on the whitelist-blacklist logic configured by the user. ''' return napalm_logs.ext.check_whitelist_blacklist(os_name, whitelist=self.device_whitelist, blacklist=self.device_blacklist)
[ "def", "_whitelist_blacklist", "(", "self", ",", "os_name", ")", ":", "return", "napalm_logs", ".", "ext", ".", "check_whitelist_blacklist", "(", "os_name", ",", "whitelist", "=", "self", ".", "device_whitelist", ",", "blacklist", "=", "self", ".", "device_blacklist", ")" ]
48.111111
23
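A simplified standalone version of the same check; the real napalm-logs helper is more featureful, so this sketch uses shell-style globs only:

import fnmatch

def check_whitelist_blacklist(name, whitelist=None, blacklist=None):
    """Whitelist wins if given; otherwise anything not blacklisted passes."""
    if whitelist:
        return any(fnmatch.fnmatch(name, pat) for pat in whitelist)
    if blacklist:
        return not any(fnmatch.fnmatch(name, pat) for pat in blacklist)
    return True

print(check_whitelist_blacklist('junos', whitelist=['junos', 'iosxr']))  # True
print(check_whitelist_blacklist('eos', blacklist=['eos']))               # False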
def _build_cookie_jar(cls, session: AppSession): '''Build the cookie jar''' if not session.args.cookies: return if session.args.load_cookies or session.args.save_cookies: session.factory.set('CookieJar', BetterMozillaCookieJar) cookie_jar = session.factory.new('CookieJar') if session.args.load_cookies: cookie_jar.load(session.args.load_cookies, ignore_discard=True) else: cookie_jar = session.factory.new('CookieJar') policy = session.factory.new('CookiePolicy', cookie_jar=cookie_jar) cookie_jar.set_policy(policy) _logger.debug(__('Loaded cookies: {0}', list(cookie_jar))) cookie_jar_wrapper = session.factory.new( 'CookieJarWrapper', cookie_jar, save_filename=session.args.save_cookies, keep_session_cookies=session.args.keep_session_cookies, ) return cookie_jar_wrapper
[ "def", "_build_cookie_jar", "(", "cls", ",", "session", ":", "AppSession", ")", ":", "if", "not", "session", ".", "args", ".", "cookies", ":", "return", "if", "session", ".", "args", ".", "load_cookies", "or", "session", ".", "args", ".", "save_cookies", ":", "session", ".", "factory", ".", "set", "(", "'CookieJar'", ",", "BetterMozillaCookieJar", ")", "cookie_jar", "=", "session", ".", "factory", ".", "new", "(", "'CookieJar'", ")", "if", "session", ".", "args", ".", "load_cookies", ":", "cookie_jar", ".", "load", "(", "session", ".", "args", ".", "load_cookies", ",", "ignore_discard", "=", "True", ")", "else", ":", "cookie_jar", "=", "session", ".", "factory", ".", "new", "(", "'CookieJar'", ")", "policy", "=", "session", ".", "factory", ".", "new", "(", "'CookiePolicy'", ",", "cookie_jar", "=", "cookie_jar", ")", "cookie_jar", ".", "set_policy", "(", "policy", ")", "_logger", ".", "debug", "(", "__", "(", "'Loaded cookies: {0}'", ",", "list", "(", "cookie_jar", ")", ")", ")", "cookie_jar_wrapper", "=", "session", ".", "factory", ".", "new", "(", "'CookieJarWrapper'", ",", "cookie_jar", ",", "save_filename", "=", "session", ".", "args", ".", "save_cookies", ",", "keep_session_cookies", "=", "session", ".", "args", ".", "keep_session_cookies", ",", ")", "return", "cookie_jar_wrapper" ]
31.966667
24.366667
def addEnvPath(env, name, value): """ concat a path for this name """ try: oldval = env[name] if not oldval.endswith(';'): oldval = oldval + ';' except KeyError: oldval = "" if not value.endswith(';'): value = value + ';' env[name] = oldval + value
[ "def", "addEnvPath", "(", "env", ",", "name", ",", "value", ")", ":", "try", ":", "oldval", "=", "env", "[", "name", "]", "if", "not", "oldval", ".", "endswith", "(", "';'", ")", ":", "oldval", "=", "oldval", "+", "';'", "except", "KeyError", ":", "oldval", "=", "\"\"", "if", "not", "value", ".", "endswith", "(", "';'", ")", ":", "value", "=", "value", "+", "';'", "env", "[", "name", "]", "=", "oldval", "+", "value" ]
27.454545
12.454545
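Assuming the addEnvPath function above is in scope, its semicolon handling behaves like this:

env = {}
addEnvPath(env, "PATH", r"C:\tools")   # fresh entry: KeyError path, oldval = ""
addEnvPath(env, "PATH", r"C:\bin;")    # trailing ';' already present, kept as-is
print(env["PATH"])                     # C:\tools;C:\bin;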
def set_cookie(self, key, value, domain=None, path='/', secure=False, httponly=True): """Set a cookie. Args: key (:obj:`str`): Cookie name value (:obj:`str`): Cookie value domain (:obj:`str`): Cookie domain path (:obj:`str`): Cookie value secure (:obj:`bool`): True if secure, False otherwise httponly (:obj:`bool`): True if it's a HTTP only cookie, False otherwise """ self._cookies[key] = value if domain: self._cookies[key]['domain'] = domain if path: self._cookies[key]['path'] = path if secure: self._cookies[key]['secure'] = secure if httponly: self._cookies[key]['httponly'] = httponly
[ "def", "set_cookie", "(", "self", ",", "key", ",", "value", ",", "domain", "=", "None", ",", "path", "=", "'/'", ",", "secure", "=", "False", ",", "httponly", "=", "True", ")", ":", "self", ".", "_cookies", "[", "key", "]", "=", "value", "if", "domain", ":", "self", ".", "_cookies", "[", "key", "]", "[", "'domain'", "]", "=", "domain", "if", "path", ":", "self", ".", "_cookies", "[", "key", "]", "[", "'path'", "]", "=", "path", "if", "secure", ":", "self", ".", "_cookies", "[", "key", "]", "[", "'secure'", "]", "=", "secure", "if", "httponly", ":", "self", ".", "_cookies", "[", "key", "]", "[", "'httponly'", "]", "=", "httponly" ]
35.954545
14.545455
def visible_object_groups(self): """Return iterator of object group indexes that are set 'visible' :rtype: Iterator """ return (i for (i, l) in enumerate(self.layers) if l.visible and isinstance(l, TiledObjectGroup))
[ "def", "visible_object_groups", "(", "self", ")", ":", "return", "(", "i", "for", "(", "i", ",", "l", ")", "in", "enumerate", "(", "self", ".", "layers", ")", "if", "l", ".", "visible", "and", "isinstance", "(", "l", ",", "TiledObjectGroup", ")", ")" ]
37
14.714286
def isdir(self, relpath, rsc=None): """ Returns whether or not the resource is a directory. :return <bool> """ filepath = self.find(relpath, rsc) if filepath.startswith(':'): resource = QtCore.QResource(filepath) return not resource.isFile() else: return os.path.isdir(filepath)
[ "def", "isdir", "(", "self", ",", "relpath", ",", "rsc", "=", "None", ")", ":", "filepath", "=", "self", ".", "find", "(", "relpath", ",", "rsc", ")", "if", "filepath", ".", "startswith", "(", "':'", ")", ":", "resource", "=", "QtCore", ".", "QResource", "(", "filepath", ")", "return", "not", "resource", ".", "isFile", "(", ")", "else", ":", "return", "os", ".", "path", ".", "isdir", "(", "filepath", ")" ]
31.916667
9.416667
def _get_asym_alpha_tag(self, a, b):
    """
    Find asymmetry from cryo oven with alpha detectors.

    a: list of alpha detector histograms (each helicity)
    b: list of beta detector histograms (each helicity)  1+ 1- 2+ 2-
    """

    # beta in coincidence with alpha
    coin = a[:4]

    # beta coincidence with no alpha
    no_coin = a[4:8]

    # get split-helicity asymmetries
    hel_coin = self._get_asym_hel(coin)
    hel_no_coin = self._get_asym_hel(no_coin)
    hel_reg = self._get_asym_hel(b)

    # get combined helicities
    com_coin = self._get_asym_comb(coin)
    com_no_coin = self._get_asym_comb(no_coin)
    com_reg = self._get_asym_comb(b)

    # make output
    return (hel_coin, hel_no_coin, hel_reg, com_coin, com_no_coin, com_reg)
[ "def", "_get_asym_alpha_tag", "(", "self", ",", "a", ",", "b", ")", ":", "# beta in coincidence with alpha", "coin", "=", "a", "[", ":", "4", "]", "# beta coincidence with no alpha", "no_coin", "=", "a", "[", "4", ":", "8", "]", "# get split helicity asym from ", "hel_coin", "=", "self", ".", "_get_asym_hel", "(", "coin", ")", "hel_no_coin", "=", "self", ".", "_get_asym_hel", "(", "no_coin", ")", "hel_reg", "=", "self", ".", "_get_asym_hel", "(", "b", ")", "# get combined helicities", "com_coin", "=", "self", ".", "_get_asym_comb", "(", "coin", ")", "com_no_coin", "=", "self", ".", "_get_asym_comb", "(", "no_coin", ")", "com_reg", "=", "self", ".", "_get_asym_comb", "(", "b", ")", "# make output", "return", "(", "hel_coin", ",", "hel_no_coin", ",", "hel_reg", ",", "com_coin", ",", "com_no_coin", ",", "com_reg", ")" ]
34.44
17
def close(self): """Close all connections """ keys = set(self._conns.keys()) for key in keys: self.stop_socket(key) self._conns = {}
[ "def", "close", "(", "self", ")", ":", "keys", "=", "set", "(", "self", ".", "_conns", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "self", ".", "stop_socket", "(", "key", ")", "self", ".", "_conns", "=", "{", "}" ]
19.777778
16.111111
def emoji_list(server, n=1): """return a list of `n` random emoji""" global EMOJI if EMOJI is None: EMOJI = EmojiCache(server) return EMOJI.get(n)
[ "def", "emoji_list", "(", "server", ",", "n", "=", "1", ")", ":", "global", "EMOJI", "if", "EMOJI", "is", "None", ":", "EMOJI", "=", "EmojiCache", "(", "server", ")", "return", "EMOJI", ".", "get", "(", "n", ")" ]
27.5
13
def get_disk_labels(self): """ Creates a mapping of device nodes to filesystem labels """ path = '/dev/disk/by-label/' labels = {} if not os.path.isdir(path): return labels for label in os.listdir(path): label = label.replace('\\x2f', '/') device = os.path.realpath(path + '/' + label) labels[device] = label return labels
[ "def", "get_disk_labels", "(", "self", ")", ":", "path", "=", "'/dev/disk/by-label/'", "labels", "=", "{", "}", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "labels", "for", "label", "in", "os", ".", "listdir", "(", "path", ")", ":", "label", "=", "label", ".", "replace", "(", "'\\\\x2f'", ",", "'/'", ")", "device", "=", "os", ".", "path", ".", "realpath", "(", "path", "+", "'/'", "+", "label", ")", "labels", "[", "device", "]", "=", "label", "return", "labels" ]
28.133333
14.133333
def profile_detail( request, username, template_name=accounts_settings.ACCOUNTS_PROFILE_DETAIL_TEMPLATE, extra_context=None, **kwargs): """ Detailed view of an user. :param username: String of the username of which the profile should be viewed. :param template_name: String representing the template name that should be used to display the profile. :param extra_context: Dictionary of variables which should be supplied to the template. The ``profile`` key is always the current profile. **Context** ``profile`` Instance of the currently viewed ``Profile``. """ user = get_object_or_404(get_user_model(), username__iexact=username) profile_model = get_profile_model() try: profile = user.get_profile() except profile_model.DoesNotExist: profile = profile_model(user=user) profile.save() if not profile.can_view_profile(request.user): return HttpResponseForbidden(_("You don't have permission to view this profile.")) if not extra_context: extra_context = dict() extra_context['profile'] = user.get_profile() return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
[ "def", "profile_detail", "(", "request", ",", "username", ",", "template_name", "=", "accounts_settings", ".", "ACCOUNTS_PROFILE_DETAIL_TEMPLATE", ",", "extra_context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "user", "=", "get_object_or_404", "(", "get_user_model", "(", ")", ",", "username__iexact", "=", "username", ")", "profile_model", "=", "get_profile_model", "(", ")", "try", ":", "profile", "=", "user", ".", "get_profile", "(", ")", "except", "profile_model", ".", "DoesNotExist", ":", "profile", "=", "profile_model", "(", "user", "=", "user", ")", "profile", ".", "save", "(", ")", "if", "not", "profile", ".", "can_view_profile", "(", "request", ".", "user", ")", ":", "return", "HttpResponseForbidden", "(", "_", "(", "\"You don't have permission to view this profile.\"", ")", ")", "if", "not", "extra_context", ":", "extra_context", "=", "dict", "(", ")", "extra_context", "[", "'profile'", "]", "=", "user", ".", "get_profile", "(", ")", "return", "ExtraContextTemplateView", ".", "as_view", "(", "template_name", "=", "template_name", ",", "extra_context", "=", "extra_context", ")", "(", "request", ")" ]
33.692308
20.923077
def supported_tasks(self, lang=None):
    """Tasks that are supported, optionally restricted to those covering a
    specific language.

    Args:
      lang (string): Language code name.
    """
    if lang:
        collection = self.get_collection(lang=lang)
        return [x.id.split('.')[0] for x in collection.packages]
    else:
        return [x.name.split()[0] for x in self.collections() if Downloader.TASK_PREFIX in x.id]
[ "def", "supported_tasks", "(", "self", ",", "lang", "=", "None", ")", ":", "if", "lang", ":", "collection", "=", "self", ".", "get_collection", "(", "lang", "=", "lang", ")", "return", "[", "x", ".", "id", ".", "split", "(", "'.'", ")", "[", "0", "]", "for", "x", "in", "collection", ".", "packages", "]", "else", ":", "return", "[", "x", ".", "name", ".", "split", "(", ")", "[", "0", "]", "for", "x", "in", "self", ".", "collections", "(", ")", "if", "Downloader", ".", "TASK_PREFIX", "in", "x", ".", "id", "]" ]
33.818182
19.818182
def validate(self, value):
    """
    Ensures that the password meets the configured criteria: ASCII-only
    (unless unicode is allowed), minimum length, any required uppercase,
    lowercase, number and wildcard characters, and no invalid characters.

    :param value: <str>

    :return: True
    """
    if not isinstance(value, (str, unicode)):
        raise orb.errors.ColumnValidationError(self, 'Invalid password.')

    elif not self.__allowUnicode and value != projex.text.toAscii(value):
        raise orb.errors.ColumnValidationError(self, 'Only ASCII characters are allowed for your password.')

    elif len(value) < self.__minlength or \
            (self.__requireUppercase and not re.search('[A-Z]', value)) or \
            (self.__requireLowercase and not re.search('[a-z]', value)) or \
            (self.__requireNumber and not re.search('[0-9]', value)) or \
            (self.__requireWildcard and not re.search('[^a-zA-Z0-9]', value)):
        raise orb.errors.ColumnValidationError(self, self.rules())

    # check for invalid characters
    elif self.__invalidCharacters and re.search(self.__invalidCharacters, value):
        raise orb.errors.ColumnValidationError(self, self.__invalidCharactersRule)

    else:
        return super(PasswordColumn, self).validate(value)
[ "def", "validate", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "str", ",", "unicode", ")", ")", ":", "raise", "orb", ".", "errors", ".", "ColumnValidationError", "(", "self", ",", "'Invalid password.'", ")", "elif", "not", "self", ".", "__allowUnicode", "and", "value", "!=", "projex", ".", "text", ".", "toAscii", "(", "value", ")", ":", "raise", "orb", ".", "errors", ".", "ColumnValidationError", "(", "self", ",", "'Only ASCII characters are allowed for your password.'", ")", "elif", "len", "(", "value", ")", "<", "self", ".", "__minlength", "or", "(", "self", ".", "__requireUppercase", "and", "not", "re", ".", "search", "(", "'[A-Z]'", ",", "value", ")", ")", "or", "(", "self", ".", "__requireLowercase", "and", "not", "re", ".", "search", "(", "'[a-z]'", ",", "value", ")", ")", "or", "(", "self", ".", "__requireNumber", "and", "not", "re", ".", "search", "(", "'[0-9]'", ",", "value", ")", ")", "or", "(", "self", ".", "__requireWildcard", "and", "not", "re", ".", "search", "(", "'[^a-zA-Z0-9]'", ",", "value", ")", ")", ":", "raise", "orb", ".", "errors", ".", "ColumnValidationError", "(", "self", ",", "self", ".", "rules", "(", ")", ")", "# check for invalid characters", "elif", "self", ".", "__invalidCharacters", "and", "re", ".", "search", "(", "self", ".", "__invalidCharacters", ",", "value", ")", ":", "raise", "orb", ".", "errors", ".", "ColumnValidationError", "(", "self", ",", "self", ".", "__invalidCharactersRule", ")", "else", ":", "return", "super", "(", "PasswordColumn", ",", "self", ")", ".", "validate", "(", "value", ")" ]
44.296296
29.777778
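The rule checks above, restated as a standalone predicate with made-up defaults:

import re

def meets_rules(value, minlength=8):
    """True if value satisfies length, case, digit and wildcard rules."""
    return (len(value) >= minlength
            and re.search('[A-Z]', value) is not None
            and re.search('[a-z]', value) is not None
            and re.search('[0-9]', value) is not None
            and re.search('[^a-zA-Z0-9]', value) is not None)

print(meets_rules('Tr0ub4dor&3'))  # True
print(meets_rules('weak'))         # False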
def get_datacenter(self, datacenter_id, depth=1): """ Retrieves a data center by its ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` """ response = self._perform_request( '/datacenters/%s?depth=%s' % (datacenter_id, str(depth))) return response
[ "def", "get_datacenter", "(", "self", ",", "datacenter_id", ",", "depth", "=", "1", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "'/datacenters/%s?depth=%s'", "%", "(", "datacenter_id", ",", "str", "(", "depth", ")", ")", ")", "return", "response" ]
29.866667
18.133333
def debug_tag(self, tag): """Setter for the debug tag. By default, the tag is the serial of the device, but sometimes it may be more descriptive to use a different tag of the user's choice. Changing debug tag changes part of the prefix of debug info emitted by this object, like log lines and the message of DeviceError. Example: By default, the device's serial number is used: 'INFO [AndroidDevice|abcdefg12345] One pending call ringing.' The tag can be customized with `ad.debug_tag = 'Caller'`: 'INFO [AndroidDevice|Caller] One pending call ringing.' """ self.log.info('Logging debug tag set to "%s"', tag) self._debug_tag = tag self.log.extra['tag'] = tag
[ "def", "debug_tag", "(", "self", ",", "tag", ")", ":", "self", ".", "log", ".", "info", "(", "'Logging debug tag set to \"%s\"'", ",", "tag", ")", "self", ".", "_debug_tag", "=", "tag", "self", ".", "log", ".", "extra", "[", "'tag'", "]", "=", "tag" ]
43.388889
24.666667
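How a 'tag' extra surfaces in log lines, shown with stdlib logging; the format string here is illustrative:

import logging

logging.basicConfig(format='%(levelname)s [AndroidDevice|%(tag)s] %(message)s')
log = logging.LoggerAdapter(logging.getLogger('device'), {'tag': 'Caller'})
log.warning('One pending call ringing.')
# -> WARNING [AndroidDevice|Caller] One pending call ringing.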
def disconnect(self, token): """ Unregisters a callback for an event topic. @param token: Token of the callback to unregister @type token: dict """ topic = token['topic'] try: arr = self._connects[topic] except KeyError: return arr.remove(token['cb']) if len(arr) == 0: del self._connects[topic]
[ "def", "disconnect", "(", "self", ",", "token", ")", ":", "topic", "=", "token", "[", "'topic'", "]", "try", ":", "arr", "=", "self", ".", "_connects", "[", "topic", "]", "except", "KeyError", ":", "return", "arr", ".", "remove", "(", "token", "[", "'cb'", "]", ")", "if", "len", "(", "arr", ")", "==", "0", ":", "del", "self", ".", "_connects", "[", "topic", "]" ]
26.533333
13.2
def _h_function(self,h): """ private method for the gaussian variogram "h" function Parameters ---------- h : (float or numpy.ndarray) distance(s) Returns ------- h_function : float or numpy.ndarray the value of the "h" function implied by the GauVario """ hh = -1.0 * (h * h) / (self.a * self.a) return self.contribution * np.exp(hh)
[ "def", "_h_function", "(", "self", ",", "h", ")", ":", "hh", "=", "-", "1.0", "*", "(", "h", "*", "h", ")", "/", "(", "self", ".", "a", "*", "self", ".", "a", ")", "return", "self", ".", "contribution", "*", "np", ".", "exp", "(", "hh", ")" ]
25.058824
19.470588
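A quick numeric check of the Gaussian h-function, contribution * exp(-h^2 / a^2), with made-up parameters:

import numpy as np

contribution, a = 2.0, 100.0
h = np.array([0.0, 50.0, 100.0, 300.0])
print(contribution * np.exp(-(h * h) / (a * a)))
# h=0 returns the full contribution; values decay smoothly toward zero.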
def get_argument_parser(name=None, **kwargs): """Returns the global ArgumentParser instance with the given name. The 1st time this function is called, a new ArgumentParser instance will be created for the given name, and any args other than "name" will be passed on to the ArgumentParser constructor. """ if name is None: name = "default" if len(kwargs) > 0 or name not in _parsers: init_argument_parser(name, **kwargs) return _parsers[name]
[ "def", "get_argument_parser", "(", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "name", "is", "None", ":", "name", "=", "\"default\"", "if", "len", "(", "kwargs", ")", ">", "0", "or", "name", "not", "in", "_parsers", ":", "init_argument_parser", "(", "name", ",", "*", "*", "kwargs", ")", "return", "_parsers", "[", "name", "]" ]
36.846154
18
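The cached-registry pattern, standalone; this is a simplified sketch in which argparse.ArgumentParser stands in for whatever init_argument_parser actually builds:

import argparse

_parsers = {}

def get_argument_parser(name=None, **kwargs):
    if name is None:
        name = "default"
    if len(kwargs) > 0 or name not in _parsers:
        _parsers[name] = argparse.ArgumentParser(**kwargs)
    return _parsers[name]

assert get_argument_parser() is get_argument_parser()  # same instance reused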
def alive(self): '''Is this component alive?''' with self._mutex: if self.exec_contexts: for ec in self.exec_contexts: if self._obj.is_alive(ec): return True return False
[ "def", "alive", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "exec_contexts", ":", "for", "ec", "in", "self", ".", "exec_contexts", ":", "if", "self", ".", "_obj", ".", "is_alive", "(", "ec", ")", ":", "return", "True", "return", "False" ]
32.375
10.375
def from_json(cls, input_json: str) -> 'NistBeaconValue': """ Convert a string of JSON which represents a NIST randomness beacon value into a 'NistBeaconValue' object. :param input_json: JSON to build a 'Nist RandomnessBeaconValue' from :return: A 'NistBeaconValue' object, 'None' otherwise """ try: data_dict = json.loads(input_json) except ValueError: return None # Our required values are "must haves". This makes it simple # to verify we loaded everything out of JSON correctly. required_values = { cls._KEY_FREQUENCY: None, cls._KEY_OUTPUT_VALUE: None, cls._KEY_PREVIOUS_OUTPUT_VALUE: None, cls._KEY_SEED_VALUE: None, cls._KEY_SIGNATURE_VALUE: None, cls._KEY_STATUS_CODE: None, cls._KEY_TIMESTAMP: None, cls._KEY_VERSION: None, } for key in required_values: if key in data_dict: required_values[key] = data_dict[key] # Confirm that the required values are set, and not 'None' if None in required_values.values(): return None # We have all the required values, return a node object return cls( version=required_values[cls._KEY_VERSION], frequency=int(required_values[cls._KEY_FREQUENCY]), timestamp=int(required_values[cls._KEY_TIMESTAMP]), seed_value=required_values[cls._KEY_SEED_VALUE], previous_output_value=required_values[ cls._KEY_PREVIOUS_OUTPUT_VALUE ], signature_value=required_values[cls._KEY_SIGNATURE_VALUE], output_value=required_values[cls._KEY_OUTPUT_VALUE], status_code=required_values[cls._KEY_STATUS_CODE], )
[ "def", "from_json", "(", "cls", ",", "input_json", ":", "str", ")", "->", "'NistBeaconValue'", ":", "try", ":", "data_dict", "=", "json", ".", "loads", "(", "input_json", ")", "except", "ValueError", ":", "return", "None", "# Our required values are \"must haves\". This makes it simple", "# to verify we loaded everything out of JSON correctly.", "required_values", "=", "{", "cls", ".", "_KEY_FREQUENCY", ":", "None", ",", "cls", ".", "_KEY_OUTPUT_VALUE", ":", "None", ",", "cls", ".", "_KEY_PREVIOUS_OUTPUT_VALUE", ":", "None", ",", "cls", ".", "_KEY_SEED_VALUE", ":", "None", ",", "cls", ".", "_KEY_SIGNATURE_VALUE", ":", "None", ",", "cls", ".", "_KEY_STATUS_CODE", ":", "None", ",", "cls", ".", "_KEY_TIMESTAMP", ":", "None", ",", "cls", ".", "_KEY_VERSION", ":", "None", ",", "}", "for", "key", "in", "required_values", ":", "if", "key", "in", "data_dict", ":", "required_values", "[", "key", "]", "=", "data_dict", "[", "key", "]", "# Confirm that the required values are set, and not 'None'", "if", "None", "in", "required_values", ".", "values", "(", ")", ":", "return", "None", "# We have all the required values, return a node object", "return", "cls", "(", "version", "=", "required_values", "[", "cls", ".", "_KEY_VERSION", "]", ",", "frequency", "=", "int", "(", "required_values", "[", "cls", ".", "_KEY_FREQUENCY", "]", ")", ",", "timestamp", "=", "int", "(", "required_values", "[", "cls", ".", "_KEY_TIMESTAMP", "]", ")", ",", "seed_value", "=", "required_values", "[", "cls", ".", "_KEY_SEED_VALUE", "]", ",", "previous_output_value", "=", "required_values", "[", "cls", ".", "_KEY_PREVIOUS_OUTPUT_VALUE", "]", ",", "signature_value", "=", "required_values", "[", "cls", ".", "_KEY_SIGNATURE_VALUE", "]", ",", "output_value", "=", "required_values", "[", "cls", ".", "_KEY_OUTPUT_VALUE", "]", ",", "status_code", "=", "required_values", "[", "cls", ".", "_KEY_STATUS_CODE", "]", ",", ")" ]
37.875
18.458333
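A minimal usage sketch for the from_json record above. The JSON key names below are assumptions (the real class reads them from its _KEY_* constants, whose values this record does not show), and the beacon values are made-up placeholders.

import json

sample = json.dumps({
    "version": "Version 1.0",          # hypothetical key names throughout
    "frequency": 60,
    "timestamp": 1447873020,
    "seedValue": "ab" * 32,
    "previousOutputValue": "cd" * 32,
    "signatureValue": "ef" * 128,
    "outputValue": "01" * 32,
    "statusCode": "0",
})

# Returns a NistBeaconValue, or None if any required key is missing.
beacon = NistBeaconValue.from_json(sample)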
def sys_path(self): """ The system path inside the environment :return: The :data:`sys.path` from the environment :rtype: list """ from .vendor.vistir.compat import JSONDecodeError current_executable = vistir.compat.Path(sys.executable).as_posix() if not self.python or self.python == current_executable: return sys.path elif any([sys.prefix == self.prefix, not self.is_venv]): return sys.path cmd_args = [self.python, "-c", "import json, sys; print(json.dumps(sys.path))"] path, _ = vistir.misc.run(cmd_args, return_object=False, nospin=True, block=True, combine_stderr=False, write_to_stdout=False) try: path = json.loads(path.strip()) except JSONDecodeError: path = sys.path return path
[ "def", "sys_path", "(", "self", ")", ":", "from", ".", "vendor", ".", "vistir", ".", "compat", "import", "JSONDecodeError", "current_executable", "=", "vistir", ".", "compat", ".", "Path", "(", "sys", ".", "executable", ")", ".", "as_posix", "(", ")", "if", "not", "self", ".", "python", "or", "self", ".", "python", "==", "current_executable", ":", "return", "sys", ".", "path", "elif", "any", "(", "[", "sys", ".", "prefix", "==", "self", ".", "prefix", ",", "not", "self", ".", "is_venv", "]", ")", ":", "return", "sys", ".", "path", "cmd_args", "=", "[", "self", ".", "python", ",", "\"-c\"", ",", "\"import json, sys; print(json.dumps(sys.path))\"", "]", "path", ",", "_", "=", "vistir", ".", "misc", ".", "run", "(", "cmd_args", ",", "return_object", "=", "False", ",", "nospin", "=", "True", ",", "block", "=", "True", ",", "combine_stderr", "=", "False", ",", "write_to_stdout", "=", "False", ")", "try", ":", "path", "=", "json", ".", "loads", "(", "path", ".", "strip", "(", ")", ")", "except", "JSONDecodeError", ":", "path", "=", "sys", ".", "path", "return", "path" ]
39.571429
23.095238
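The heart of sys_path above is asking another interpreter to print its own sys.path as JSON. A self-contained sketch of that trick using only the standard library, with vistir.misc.run swapped for subprocess.run (an assumed equivalence):

import json
import subprocess
import sys

def interpreter_sys_path(python=sys.executable):
    # Have the target interpreter serialize its own sys.path as JSON.
    out = subprocess.run(
        [python, "-c", "import json, sys; print(json.dumps(sys.path))"],
        capture_output=True, text=True, check=True,
    ).stdout
    try:
        return json.loads(out.strip())
    except json.JSONDecodeError:
        # Same fallback as the record above: reuse this process's path.
        return sys.path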
def threshold_monitor_hidden_threshold_monitor_sfp_pause(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") sfp = ET.SubElement(threshold_monitor, "sfp") pause = ET.SubElement(sfp, "pause") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "threshold_monitor_hidden_threshold_monitor_sfp_pause", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "threshold_monitor_hidden", "=", "ET", ".", "SubElement", "(", "config", ",", "\"threshold-monitor-hidden\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-threshold-monitor\"", ")", "threshold_monitor", "=", "ET", ".", "SubElement", "(", "threshold_monitor_hidden", ",", "\"threshold-monitor\"", ")", "sfp", "=", "ET", ".", "SubElement", "(", "threshold_monitor", ",", "\"sfp\"", ")", "pause", "=", "ET", ".", "SubElement", "(", "sfp", ",", "\"pause\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
51.181818
24.181818
def get_value_from_state(self, path, head_hash=None, with_proof=False, multi_sig=None):
        '''
        Get a value (and optionally a proof) for the given path in the state trie.
        Does not return the proof if there is no aggregate signature for it.
        :param path: the path to generate a state proof for
        :param head_hash: the root to create the proof against
        :param with_proof: whether to also return a state proof
        :param multi_sig: the aggregate signature to embed in the proof
        :return: a (value, proof) tuple; the proof is None when not requested or unavailable
        '''
        root_hash = head_hash if head_hash else self.state.committedHeadHash
        encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))

        if not with_proof:
            return self.state.get_for_root_hash(root_hash, path), None

        if not multi_sig:
            # Just return the value and not proof
            try:
                return self.state.get_for_root_hash(root_hash, path), None
            except KeyError:
                return None, None
        else:
            try:
                proof, value = self.state.generate_state_proof(key=path,
                                                               root=self.state.get_head_by_hash(root_hash),
                                                               serialize=True,
                                                               get_value=True)
                value = self.state.get_decoded(value) if value else value
                encoded_proof = proof_nodes_serializer.serialize(proof)
                proof = {
                    ROOT_HASH: encoded_root_hash,
                    MULTI_SIGNATURE: multi_sig.as_dict(),
                    PROOF_NODES: encoded_proof
                }
                return value, proof
            except KeyError:
                return None, None
[ "def", "get_value_from_state", "(", "self", ",", "path", ",", "head_hash", "=", "None", ",", "with_proof", "=", "False", ",", "multi_sig", "=", "None", ")", ":", "root_hash", "=", "head_hash", "if", "head_hash", "else", "self", ".", "state", ".", "committedHeadHash", "encoded_root_hash", "=", "state_roots_serializer", ".", "serialize", "(", "bytes", "(", "root_hash", ")", ")", "if", "not", "with_proof", ":", "return", "self", ".", "state", ".", "get_for_root_hash", "(", "root_hash", ",", "path", ")", ",", "None", "if", "not", "multi_sig", ":", "# Just return the value and not proof", "try", ":", "return", "self", ".", "state", ".", "get_for_root_hash", "(", "root_hash", ",", "path", ")", ",", "None", "except", "KeyError", ":", "return", "None", ",", "None", "else", ":", "try", ":", "proof", ",", "value", "=", "self", ".", "state", ".", "generate_state_proof", "(", "key", "=", "path", ",", "root", "=", "self", ".", "state", ".", "get_head_by_hash", "(", "root_hash", ")", ",", "serialize", "=", "True", ",", "get_value", "=", "True", ")", "value", "=", "self", ".", "state", ".", "get_decoded", "(", "value", ")", "if", "value", "else", "value", "encoded_proof", "=", "proof_nodes_serializer", ".", "serialize", "(", "proof", ")", "proof", "=", "{", "ROOT_HASH", ":", "encoded_root_hash", ",", "MULTI_SIGNATURE", ":", "multi_sig", ".", "as_dict", "(", ")", ",", "PROOF_NODES", ":", "encoded_proof", "}", "return", "value", ",", "proof", "except", "KeyError", ":", "return", "None", ",", "None" ]
47.081081
24.648649
def is_header(self): """ Whether or not the cell is a header Any header cell will have "=" instead of "-" on its border. For example, this is a header cell:: +-----+ | foo | +=====+ while this cell is not:: +-----+ | foo | +-----+ Returns ------- bool Whether or not the cell is a header """ bottom_line = self.text.split('\n')[-1] if is_only(bottom_line, ['+', '=']): return True return False
[ "def", "is_header", "(", "self", ")", ":", "bottom_line", "=", "self", ".", "text", ".", "split", "(", "'\\n'", ")", "[", "-", "1", "]", "if", "is_only", "(", "bottom_line", ",", "[", "'+'", ",", "'='", "]", ")", ":", "return", "True", "return", "False" ]
19.482759
22.103448
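is_header above leans on an is_only helper that the record does not include. A plausible stand-in (an assumption, not the project's actual helper), checked against the two borders from the docstring:

def is_only(line, chars):
    # True when the line is non-empty and built solely from the given chars.
    return bool(line) and all(c in chars for c in line)

assert is_only("+=====+", ["+", "="])        # header border
assert not is_only("+-----+", ["+", "="])    # ordinary border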
def default_classification_value_maps(classification): """Helper to get default value maps from classification. :param classification: Classification definition. :type classification: dict :returns: Dictionary with key = the class key and value = default strings. :rtype: dict """ value_maps = {} for hazard_class in classification['classes']: value_maps[hazard_class['key']] = hazard_class.get( 'string_defaults', []) return value_maps
[ "def", "default_classification_value_maps", "(", "classification", ")", ":", "value_maps", "=", "{", "}", "for", "hazard_class", "in", "classification", "[", "'classes'", "]", ":", "value_maps", "[", "hazard_class", "[", "'key'", "]", "]", "=", "hazard_class", ".", "get", "(", "'string_defaults'", ",", "[", "]", ")", "return", "value_maps" ]
32.066667
19.6
def _parse_triggered_hits(self, file_obj): """Parse and store triggered hits.""" for _ in range(self.n_triggered_hits): dom_id, pmt_id = unpack('<ib', file_obj.read(5)) tdc_time = unpack('>I', file_obj.read(4))[0] tot = unpack('<b', file_obj.read(1))[0] trigger_mask = unpack('<Q', file_obj.read(8)) self.triggered_hits.append( (dom_id, pmt_id, tdc_time, tot, trigger_mask) )
[ "def", "_parse_triggered_hits", "(", "self", ",", "file_obj", ")", ":", "for", "_", "in", "range", "(", "self", ".", "n_triggered_hits", ")", ":", "dom_id", ",", "pmt_id", "=", "unpack", "(", "'<ib'", ",", "file_obj", ".", "read", "(", "5", ")", ")", "tdc_time", "=", "unpack", "(", "'>I'", ",", "file_obj", ".", "read", "(", "4", ")", ")", "[", "0", "]", "tot", "=", "unpack", "(", "'<b'", ",", "file_obj", ".", "read", "(", "1", ")", ")", "[", "0", "]", "trigger_mask", "=", "unpack", "(", "'<Q'", ",", "file_obj", ".", "read", "(", "8", ")", ")", "self", ".", "triggered_hits", ".", "append", "(", "(", "dom_id", ",", "pmt_id", ",", "tdc_time", ",", "tot", ",", "trigger_mask", ")", ")" ]
47
12.1
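One triggered-hit record, built and decoded exactly the way the parser above reads it; note the mixed byte orders (little-endian fields around a big-endian '>I' TDC time). The numeric values are made up.

from struct import pack, unpack

raw = (pack('<ib', 806451572, 3) + pack('>I', 123456)
       + pack('<b', 42) + pack('<Q', 1))

dom_id, pmt_id = unpack('<ib', raw[:5])
tdc_time = unpack('>I', raw[5:9])[0]
tot = unpack('<b', raw[9:10])[0]
trigger_mask = unpack('<Q', raw[10:18])
print(dom_id, pmt_id, tdc_time, tot, trigger_mask)   # 806451572 3 123456 42 (1,)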
def _robust_rmtree(path, logger=None, max_retries=5):
    """Try to delete paths robustly.
    Retries several times (with increasing delays) if an OSError
    occurs.  If the final attempt fails, the Exception is propagated
    to the caller. Taken from https://github.com/hashdist/hashdist/pull/116
    """

    for i in range(max_retries):
        try:
            shutil.rmtree(path)
            return
        except OSError:
            if logger:
                logger.info('Unable to remove path: %s' % path)
                logger.info('Retrying after %d seconds' % i)
            time.sleep(i)

    # Final attempt, pass any Exceptions up to caller.
    shutil.rmtree(path)
[ "def", "_robust_rmtree", "(", "path", ",", "logger", "=", "None", ",", "max_retries", "=", "5", ")", ":", "for", "i", "in", "range", "(", "max_retries", ")", ":", "try", ":", "shutil", ".", "rmtree", "(", "path", ")", "return", "except", "OSError", "as", "e", ":", "if", "logger", ":", "info", "(", "'Unable to remove path: %s'", "%", "path", ")", "info", "(", "'Retrying after %d seconds'", "%", "i", ")", "time", ".", "sleep", "(", "i", ")", "# Final attempt, pass any Exceptions up to caller.", "shutil", ".", "rmtree", "(", "path", ")" ]
34.631579
18.526316
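A quick exercise of _robust_rmtree above, assuming the shutil/time imports and a standard logging logger that the record leaves implicit:

import logging
import os
import tempfile

logger = logging.getLogger(__name__)

d = tempfile.mkdtemp()
open(os.path.join(d, "f.txt"), "w").close()
_robust_rmtree(d, logger=logger)   # retries transient OSErrors, then re-raises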
def read_value(self):
        """Reads the raw red, green, blue and clear channel values and
        returns the red, green and blue components, scaled by the clear
        channel and gamma-corrected into the 0-255 range.
        """
        while not self._valid():
            time.sleep((self._integration_time + 0.9)/1000.0)
        # Read each color register.
        r = self._readU16LE(TCS34725_RDATAL)
        g = self._readU16LE(TCS34725_GDATAL)
        b = self._readU16LE(TCS34725_BDATAL)
        c = self._readU16LE(TCS34725_CDATAL)
        # Scale each channel by the clear value and apply gamma correction.
        red = int(pow((int((r/c) * 256) / 255), 2.5) * 255)
        green = int(pow((int((g/c) * 256) / 255), 2.5) * 255)
        blue = int(pow((int((b/c) * 256) / 255), 2.5) * 255)
        return [red, green, blue]
[ "def", "read_value", "(", "self", ")", ":", "while", "not", "self", ".", "_valid", "(", ")", ":", "time", ".", "sleep", "(", "(", "self", ".", "_integration_time", "+", "0.9", ")", "/", "1000.0", ")", "# Read each color register.", "r", "=", "self", ".", "_readU16LE", "(", "TCS34725_RDATAL", ")", "g", "=", "self", ".", "_readU16LE", "(", "TCS34725_GDATAL", ")", "b", "=", "self", ".", "_readU16LE", "(", "TCS34725_BDATAL", ")", "c", "=", "self", ".", "_readU16LE", "(", "TCS34725_CDATAL", ")", "# Delay for the integration time to allow for next reading immediately.", "red", "=", "int", "(", "pow", "(", "(", "int", "(", "(", "r", "/", "c", ")", "*", "256", ")", "/", "255", ")", ",", "2.5", ")", "*", "255", ")", "green", "=", "int", "(", "pow", "(", "(", "int", "(", "(", "g", "/", "c", ")", "*", "256", ")", "/", "255", ")", ",", "2.5", ")", "*", "255", ")", "blue", "=", "int", "(", "pow", "(", "(", "int", "(", "(", "b", "/", "c", ")", "*", "256", ")", "/", "255", ")", ",", "2.5", ")", "*", "255", ")", "return", "[", "r", ",", "g", ",", "b", "]" ]
42
17.368421
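The per-channel math from read_value above, pulled into a standalone helper so the clear-channel scaling and 2.5 gamma curve are easy to test with made-up raw counts:

def gamma_correct(channel, clear):
    # Scale the channel by the clear count into 0..1, then apply gamma 2.5.
    scaled = int((channel / clear) * 256) / 255
    return int(pow(scaled, 2.5) * 255)

print(gamma_correct(1200, 4000))   # 12 for these made-up counts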
def prepare_destruction(self, recursive=True): """Prepares the model for destruction Recursively un-registers all observers and removes references to child models """ if self.state is None: logger.verbose("Multiple calls of prepare destruction for {0}".format(self)) self.destruction_signal.emit() try: self.unregister_observer(self) except KeyError: # Might happen if the observer was already unregistered logger.verbose("Observer already unregistered!") pass if recursive: if self.income: self.income.prepare_destruction() for port in self.input_data_ports[:] + self.output_data_ports[:] + self.outcomes[:]: port.prepare_destruction() del self.input_data_ports[:] del self.output_data_ports[:] del self.outcomes[:] self.state = None self.input_data_ports = None self.output_data_ports = None self.income = None self.outcomes = None # History TODO: these are needed by the modification history # self.action_signal = None # self.meta_signal = None # self.destruction_signal = None self.observe = None super(AbstractStateModel, self).prepare_destruction()
[ "def", "prepare_destruction", "(", "self", ",", "recursive", "=", "True", ")", ":", "if", "self", ".", "state", "is", "None", ":", "logger", ".", "verbose", "(", "\"Multiple calls of prepare destruction for {0}\"", ".", "format", "(", "self", ")", ")", "self", ".", "destruction_signal", ".", "emit", "(", ")", "try", ":", "self", ".", "unregister_observer", "(", "self", ")", "except", "KeyError", ":", "# Might happen if the observer was already unregistered", "logger", ".", "verbose", "(", "\"Observer already unregistered!\"", ")", "pass", "if", "recursive", ":", "if", "self", ".", "income", ":", "self", ".", "income", ".", "prepare_destruction", "(", ")", "for", "port", "in", "self", ".", "input_data_ports", "[", ":", "]", "+", "self", ".", "output_data_ports", "[", ":", "]", "+", "self", ".", "outcomes", "[", ":", "]", ":", "port", ".", "prepare_destruction", "(", ")", "del", "self", ".", "input_data_ports", "[", ":", "]", "del", "self", ".", "output_data_ports", "[", ":", "]", "del", "self", ".", "outcomes", "[", ":", "]", "self", ".", "state", "=", "None", "self", ".", "input_data_ports", "=", "None", "self", ".", "output_data_ports", "=", "None", "self", ".", "income", "=", "None", "self", ".", "outcomes", "=", "None", "# History TODO: these are needed by the modification history", "# self.action_signal = None", "# self.meta_signal = None", "# self.destruction_signal = None", "self", ".", "observe", "=", "None", "super", "(", "AbstractStateModel", ",", "self", ")", ".", "prepare_destruction", "(", ")" ]
40.8125
15.8125
def timetopythonvalue(time_val):
    "Convert a time or time range from ArcGIS REST server format to Python"
    if isinstance(time_val, sequence):
        return map(timetopythonvalue, time_val)
    elif isinstance(time_val, numeric):
        return datetime.datetime(*(time.gmtime(time_val))[:6])
    elif isinstance(time_val, basestring):
        values = []
        try:
            values = map(long, time_val.split(","))
        except ValueError:
            pass
        if values:
            return map(timetopythonvalue, values)
    raise ValueError(repr(time_val))
[ "def", "timetopythonvalue", "(", "time_val", ")", ":", "if", "isinstance", "(", "time_val", ",", "sequence", ")", ":", "return", "map", "(", "timetopythonvalue", ",", "time_val", ")", "elif", "isinstance", "(", "time_val", ",", "numeric", ")", ":", "return", "datetime", ".", "datetime", "(", "*", "(", "time", ".", "gmtime", "(", "time_val", ")", ")", "[", ":", "6", "]", ")", "elif", "isinstance", "(", "time_val", ",", "numeric", ")", ":", "values", "=", "[", "]", "try", ":", "values", "=", "map", "(", "long", ",", "time_val", ".", "split", "(", "\",\"", ")", ")", "except", ":", "pass", "if", "values", ":", "return", "map", "(", "timetopythonvalue", ",", "values", ")", "raise", "ValueError", "(", "repr", "(", "time_val", ")", ")" ]
36.533333
14.666667
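The numeric branch above turns seconds-since-epoch into a naive UTC datetime; the same expression works standalone:

import datetime
import time

epoch_seconds = 1447873020   # a made-up timestamp
# Builds a datetime from the first six fields of the UTC struct_time.
print(datetime.datetime(*(time.gmtime(epoch_seconds))[:6]))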
def all(cls, connection=None, **params):
        """
        Returns the first page of results as a list if no params are passed in.
        """
        request = cls._make_request('GET', cls._get_all_path(), connection, params=params)
        return cls._create_object(request, connection=connection)
[ "def", "all", "(", "cls", ",", "connection", "=", "None", ",", "*", "*", "params", ")", ":", "request", "=", "cls", ".", "_make_request", "(", "'GET'", ",", "cls", ".", "_get_all_path", "(", ")", ",", "connection", ",", "params", "=", "params", ")", "return", "cls", ".", "_create_object", "(", "request", ",", "connection", "=", "connection", ")" ]
40.142857
19.857143
def update_by_token(self, token, **kwargs):
        """
        Update the session info. Any type of known token can be used

        :param token: code/access token/refresh token/...
        :param kwargs: Keyword arguments
        """
        _sid = self.handler.sid(token)
        return self.update(_sid, **kwargs)
[ "def", "update_by_token", "(", "self", ",", "token", ",", "*", "*", "kwargs", ")", ":", "_sid", "=", "self", ".", "handler", ".", "sid", "(", "token", ")", "return", "self", ".", "update", "(", "_sid", ",", "*", "*", "kwargs", ")" ]
34.777778
10.555556
def get_all_project_owners(project_ids=None, **kwargs): """ Get the project owner entries for all the requested projects. If the project_ids argument is None, return all the owner entries for ALL projects """ projowner_qry = db.DBSession.query(ProjectOwner) if project_ids is not None: projowner_qry = projowner_qry.filter(ProjectOwner.project_id.in_(project_ids)) project_owners_i = projowner_qry.all() return [JSONObject(project_owner_i) for project_owner_i in project_owners_i]
[ "def", "get_all_project_owners", "(", "project_ids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "projowner_qry", "=", "db", ".", "DBSession", ".", "query", "(", "ProjectOwner", ")", "if", "project_ids", "is", "not", "None", ":", "projowner_qry", "=", "projowner_qry", ".", "filter", "(", "ProjectOwner", ".", "project_id", ".", "in_", "(", "project_ids", ")", ")", "project_owners_i", "=", "projowner_qry", ".", "all", "(", ")", "return", "[", "JSONObject", "(", "project_owner_i", ")", "for", "project_owner_i", "in", "project_owners_i", "]" ]
32.8125
25.0625
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE): """ Receives a diverted packet that matched the filter. The remapped function is WinDivertRecv:: BOOL WinDivertRecv( __in HANDLE handle, __out PVOID pPacket, __in UINT packetLen, __out_opt PWINDIVERT_ADDRESS pAddr, __out_opt UINT *recvLen ); For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv :return: The return value is a `pydivert.Packet`. """ if self._handle is None: raise RuntimeError("WinDivert handle is not open") packet = bytearray(bufsize) packet_ = (c_char * bufsize).from_buffer(packet) address = windivert_dll.WinDivertAddress() recv_len = c_uint(0) windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len)) return Packet( memoryview(packet)[:recv_len.value], (address.IfIdx, address.SubIfIdx), Direction(address.Direction) )
[ "def", "recv", "(", "self", ",", "bufsize", "=", "DEFAULT_PACKET_BUFFER_SIZE", ")", ":", "if", "self", ".", "_handle", "is", "None", ":", "raise", "RuntimeError", "(", "\"WinDivert handle is not open\"", ")", "packet", "=", "bytearray", "(", "bufsize", ")", "packet_", "=", "(", "c_char", "*", "bufsize", ")", ".", "from_buffer", "(", "packet", ")", "address", "=", "windivert_dll", ".", "WinDivertAddress", "(", ")", "recv_len", "=", "c_uint", "(", "0", ")", "windivert_dll", ".", "WinDivertRecv", "(", "self", ".", "_handle", ",", "packet_", ",", "bufsize", ",", "byref", "(", "address", ")", ",", "byref", "(", "recv_len", ")", ")", "return", "Packet", "(", "memoryview", "(", "packet", ")", "[", ":", "recv_len", ".", "value", "]", ",", "(", "address", ".", "IfIdx", ",", "address", ".", "SubIfIdx", ")", ",", "Direction", "(", "address", ".", "Direction", ")", ")" ]
35.451613
18.16129
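A hypothetical capture loop around the recv method above. The WinDivert(...) constructor, filter string, and open()/close() calls are assumptions about the rest of the pydivert API, and running this requires Windows with the WinDivert driver installed.

w = WinDivert("tcp.DstPort == 80")   # hypothetical filter
w.open()
try:
    packet = w.recv()                # blocks until a matching packet arrives
    print(packet.direction)
finally:
    w.close()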
def _get_register_specs(bit_labels):
    """Get the number and size of unique registers from bit_labels list.

    Args:
        bit_labels (list): this list is of the form::

            [['reg1', 0], ['reg1', 1], ['reg2', 0]]

            which indicates a register named "reg1" of size 2
            and a register named "reg2" of size 1. This is the
            format of classical and quantum bit labels in the qobj header.

    Yields:
        tuple: iterator of register_name:size pairs.
    """
    it = itertools.groupby(bit_labels, operator.itemgetter(0))
    for register_name, sub_it in it:
        yield register_name, max(ind[1] for ind in sub_it) + 1
[ "def", "_get_register_specs", "(", "bit_labels", ")", ":", "it", "=", "itertools", ".", "groupby", "(", "bit_labels", ",", "operator", ".", "itemgetter", "(", "0", ")", ")", "for", "register_name", ",", "sub_it", "in", "it", ":", "yield", "register_name", ",", "max", "(", "ind", "[", "1", "]", "for", "ind", "in", "sub_it", ")", "+", "1" ]
34.368421
20.631579
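The docstring's own example, run through the same groupby logic to show the expected register sizes:

import itertools
import operator

bit_labels = [['reg1', 0], ['reg1', 1], ['reg2', 0]]
it = itertools.groupby(bit_labels, operator.itemgetter(0))
print({name: max(ind[1] for ind in sub) + 1 for name, sub in it})
# {'reg1': 2, 'reg2': 1}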
def addr2line(self, addrq): ''' Get the line number for a given bytecode offset Analogous to PyCode_Addr2Line; translated from pseudocode in Objects/lnotab_notes.txt ''' co_lnotab = self.pyop_field('co_lnotab').proxyval(set()) # Initialize lineno to co_firstlineno as per PyCode_Addr2Line # not 0, as lnotab_notes.txt has it: lineno = int_from_int(self.field('co_firstlineno')) addr = 0 for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]): addr += ord(addr_incr) if addr > addrq: return lineno lineno += ord(line_incr) return lineno
[ "def", "addr2line", "(", "self", ",", "addrq", ")", ":", "co_lnotab", "=", "self", ".", "pyop_field", "(", "'co_lnotab'", ")", ".", "proxyval", "(", "set", "(", ")", ")", "# Initialize lineno to co_firstlineno as per PyCode_Addr2Line", "# not 0, as lnotab_notes.txt has it:", "lineno", "=", "int_from_int", "(", "self", ".", "field", "(", "'co_firstlineno'", ")", ")", "addr", "=", "0", "for", "addr_incr", ",", "line_incr", "in", "zip", "(", "co_lnotab", "[", ":", ":", "2", "]", ",", "co_lnotab", "[", "1", ":", ":", "2", "]", ")", ":", "addr", "+=", "ord", "(", "addr_incr", ")", "if", "addr", ">", "addrq", ":", "return", "lineno", "lineno", "+=", "ord", "(", "line_incr", ")", "return", "lineno" ]
33.85
21.35
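A worked example of the lnotab walk above, as a plain function over bytes. Iterating a Python 3 bytes object already yields ints, so the record's ord() calls are only needed for the str slices gdb hands back.

co_lnotab = b'\x06\x01\x08\x02'   # +6 bytes/+1 line, then +8 bytes/+2 lines
co_firstlineno = 10

def addr2line(addrq):
    lineno, addr = co_firstlineno, 0
    for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
        addr += addr_incr
        if addr > addrq:
            return lineno
        lineno += line_incr
    return lineno

print(addr2line(0), addr2line(6), addr2line(14))   # 10 11 13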
def parse(self, nodes): """Given a stream of node data, try to parse the nodes according to the machine's graph.""" self.last_node_type = self.initial_node_type for node_number, node in enumerate(nodes): try: self.step(node) except Exception as ex: raise Exception("An error occurred on node {}".format(node_number)) from ex
[ "def", "parse", "(", "self", ",", "nodes", ")", ":", "self", ".", "last_node_type", "=", "self", ".", "initial_node_type", "for", "node_number", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "try", ":", "self", ".", "step", "(", "node", ")", "except", "Exception", "as", "ex", ":", "raise", "Exception", "(", "\"An error occurred on node {}\"", ".", "format", "(", "node_number", ")", ")", "from", "ex" ]
44.625
16.5
def output(self, _filename): """ _filename is not used Args: _filename(string) """ txt = '' for c in self.contracts: txt += "\nContract %s\n"%c.name table = PrettyTable(['Variable', 'Dependencies']) for v in c.state_variables: table.add_row([v.name, _get(v, c)]) txt += str(table) txt += "\n" for f in c.functions_and_modifiers_not_inherited: txt += "\nFunction %s\n"%f.full_name table = PrettyTable(['Variable', 'Dependencies']) for v in f.variables: table.add_row([v.name, _get(v, f)]) for v in c.state_variables: table.add_row([v.canonical_name, _get(v, f)]) txt += str(table) self.info(txt)
[ "def", "output", "(", "self", ",", "_filename", ")", ":", "txt", "=", "''", "for", "c", "in", "self", ".", "contracts", ":", "txt", "+=", "\"\\nContract %s\\n\"", "%", "c", ".", "name", "table", "=", "PrettyTable", "(", "[", "'Variable'", ",", "'Dependencies'", "]", ")", "for", "v", "in", "c", ".", "state_variables", ":", "table", ".", "add_row", "(", "[", "v", ".", "name", ",", "_get", "(", "v", ",", "c", ")", "]", ")", "txt", "+=", "str", "(", "table", ")", "txt", "+=", "\"\\n\"", "for", "f", "in", "c", ".", "functions_and_modifiers_not_inherited", ":", "txt", "+=", "\"\\nFunction %s\\n\"", "%", "f", ".", "full_name", "table", "=", "PrettyTable", "(", "[", "'Variable'", ",", "'Dependencies'", "]", ")", "for", "v", "in", "f", ".", "variables", ":", "table", ".", "add_row", "(", "[", "v", ".", "name", ",", "_get", "(", "v", ",", "f", ")", "]", ")", "for", "v", "in", "c", ".", "state_variables", ":", "table", ".", "add_row", "(", "[", "v", ".", "canonical_name", ",", "_get", "(", "v", ",", "f", ")", "]", ")", "txt", "+=", "str", "(", "table", ")", "self", ".", "info", "(", "txt", ")" ]
33.230769
15
def create_collection(self, collection_name, database_name=None): """ Creates a new collection in the CosmosDB database. """ if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") # We need to check to see if this container already exists so we don't try # to create it twice existing_container = list(self.get_conn().QueryContainers( get_database_link(self.__get_database_name(database_name)), { "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": collection_name} ] })) # Only create if we did not find it already existing if len(existing_container) == 0: self.get_conn().CreateContainer( get_database_link(self.__get_database_name(database_name)), {"id": collection_name})
[ "def", "create_collection", "(", "self", ",", "collection_name", ",", "database_name", "=", "None", ")", ":", "if", "collection_name", "is", "None", ":", "raise", "AirflowBadRequest", "(", "\"Collection name cannot be None.\"", ")", "# We need to check to see if this container already exists so we don't try", "# to create it twice", "existing_container", "=", "list", "(", "self", ".", "get_conn", "(", ")", ".", "QueryContainers", "(", "get_database_link", "(", "self", ".", "__get_database_name", "(", "database_name", ")", ")", ",", "{", "\"query\"", ":", "\"SELECT * FROM r WHERE r.id=@id\"", ",", "\"parameters\"", ":", "[", "{", "\"name\"", ":", "\"@id\"", ",", "\"value\"", ":", "collection_name", "}", "]", "}", ")", ")", "# Only create if we did not find it already existing", "if", "len", "(", "existing_container", ")", "==", "0", ":", "self", ".", "get_conn", "(", ")", ".", "CreateContainer", "(", "get_database_link", "(", "self", ".", "__get_database_name", "(", "database_name", ")", ")", ",", "{", "\"id\"", ":", "collection_name", "}", ")" ]
42.727273
19.363636
def fetch(self): """ Fetch a UserChannelInstance :returns: Fetched UserChannelInstance :rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return UserChannelInstance( self._version, payload, service_sid=self._solution['service_sid'], user_sid=self._solution['user_sid'], channel_sid=self._solution['channel_sid'], )
[ "def", "fetch", "(", "self", ")", ":", "params", "=", "values", ".", "of", "(", "{", "}", ")", "payload", "=", "self", ".", "_version", ".", "fetch", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "UserChannelInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "user_sid", "=", "self", ".", "_solution", "[", "'user_sid'", "]", ",", "channel_sid", "=", "self", ".", "_solution", "[", "'channel_sid'", "]", ",", ")" ]
26.727273
18.090909
def set(self, language: str, value: str): """Sets the value in the specified language. Arguments: language: The language to set the value in. value: The value to set. """ self[language] = value self.__dict__.update(self) return self
[ "def", "set", "(", "self", ",", "language", ":", "str", ",", "value", ":", "str", ")", ":", "self", "[", "language", "]", "=", "value", "self", ".", "__dict__", ".", "update", "(", "self", ")", "return", "self" ]
23.285714
16.928571
def get_subscriber_model():
    """
    Attempt to pull settings.DJSTRIPE_SUBSCRIBER_MODEL.

    Users have the option of specifying a custom subscriber model via the
    DJSTRIPE_SUBSCRIBER_MODEL setting.

    This method falls back to AUTH_USER_MODEL if DJSTRIPE_SUBSCRIBER_MODEL is not set.

    Returns the subscriber model that is active in this project.
    """
    model_name = get_subscriber_model_string()

    # Attempt a Django 1.7 app lookup
    try:
        subscriber_model = django_apps.get_model(model_name)
    except ValueError:
        raise ImproperlyConfigured(
            "DJSTRIPE_SUBSCRIBER_MODEL must be of the form 'app_label.model_name'."
        )
    except LookupError:
        raise ImproperlyConfigured(
            "DJSTRIPE_SUBSCRIBER_MODEL refers to model '{model}' "
            "that has not been installed.".format(model=model_name)
        )

    if (
        "email" not in [field_.name for field_ in subscriber_model._meta.get_fields()]
    ) and not hasattr(subscriber_model, "email"):
        raise ImproperlyConfigured("DJSTRIPE_SUBSCRIBER_MODEL must have an email attribute.")

    if model_name != settings.AUTH_USER_MODEL:
        # Custom user model detected. Make sure the callback is configured.
        func = get_callback_function("DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK")
        if not func:
            raise ImproperlyConfigured(
                "DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK must be implemented "
                "if a DJSTRIPE_SUBSCRIBER_MODEL is defined."
            )

    return subscriber_model
[ "def", "get_subscriber_model", "(", ")", ":", "model_name", "=", "get_subscriber_model_string", "(", ")", "# Attempt a Django 1.7 app lookup", "try", ":", "subscriber_model", "=", "django_apps", ".", "get_model", "(", "model_name", ")", "except", "ValueError", ":", "raise", "ImproperlyConfigured", "(", "\"DJSTRIPE_SUBSCRIBER_MODEL must be of the form 'app_label.model_name'.\"", ")", "except", "LookupError", ":", "raise", "ImproperlyConfigured", "(", "\"DJSTRIPE_SUBSCRIBER_MODEL refers to model '{model}' \"", "\"that has not been installed.\"", ".", "format", "(", "model", "=", "model_name", ")", ")", "if", "(", "\"email\"", "not", "in", "[", "field_", ".", "name", "for", "field_", "in", "subscriber_model", ".", "_meta", ".", "get_fields", "(", ")", "]", ")", "and", "not", "hasattr", "(", "subscriber_model", ",", "\"email\"", ")", ":", "raise", "ImproperlyConfigured", "(", "\"DJSTRIPE_SUBSCRIBER_MODEL must have an email attribute.\"", ")", "if", "model_name", "!=", "settings", ".", "AUTH_USER_MODEL", ":", "# Custom user model detected. Make sure the callback is configured.", "func", "=", "get_callback_function", "(", "\"DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK\"", ")", "if", "not", "func", ":", "raise", "ImproperlyConfigured", "(", "\"DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK must be implemented \"", "\"if a DJSTRIPE_SUBSCRIBER_MODEL is defined.\"", ")", "return", "subscriber_model" ]
33.170732
24.146341
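The settings contract that the lookup above enforces, sketched as a hypothetical settings.py fragment (the model path and callback body are placeholders, not dj-stripe defaults):

# settings.py (hypothetical project)
DJSTRIPE_SUBSCRIBER_MODEL = "myapp.Customer"   # must be "app_label.model_name"

# Required whenever the subscriber model is not AUTH_USER_MODEL:
# given a request, return the subscriber instance to bill.
DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK = lambda request: request.user.customer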
def _get_userprofile_from_registry(user, sid): ''' In case net user doesn't return the userprofile we can get it from the registry Args: user (str): The user name, used in debug message sid (str): The sid to lookup in the registry Returns: str: Profile directory ''' profile_dir = __utils__['reg.read_value']( 'HKEY_LOCAL_MACHINE', 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\{0}'.format(sid), 'ProfileImagePath' )['vdata'] log.debug( 'user %s with sid=%s profile is located at "%s"', user, sid, profile_dir ) return profile_dir
[ "def", "_get_userprofile_from_registry", "(", "user", ",", "sid", ")", ":", "profile_dir", "=", "__utils__", "[", "'reg.read_value'", "]", "(", "'HKEY_LOCAL_MACHINE'", ",", "'SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\ProfileList\\\\{0}'", ".", "format", "(", "sid", ")", ",", "'ProfileImagePath'", ")", "[", "'vdata'", "]", "log", ".", "debug", "(", "'user %s with sid=%s profile is located at \"%s\"'", ",", "user", ",", "sid", ",", "profile_dir", ")", "return", "profile_dir" ]
27.652174
24.434783
def task_done(self): """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises ValueError if called more times than there were items placed in the queue. """ self._parent._check_closing() with self._parent._all_tasks_done: if self._parent._unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self._parent._unfinished_tasks -= 1 if self._parent._unfinished_tasks == 0: self._parent._finished.set() self._parent._all_tasks_done.notify_all()
[ "def", "task_done", "(", "self", ")", ":", "self", ".", "_parent", ".", "_check_closing", "(", ")", "with", "self", ".", "_parent", ".", "_all_tasks_done", ":", "if", "self", ".", "_parent", ".", "_unfinished_tasks", "<=", "0", ":", "raise", "ValueError", "(", "'task_done() called too many times'", ")", "self", ".", "_parent", ".", "_unfinished_tasks", "-=", "1", "if", "self", ".", "_parent", ".", "_unfinished_tasks", "==", "0", ":", "self", ".", "_parent", ".", "_finished", ".", "set", "(", ")", "self", ".", "_parent", ".", "_all_tasks_done", ".", "notify_all", "(", ")" ]
44
20.045455
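The contract described above mirrors the standard library queue, so the semantics can be tried directly against queue.Queue:

import queue

q = queue.Queue()
q.put("job")
q.get()
q.task_done()   # one task_done() per get(); an extra call raises ValueError
q.join()        # returns immediately once every fetched item is marked done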
def _make_argparser(self): """Makes a new argument parser.""" self.argparser = ShellArgumentParser(prog='') subparsers = self.argparser.add_subparsers() for name in self.get_names(): if name.startswith('parser_'): parser = subparsers.add_parser(name[7:]) parser.set_defaults(func=getattr(self, 'arg_' + name[7:])) getattr(self, name)(parser) self.argparser_completer = None try: import argcomplete except ImportError: pass else: os.environ.setdefault("_ARGCOMPLETE_COMP_WORDBREAKS", " \t\"'") self.argparser_completer = argcomplete.CompletionFinder(self.argparser)
[ "def", "_make_argparser", "(", "self", ")", ":", "self", ".", "argparser", "=", "ShellArgumentParser", "(", "prog", "=", "''", ")", "subparsers", "=", "self", ".", "argparser", ".", "add_subparsers", "(", ")", "for", "name", "in", "self", ".", "get_names", "(", ")", ":", "if", "name", ".", "startswith", "(", "'parser_'", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "name", "[", "7", ":", "]", ")", "parser", ".", "set_defaults", "(", "func", "=", "getattr", "(", "self", ",", "'arg_'", "+", "name", "[", "7", ":", "]", ")", ")", "getattr", "(", "self", ",", "name", ")", "(", "parser", ")", "self", ".", "argparser_completer", "=", "None", "try", ":", "import", "argcomplete", "except", "ImportError", ":", "pass", "else", ":", "os", ".", "environ", ".", "setdefault", "(", "\"_ARGCOMPLETE_COMP_WORDBREAKS\"", ",", "\" \\t\\\"'\"", ")", "self", ".", "argparser_completer", "=", "argcomplete", ".", "CompletionFinder", "(", "self", ".", "argparser", ")" ]
36
19.9
def build_full_day_ips(query, period_start, period_end):
    """Method to build an IP list for case 1, when the IP was allocated
    before the period start and is still allocated after the period end.
    This method only looks at public IPv4 addresses.
    """
    # Filter out only IPv4 that have not been deallocated
    ip_list = query.\
        filter(models.IPAddress.version == 4L).\
        filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\
        filter(models.IPAddress.used_by_tenant_id != null()).\
        filter(models.IPAddress.allocated_at != null()).\
        filter(models.IPAddress.allocated_at < period_start).\
        filter(or_(models.IPAddress._deallocated == False,
                   models.IPAddress.deallocated_at == null(),
                   models.IPAddress.deallocated_at >= period_end)).all()
    return ip_list
[ "def", "build_full_day_ips", "(", "query", ",", "period_start", ",", "period_end", ")", ":", "# Filter out only IPv4 that have not been deallocated", "ip_list", "=", "query", ".", "filter", "(", "models", ".", "IPAddress", ".", "version", "==", "4L", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "network_id", "==", "PUBLIC_NETWORK_ID", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "used_by_tenant_id", "is", "not", "None", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "allocated_at", "!=", "null", "(", ")", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "allocated_at", "<", "period_start", ")", ".", "filter", "(", "or_", "(", "models", ".", "IPAddress", ".", "_deallocated", "is", "False", ",", "models", ".", "IPAddress", ".", "deallocated_at", "==", "null", "(", ")", ",", "models", ".", "IPAddress", ".", "deallocated_at", ">=", "period_end", ")", ")", ".", "all", "(", ")", "return", "ip_list" ]
44.631579
18.684211
def getDateReceived(self): """Used to populate catalog values. Returns the date the Analysis Request this analysis belongs to was received. If the analysis was created after, then returns the date the analysis was created. """ request = self.getRequest() if request: ar_date = request.getDateReceived() if ar_date and self.created() > ar_date: return self.created() return ar_date return None
[ "def", "getDateReceived", "(", "self", ")", ":", "request", "=", "self", ".", "getRequest", "(", ")", "if", "request", ":", "ar_date", "=", "request", ".", "getDateReceived", "(", ")", "if", "ar_date", "and", "self", ".", "created", "(", ")", ">", "ar_date", ":", "return", "self", ".", "created", "(", ")", "return", "ar_date", "return", "None" ]
38.153846
13.230769
def _setup_imports(self): """ Ensure the local importer and PushFileService has everything for the Ansible module before setup() completes, but before detach() is called in an asynchronous task. The master automatically streams modules towards us concurrent to the runner invocation, however there is no public API to synchronize on the completion of those preloads. Instead simply reuse the importer's synchronization mechanism by importing everything the module will need prior to detaching. """ for fullname, _, _ in self.module_map['custom']: mitogen.core.import_module(fullname) for fullname in self.module_map['builtin']: mitogen.core.import_module(fullname)
[ "def", "_setup_imports", "(", "self", ")", ":", "for", "fullname", ",", "_", ",", "_", "in", "self", ".", "module_map", "[", "'custom'", "]", ":", "mitogen", ".", "core", ".", "import_module", "(", "fullname", ")", "for", "fullname", "in", "self", ".", "module_map", "[", "'builtin'", "]", ":", "mitogen", ".", "core", ".", "import_module", "(", "fullname", ")" ]
48.125
21.25
def queue(self):
        """Message queue."""
        with self.connection_pool.acquire(block=True) as conn:
            return Q(
                self.routing_key,
                exchange=self.exchange,
                routing_key=self.routing_key
            )(conn)
[ "def", "queue", "(", "self", ")", ":", "with", "self", ".", "connection_pool", ".", "acquire", "(", "block", "=", "True", ")", "as", "conn", ":", "return", "Q", "(", "self", ".", "routing_key", ",", "exchange", "=", "self", ".", "exchange", ",", "routing_key", "=", "self", ".", "routing_key", ")", "(", "conn", ")" ]
33.5
12.25
def decorate_with_validators(func,
                             func_signature=None,  # type: Signature
                             **validators  # type: Validator
                             ):
    """
    Utility method to decorate the provided function with the provided input and output Validator objects. Since this
    method takes Validator objects as arguments, it is for advanced users.

    :param func: the function to decorate. It might already be decorated, this method will check it and won't create
        another wrapper in this case, simply adding the validators to the existing wrapper
    :param func_signature: the function's signature if it is already known (internal calls), otherwise it will be found
        again by inspection
    :param validators: a dictionary of arg_name (or _out_) => Validator or list of Validator
    :return:
    """
    # first turn the dictionary values into lists only
    for arg_name, validator in validators.items():
        if not isinstance(validator, list):
            validators[arg_name] = [validator]

    if hasattr(func, '__wrapped__') and hasattr(func.__wrapped__, '__validators__'):
        # ---- This function is already wrapped by our validation wrapper ----

        # Update the dictionary of validators with the new validator(s)
        for arg_name, validator in validators.items():
            for v in validator:
                if arg_name in func.__wrapped__.__validators__:
                    func.__wrapped__.__validators__[arg_name].append(v)
                else:
                    func.__wrapped__.__validators__[arg_name] = [v]

        # return the function, no need to wrap it further (it is already wrapped)
        return func

    else:
        # ---- This function is not yet wrapped by our validator. ----

        # Store the dictionary of validators as an attribute of the function
        if hasattr(func, '__validators__'):
            raise ValueError('Function ' + str(func) + ' already has a defined __validators__ attribute, valid8 '
                             'decorators can not be applied on it')
        else:
            try:
                func.__validators__ = validators
            except AttributeError:
                raise ValueError("Error - Could not add validators list to function '%s'" % func)

        # either reuse or recompute function signature
        func_signature = func_signature or signature(func)

        # create a wrapper with the same signature
        @wraps(func)
        def validating_wrapper(*args, **kwargs):
            """ This is the wrapper that will be called every time the function is called """

            # (a) Perform input validation by applying `_assert_input_is_valid` on all received arguments
            apply_on_each_func_args_sig(func, args, kwargs, func_signature,
                                        func_to_apply=_assert_input_is_valid,
                                        func_to_apply_params_dict=func.__validators__)

            # (b) execute the function as usual
            res = func(*args, **kwargs)

            # (c) validate output if needed
            if _OUT_KEY in func.__validators__:
                for validator in func.__validators__[_OUT_KEY]:
                    validator.assert_valid(res)

            return res

        return validating_wrapper
[ "def", "decorate_with_validators", "(", "func", ",", "func_signature", "=", "None", ",", "# type: Signature", "*", "*", "validators", "# type: Validator", ")", ":", "# first turn the dictionary values into lists only", "for", "arg_name", ",", "validator", "in", "validators", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "validator", ",", "list", ")", ":", "validators", "[", "arg_name", "]", "=", "[", "validator", "]", "if", "hasattr", "(", "func", ",", "'__wrapped__'", ")", "and", "hasattr", "(", "func", ".", "__wrapped__", ",", "'__validators__'", ")", ":", "# ---- This function is already wrapped by our validation wrapper ----", "# Update the dictionary of validators with the new validator(s)", "for", "arg_name", ",", "validator", "in", "validators", ".", "items", "(", ")", ":", "for", "v", "in", "validator", ":", "if", "arg_name", "in", "func", ".", "__wrapped__", ".", "__validators__", ":", "func", ".", "__wrapped__", ".", "__validators__", "[", "arg_name", "]", ".", "append", "(", "v", ")", "else", ":", "func", ".", "__wrapped__", ".", "__validators__", "[", "arg_name", "]", "=", "[", "v", "]", "# return the function, no need to wrap it further (it is already wrapped)", "return", "func", "else", ":", "# ---- This function is not yet wrapped by our validator. ----", "# Store the dictionary of validators as an attribute of the function", "if", "hasattr", "(", "func", ",", "'__validators__'", ")", ":", "raise", "ValueError", "(", "'Function '", "+", "str", "(", "func", ")", "+", "' already has a defined __validators__ attribute, valid8 '", "'decorators can not be applied on it'", ")", "else", ":", "try", ":", "func", ".", "__validators__", "=", "validators", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"Error - Could not add validators list to function '%s'\"", "%", "func", ")", "# either reuse or recompute function signature", "func_signature", "=", "func_signature", "or", "signature", "(", "func", ")", "# create a wrapper with the same signature", "@", "wraps", "(", "func", ")", "def", "validating_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\" This is the wrapper that will be called everytime the function is called \"\"\"", "# (a) Perform input validation by applying `_assert_input_is_valid` on all received arguments", "apply_on_each_func_args_sig", "(", "func", ",", "args", ",", "kwargs", ",", "func_signature", ",", "func_to_apply", "=", "_assert_input_is_valid", ",", "func_to_apply_params_dict", "=", "func", ".", "__validators__", ")", "# (b) execute the function as usual", "res", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# (c) validate output if needed", "if", "_OUT_KEY", "in", "func", ".", "__validators__", ":", "for", "validator", "in", "func", ".", "__validators__", "[", "_OUT_KEY", "]", ":", "validator", ".", "assert_valid", "(", "res", ")", "return", "res", "return", "validating_wrapper" ]
46.15493
27.84507
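A self-contained sketch of the same wrapping pattern without valid8's Validator objects (the names below are illustrative, not the library's API): stash per-argument checks, bind each call against the signature, and validate before dispatching.

from functools import wraps
from inspect import signature

def with_validators(**checks):
    def decorate(func):
        sig = signature(func)

        @wraps(func)
        def wrapper(*args, **kwargs):
            bound = sig.bind(*args, **kwargs)
            for name, value in bound.arguments.items():
                for check in checks.get(name, []):
                    if not check(value):
                        raise ValueError("invalid %s=%r" % (name, value))
            return func(*args, **kwargs)

        return wrapper
    return decorate

@with_validators(x=[lambda v: v > 0])
def double(x):
    return 2 * x

double(3)      # 6
# double(-1) would raise ValueError before the function body runs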
def has_permission(self, user):
        """
        Returns True if the given request has permission to use the tool.
        Can be overridden by the user in subclasses.
        """
        return user.has_perm(
            self.model._meta.app_label + '.' + self.get_permission()
        )
[ "def", "has_permission", "(", "self", ",", "user", ")", ":", "return", "user", ".", "has_perm", "(", "self", ".", "model", ".", "_meta", ".", "app_label", "+", "'.'", "+", "self", ".", "get_permission", "(", ")", ")" ]
35.375
15.375
def simulate_custom_policy(PolicyInputList=None, ActionNames=None, ResourceArns=None, ResourcePolicy=None, ResourceOwner=None, CallerArn=None, ContextEntries=None, ResourceHandlingOption=None, MaxItems=None, Marker=None):
    """
    Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API actions and AWS resources to determine the policies' effective permissions. The policies are provided as strings.
    The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.
    If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.
    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy .
    If the output is long, you can use MaxItems and Marker parameters to paginate the results.
    See also: AWS API Documentation

    :example: response = client.simulate_custom_policy(
        PolicyInputList=[
            'string',
        ],
        ActionNames=[
            'string',
        ],
        ResourceArns=[
            'string',
        ],
        ResourcePolicy='string',
        ResourceOwner='string',
        CallerArn='string',
        ContextEntries=[
            {
                'ContextKeyName': 'string',
                'ContextKeyValues': [
                    'string',
                ],
                'ContextKeyType': 'string'|'stringList'|'numeric'|'numericList'|'boolean'|'booleanList'|'ip'|'ipList'|'binary'|'binaryList'|'date'|'dateList'
            },
        ],
        ResourceHandlingOption='string',
        MaxItems=123,
        Marker='string'
    )

    :type PolicyInputList: list
    :param PolicyInputList: [REQUIRED]
            A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be 'scope-down' policies, such as you could include in a call to GetFederationToken or one of the AssumeRole APIs to restrict what a user can do while using the temporary credentials.
            The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
            (string) --

    :type ActionNames: list
    :param ActionNames: [REQUIRED]
            A list of names of API actions to evaluate in the simulation. Each action is evaluated against each resource. Each action must include the service identifier, such as iam:CreateUser .
            (string) --

    :type ResourceArns: list
    :param ResourceArns: A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.
            The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.
            If you include a ResourcePolicy , then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.
            For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference .
            (string) --

    :type ResourcePolicy: string
    :param ResourcePolicy: A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.
            The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).

    :type ResourceOwner: string
    :param ResourceOwner: An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn . This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn .

    :type CallerArn: string
    :param CallerArn: The ARN of the IAM user that you want to use as the simulated caller of the APIs. CallerArn is required if you include a ResourcePolicy so that the policy's Principal element has a value to use in evaluating the policy.
            You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

    :type ContextEntries: list
    :param ContextEntries: A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.
            (dict) --Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.
            This data type is used as an input parameter to `` SimulateCustomPolicy `` and `` SimulatePrincipalPolicy `` .
            ContextKeyName (string) --The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId .
            ContextKeyValues (list) --The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy.
            (string) --
            ContextKeyType (string) --The data type of the value (or values) specified in the ContextKeyValues parameter.

    :type ResourceHandlingOption: string
    :param ResourceHandlingOption: Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results.
            If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.
            Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide .
            EC2-Classic-InstanceStore instance, image, security-group
            EC2-Classic-EBS instance, image, security-group, volume
            EC2-VPC-InstanceStore instance, image, security-group, network-interface
            EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, subnet
            EC2-VPC-EBS instance, image, security-group, network-interface, volume
            EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, subnet, volume

    :type MaxItems: integer
    :param MaxItems: (Optional) Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true .
            If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    :type Marker: string
    :param Marker: Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    :rtype: dict
    :return: {
        'EvaluationResults': [
            {
                'EvalActionName': 'string',
                'EvalResourceName': 'string',
                'EvalDecision': 'allowed'|'explicitDeny'|'implicitDeny',
                'MatchedStatements': [
                    {
                        'SourcePolicyId': 'string',
                        'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
                        'StartPosition': {
                            'Line': 123,
                            'Column': 123
                        },
                        'EndPosition': {
                            'Line': 123,
                            'Column': 123
                        }
                    },
                ],
                'MissingContextValues': [
                    'string',
                ],
                'OrganizationsDecisionDetail': {
                    'AllowedByOrganizations': True|False
                },
                'EvalDecisionDetails': {
                    'string': 'allowed'|'explicitDeny'|'implicitDeny'
                },
                'ResourceSpecificResults': [
                    {
                        'EvalResourceName': 'string',
                        'EvalResourceDecision': 'allowed'|'explicitDeny'|'implicitDeny',
                        'MatchedStatements': [
                            {
                                'SourcePolicyId': 'string',
                                'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
                                'StartPosition': {
                                    'Line': 123,
                                    'Column': 123
                                },
                                'EndPosition': {
                                    'Line': 123,
                                    'Column': 123
                                }
                            },
                        ],
                        'MissingContextValues': [
                            'string',
                        ],
                        'EvalDecisionDetails': {
                            'string': 'allowed'|'explicitDeny'|'implicitDeny'
                        }
                    },
                ]
            },
        ],
        'IsTruncated': True|False,
        'Marker': 'string'
    }

    :returns:
        (string) --

    """
    pass
[ "def", "simulate_custom_policy", "(", "PolicyInputList", "=", "None", ",", "ActionNames", "=", "None", ",", "ResourceArns", "=", "None", ",", "ResourcePolicy", "=", "None", ",", "ResourceOwner", "=", "None", ",", "CallerArn", "=", "None", ",", "ContextEntries", "=", "None", ",", "ResourceHandlingOption", "=", "None", ",", "MaxItems", "=", "None", ",", "Marker", "=", "None", ")", ":", "pass" ]
71.570588
56.794118
def html2vtml(vtmarkup):
    """ Convert hypertext markup into vt markup.

    The output can be given to `vtmlrender` for conversion to VT100
    sequences. """
    try:
        htmlconv.feed(vtmarkup)
        htmlconv.close()
        return htmlconv.getvalue()
    finally:
        htmlconv.reset()
[ "def", "html2vtml", "(", "vtmarkup", ")", ":", "try", ":", "htmlconv", ".", "feed", "(", "vtmarkup", ")", "htmlconv", ".", "close", "(", ")", "return", "htmlconv", ".", "getvalue", "(", ")", "finally", ":", "htmlconv", ".", "reset", "(", ")" ]
29.1
15.1
def setup_ssh_tunnel(job_id, local_port, remote_port):
    """
    Setup an ssh tunnel to the given job-id. This will establish
    a tunnel from the given local_port to the given remote_port and then
    exit, keeping the tunnel in place until the job is terminated.
    """
    cmd = ['dx', 'ssh', '--suppress-running-check', job_id, '-o', 'StrictHostKeyChecking no']
    cmd += ['-f', '-L', '{0}:localhost:{1}'.format(local_port, remote_port), '-N']
    subprocess.check_call(cmd)
[ "def", "setup_ssh_tunnel", "(", "job_id", ",", "local_port", ",", "remote_port", ")", ":", "cmd", "=", "[", "'dx'", ",", "'ssh'", ",", "'--suppress-running-check'", ",", "job_id", ",", "'-o'", ",", "'StrictHostKeyChecking no'", "]", "cmd", "+=", "[", "'-f'", ",", "'-L'", ",", "'{0}:localhost:{1}'", ".", "format", "(", "local_port", ",", "remote_port", ")", ",", "'-N'", "]", "subprocess", ".", "check_call", "(", "cmd", ")" ]
47.9
21.5
def calculate_splits(sdf_file, split_size):
    """Retrieve the record count from an SDF file and build
    "start-end" split ranges of at most split_size records each.
    """
    counts = _sdfstats(sdf_file)["counts"]
    splits = []
    cur = 0
    for i in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
        splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
        cur += split_size
    return splits
[ "def", "calculate_splits", "(", "sdf_file", ",", "split_size", ")", ":", "counts", "=", "_sdfstats", "(", "sdf_file", ")", "[", "\"counts\"", "]", "splits", "=", "[", "]", "cur", "=", "0", "for", "i", "in", "range", "(", "counts", "//", "split_size", "+", "(", "0", "if", "counts", "%", "split_size", "==", "0", "else", "1", ")", ")", ":", "splits", ".", "append", "(", "\"%s-%s\"", "%", "(", "cur", ",", "min", "(", "counts", ",", "cur", "+", "split_size", ")", ")", ")", "cur", "+=", "split_size", "return", "splits" ]
32.6
16.8
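The split arithmetic above with _sdfstats stubbed out: ten records chunked four at a time.

counts, split_size = 10, 4
splits, cur = [], 0
for _ in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
    splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
    cur += split_size
print(splits)   # ['0-4', '4-8', '8-10']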
def protoWithProof():
    """
    Run the full protocol including proof generation and verification.
    """
    r, x = blind(m)
    y, kw, tTilde = eval(w, t, x, msk, s)
    pi = prove(x, tTilde, kw, y)
    verify(x, t, y, pi, errorOnFail=True)
    z = deblind(r, y)
[ "def", "protoWithProof", "(", ")", ":", "r", ",", "x", "=", "blind", "(", "m", ")", "y", ",", "kw", ",", "tTilde", "=", "eval", "(", "w", ",", "t", ",", "x", ",", "msk", ",", "s", ")", "pi", "=", "prove", "(", "x", ",", "tTilde", ",", "kw", ",", "y", ")", "verify", "(", "x", ",", "t", ",", "y", ",", "pi", ",", "errorOnFail", "=", "True", ")", "z", "=", "deblind", "(", "r", ",", "y", ")" ]
23
16.636364
def datum_to_value(self, instance, datum): """Convert a given MAAS-side datum to a Python-side value. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The MAAS-side datum to validate and convert into a Python-side value. :return: A set of `cls` from the given datum. """ datum = self.map_func(instance, datum) if datum is None: return None local_data = None if self.reverse is not None: local_data = {} if self.reverse is undefined: local_data[instance.__class__.__name__.lower()] = instance else: local_data[self.reverse] = instance # Get the class from the bound origin. bound = getattr(instance._origin, self.cls) return bound(datum, local_data=local_data)
[ "def", "datum_to_value", "(", "self", ",", "instance", ",", "datum", ")", ":", "datum", "=", "self", ".", "map_func", "(", "instance", ",", "datum", ")", "if", "datum", "is", "None", ":", "return", "None", "local_data", "=", "None", "if", "self", ".", "reverse", "is", "not", "None", ":", "local_data", "=", "{", "}", "if", "self", ".", "reverse", "is", "undefined", ":", "local_data", "[", "instance", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "]", "=", "instance", "else", ":", "local_data", "[", "self", ".", "reverse", "]", "=", "instance", "# Get the class from the bound origin.", "bound", "=", "getattr", "(", "instance", ".", "_origin", ",", "self", ".", "cls", ")", "return", "bound", "(", "datum", ",", "local_data", "=", "local_data", ")" ]
43.652174
15.695652
def connection_lost(self, exc: Exception=None):
        """Callback fired when the connection is lost.

        Used to clean up pending work and close the connection, including:

        + cancelling the handler task
        + cancelling the timeout-monitoring task
        + cancelling any other tasks that have not finished yet
        + resetting the stream reader and writer
        + removing this connection from the current connection pool

        Parameters:

        exc (Exception): - the exception; if None, the connection was not
            closed because of an error
        """
        self._handlertask.cancel()
        super().connection_lost(exc)
        if self._timeout_handler:
            self._timeout_handler.cancel()
        self._transport = None
        for i, task in self.tasks.items():
            task.cancel()
        self.connections.discard(self)
        if self.debug:
            access_logger.info("lost connection", extra=self._extra)
[ "def", "connection_lost", "(", "self", ",", "exc", ":", "Exception", "=", "None", ")", ":", "self", ".", "_handlertask", ".", "cancel", "(", ")", "super", "(", ")", ".", "connection_lost", "(", "exc", ")", "if", "self", ".", "_timeout_handler", ":", "self", ".", "_timeout_handler", ".", "cancel", "(", ")", "self", ".", "_transport", "=", "None", "for", "i", ",", "task", "in", "self", ".", "tasks", ".", "items", "(", ")", ":", "task", ".", "cancel", "(", ")", "self", ".", "connections", ".", "discard", "(", "self", ")", "if", "self", ".", "debug", ":", "access_logger", ".", "info", "(", "\"lost connection\"", ",", "extra", "=", "self", ".", "_extra", ")" ]
26.04
16.64