def parse_game_event(self, ge): if ge.name == "dota_combatlog": if ge.keys["type"] == 4: # Something died try: source = self.dp.combat_log_names.get(ge.keys["sourcename"], "unknown") target = self.dp.combat_log_names.get(ge.keys["targetname"], "unknown") target_illusion = ge.keys["targetillusion"] timestamp = ge.keys["timestamp"] if (target.startswith("npc_dota_hero") and not target_illusion): self.kills.append({ "target": target, "source": source, "timestamp": timestamp, "tick": self.tick, }) elif source.startswith("npc_dota_hero"): self.heroes[source].creep_kill(target, timestamp) except KeyError: """ Sometimes we get combat logs for things we don't have in combat_log_names. My theory is that the server sends us incremental updates to the string table using CSVCMsg_UpdateStringTable but I'm not sure how to parse that """ pass
Game events contain the combat log as well as 'chase_hero' events, which could be interesting.
def fix_file(file_name, line_ranges, options=None, in_place=False, diff=False, verbose=0, cwd=None): import codecs from os import getcwd from pep8radius.diff import get_diff from pep8radius.shell import from_dir if cwd is None: cwd = getcwd() with from_dir(cwd): try: with codecs.open(file_name, 'r', encoding='utf-8') as f: original = f.read() except IOError: # Most likely the file has been removed. # Note: it would be nice if we could raise here, specifically # for the case of passing in a diff when in the wrong directory. return '' fixed = fix_code(original, line_ranges, options, verbose=verbose) if in_place: with from_dir(cwd): with codecs.open(file_name, 'w', encoding='utf-8') as f: f.write(fixed) return get_diff(original, fixed, file_name) if diff else fixed
Calls fix_code on the source code from the passed-in file over the given line_ranges. - If diff then this returns the udiff for the changes, otherwise returns the fixed code. - If in_place the changes are written to the file.
def fix_line_range(source_code, start, end, options): # TODO confirm behaviour outside range (indexing starts at 1) start = max(start, 1) options.line_range = [start, end] from autopep8 import fix_code fixed = fix_code(source_code, options) try: if options.docformatter: from docformatter import format_code fixed = format_code( fixed, summary_wrap_length=options.max_line_length - 1, description_wrap_length=(options.max_line_length - 2 * options.indent_size), pre_summary_newline=options.pre_summary_newline, post_description_blank=options.post_description_blank, force_wrap=options.force_wrap, line_range=[start, end]) except AttributeError: # e.g. using autopep8.parse_args, pragma: no cover pass return fixed
Apply autopep8 (and docformatter) between the lines start and end of source.
def _maybe_print(something_to_print, end=None, min_=1, max_=99, verbose=0): if min_ <= verbose <= max_: import sys print(something_to_print, end=end) sys.stdout.flush()
Print if verbose is within min_ and max_.
def from_diff(diff, options=None, cwd=None): return RadiusFromDiff(diff=diff, options=options, cwd=cwd)
Create a Radius object from a diff rather than a repository.
def fix(self): from pep8radius.diff import print_diff, udiff_lines_fixed n = len(self.filenames_diff) _maybe_print('Applying autopep8 to touched lines in %s file(s).' % n, verbose=self.verbose) any_changes = False total_lines_changed = 0 pep8_diffs = [] for i, file_name in enumerate(self.filenames_diff, start=1): _maybe_print('%s/%s: %s: ' % (i, n, file_name), end='', verbose=self.verbose) _maybe_print('', min_=2, verbose=self.verbose) p_diff = self.fix_file(file_name) lines_changed = udiff_lines_fixed(p_diff) if p_diff else 0 total_lines_changed += lines_changed if p_diff: any_changes = True if self.diff: pep8_diffs.append(p_diff) if self.in_place: _maybe_print('pep8radius fixed %s lines in %s files.' % (total_lines_changed, n), verbose=self.verbose) else: _maybe_print('pep8radius would fix %s lines in %s files.' % (total_lines_changed, n), verbose=self.verbose) if self.diff: for diff in pep8_diffs: print_diff(diff, color=self.color) return any_changes
Runs fix_file on each modified file. - Prints progress and diff depending on options. - Returns True if there were any changes.
def fix_file(self, file_name): # We hope that a CalledProcessError would have already raised # during the init if it were going to raise here. modified_lines = self.modified_lines(file_name) return fix_file(file_name, modified_lines, self.options, in_place=self.in_place, diff=True, verbose=self.verbose, cwd=self.cwd)
Apply autopep8 to the diff lines of a file. - Returns the diff between original and fixed file. - If self.in_place then this writes the fixed code to file_name. - Prints dots to show progress depending on options.
def version(): with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'pep8radius', 'main.py')) as input_file: for line in input_file: if line.startswith('__version__'): return parse(line).body[0].value.s
Return version string.
def multi_evaluate(self, x, out=None): if out is None: out = _np.empty(len(x)) else: assert len(out) == len(x) for i, point in enumerate(x): out[i] = self.evaluate(point) return out
Evaluate the log of the proposal density at ``x``, namely log(q(x)), for each row in x. :param x: Matrix-like array; the proposed points. The i-th point is expected to be accessible as ``x[i]``. :param out: Vector-like array, length==``len(x)``, optional; if provided, the output is written into this array.
def url_map(base, params): url = base if not params: url = url.rstrip("?&") elif '?' not in url: url += "?" entries = [] for key, value in params.items(): if value is not None: value = str(value) entries.append("%s=%s" % (quote_plus(key.encode("utf-8")), quote_plus(value.encode("utf-8")))) url += "&".join(entries) return str(url)
Return a URL with GET parameters based on the params passed in. This is more forgiving than urllib.urlencode and will attempt to coerce non-string objects into strings and automatically UTF-8 encode strings. @param params: HTTP GET parameters
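A doctest-style sketch of the intended behaviour (the example URL is hypothetical); the integer is coerced to a string and the space is percent-encoded by quote_plus:
>>> url_map("http://example.com/api", {"q": "dota 2", "page": 1})
'http://example.com/api?q=dota+2&page=1'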
def make_request(name, params=None, version="V001", key=None, api_type="web", fetcher=get_page, base=None, language="en_us"): params = params or {} params["key"] = key or API_KEY params["language"] = language if not params["key"]: raise ValueError("API key not set, please set DOTA2_API_KEY") url = url_map("%s%s/%s/" % (base or BASE_URL, name, version), params) return fetcher(url)
Make an API request
def json_request_response(f): @wraps(f) def wrapper(*args, **kwargs): response = f(*args, **kwargs) response.raise_for_status() return json.loads(response.content.decode('utf-8')) API_FUNCTIONS[f.__name__] = f return wrapper
Parse the JSON from an API response. We do this in a decorator so that our Twisted library can reuse the underlying functions
def get_match_history(start_at_match_id=None, player_name=None, hero_id=None, skill=0, date_min=None, date_max=None, account_id=None, league_id=None, matches_requested=None, game_mode=None, min_players=None, tournament_games_only=None, **kwargs): params = { "start_at_match_id": start_at_match_id, "player_name": player_name, "hero_id": hero_id, "skill": skill, "date_min": date_min, "date_max": date_max, "account_id": account_id, "league_id": league_id, "matches_requested": matches_requested, "game_mode": game_mode, "min_players": min_players, "tournament_games_only": tournament_games_only } return make_request("GetMatchHistory", params, **kwargs)
List of most recent 25 matches before start_at_match_id
def get_match_history_by_sequence_num(start_at_match_seq_num, matches_requested=None, **kwargs): params = { "start_at_match_seq_num": start_at_match_seq_num, "matches_requested": matches_requested } return make_request("GetMatchHistoryBySequenceNum", params, **kwargs)
Most recent matches ordered by sequence number
def get_steam_id(vanityurl, **kwargs): params = {"vanityurl": vanityurl} return make_request("ResolveVanityURL", params, version="v0001", base="http://api.steampowered.com/ISteamUser/", **kwargs)
Get a player's Steam ID from their Steam name/vanity URL
def get_player_summaries(players, **kwargs): if (isinstance(players, list)): params = {'steamids': ','.join(str(p) for p in players)} elif (isinstance(players, int)): params = {'steamids': players} else: raise ValueError("The players input needs to be a list or int") return make_request("GetPlayerSummaries", params, version="v0002", base="http://api.steampowered.com/ISteamUser/", **kwargs)
Get players' Steam profiles from their Steam IDs
def get_hero_image_url(hero_name, image_size="lg"): if hero_name.startswith("npc_dota_hero_"): hero_name = hero_name[len("npc_dota_hero_"):] valid_sizes = ['eg', 'sb', 'lg', 'full', 'vert'] if image_size not in valid_sizes: raise ValueError("Not a valid hero image size") return "http://media.steampowered.com/apps/dota2/images/heroes/{}_{}.png".format( hero_name, image_size)
Get a hero image based on name and image size
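For example, the URLs this yields:
>>> get_hero_image_url("npc_dota_hero_axe")
'http://media.steampowered.com/apps/dota2/images/heroes/axe_lg.png'
>>> get_hero_image_url("axe", image_size="full")
'http://media.steampowered.com/apps/dota2/images/heroes/axe_full.png'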
def generate_proxy( prefix, base_url='', verify_ssl=True, middleware=None, append_middleware=None, cert=None, timeout=None): middleware = list(middleware or HttpProxy.proxy_middleware) middleware += list(append_middleware or []) return type('ProxyClass', (HttpProxy,), { 'base_url': base_url, 'reverse_urls': [(prefix, base_url)], 'verify_ssl': verify_ssl, 'proxy_middleware': middleware, 'cert': cert, 'timeout': timeout })
Generate a ProxyClass based view that uses the passed base_url.
def brier_score(observations, forecasts): machine_eps = np.finfo(float).eps forecasts = np.asarray(forecasts) if (forecasts < 0.0).any() or (forecasts > (1.0 + machine_eps)).any(): raise ValueError('forecasts must not be outside of the unit interval ' '[0, 1]') observations = np.asarray(observations) if observations.ndim > 0: valid_obs = observations[~np.isnan(observations)] else: valid_obs = observations if not np.isnan(observations) else [] if not set(np.unique(valid_obs)) <= {0, 1}: raise ValueError('observations can only contain 0, 1, or NaN') return (forecasts - observations) ** 2
Calculate the Brier score (BS) The Brier score (BS) scores binary forecasts $k \in \{0, 1\}$, .. math:: BS(p, k) = (p_1 - k)^2, where $p_1$ is the forecast probability of $k=1$. Parameters ---------- observations, forecasts : array_like Broadcast compatible arrays of forecasts (probabilities between 0 and 1) and observations (0, 1 or NaN). Returns ------- out : np.ndarray Brier score for each forecast/observation. References ---------- Jochen Broecker. Chapter 7 in Forecast Verification: A Practitioner's Guide in Atmospheric Science. John Wiley & Sons, Ltd, Chichester, UK, 2nd edition, 2012. https://drive.google.com/a/climate.com/file/d/0B8AfRcot4nsIYmc3alpTeTZpLWc Tilmann Gneiting and Adrian E. Raftery. Strictly proper scoring rules, prediction, and estimation, 2005. University of Washington Department of Statistics Technical Report no. 463R. https://www.stat.washington.edu/research/reports/2004/tr463R.pdf
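A quick worked example (values chosen for illustration): with observations k = (0, 1) and forecasts p = (0.1, 0.9), both scores are (0.1)^2 = 0.01:
>>> import numpy as np
>>> brier_score(np.array([0, 1]), np.array([0.1, 0.9]))
array([0.01, 0.01])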
def dumps(*args, **kwargs): import json from django.conf import settings from argonauts.serializers import JSONArgonautsEncoder kwargs.setdefault('cls', JSONArgonautsEncoder) # pretty print in DEBUG mode. if settings.DEBUG: kwargs.setdefault('indent', 4) kwargs.setdefault('separators', (',', ': ')) else: kwargs.setdefault('separators', (',', ':')) return json.dumps(*args, **kwargs)
Wrapper for json.dumps that uses the JSONArgonautsEncoder.
def format(self, record): # XXX: idea, colorize message arguments s = super(ANSIFormatter, self).format(record) if hasattr(self.context, 'ansi'): s = self.context.ansi(s, **self.get_sgr(record)) return s
Overridden method that applies SGR codes to log messages.
def added(self, context): self._expose_argparse = context.bowl.has_spice("log:arguments") self.configure_logging(context)
Configure generic application logging. This method just calls :meth:`configure_logging()`, which sets up everything else. This allows other components to use logging without triggering implicit configuration.
def configure_logging(self, context): fmt = "%(name)-12s: %(levelname)-8s %(message)s" formatter = ANSIFormatter(context, fmt) handler = logging.StreamHandler() handler.setFormatter(formatter) logging.root.addHandler(handler)
Configure logging for the application. :param context: The guacamole context object. This method attaches a :py:class:`logging.StreamHandler` with a subclass of :py:class:`logging.Formatter` to the root logger. The specific subclass is :class:`ANSIFormatter` and it adds basic ANSI formatting (colors and some styles) to logging messages so that they stand out from normal output.
def adjust_logging(self, context): if context.early_args.log_level: log_level = context.early_args.log_level logging.getLogger("").setLevel(log_level) for name in context.early_args.trace: logging.getLogger(name).setLevel(logging.DEBUG) _logger.info("Enabled tracing on logger %r", name)
Adjust logging configuration. :param context: The guacamole context object. This method uses the context and the results of early argument parsing to adjust the configuration of the logging subsystem. In practice the values passed to ``--log-level`` and ``--trace`` are applied.
def invoked(self, ctx): logging.debug("Some debugging message") print("Just a normal print!") logging.info("Some informational message") print("Just a normal print!") logging.warning("Some warning message") print("Just a normal print!") logging.error("Some error message") print("Just a normal print!") logging.critical("Some critical message") print("Just a normal print!")
Guacamole method used by the command ingredient. :param ctx: The guacamole context object. Context provides access to all features of guacamole. :returns: The return code of the command. Guacamole translates ``None`` to a successful exit status (return code zero).
def perp(weights): # normalize weights w = _np.asarray(weights) / _np.sum(weights) # mask zero weights w = _np.ma.MaskedArray(w, copy=False, mask=(w == 0)) # avoid NaN due to log(0) by log(1)=0 entr = -_np.sum(w * _np.log(w.filled(1.0))) return _np.exp(entr) / len(w)
r"""Calculate the normalized perplexity :math:`\mathcal{P}` of samples with ``weights`` :math:`\omega_i`. :math:`\mathcal{P}=0` is terrible and :math:`\mathcal{P}=1` is perfect. .. math:: \mathcal{P} = exp(H) / N where .. math:: H = - \sum_{i=1}^N \bar{\omega}_i log ~ \bar{\omega}_i .. math:: \bar{\omega}_i = \frac{\omega_i}{\sum_i \omega_i} :param weights: Vector-like array; the samples' weights
def ess(weights): # normalize weights w = _np.asarray(weights) / _np.sum(weights) # ess coeff_var = _np.sum((len(w) * w - 1)**2) / len(w) return 1.0 / (1.0 + coeff_var)
r"""Calculate the normalized effective sample size :math:`ESS` [LC95]_ of samples with ``weights`` :math:`\omega_i`. :math:`ESS=0` is terrible and :math:`ESS=1` is perfect. .. math:: ESS = \frac{1}{1+C^2} where .. math:: C^2 = \frac{1}{N} \sum_{i=1}^N (N \bar{\omega}_i - 1)^2 .. math:: \bar{\omega}_i = \frac{\omega_i}{\sum_i \omega_i} :param weights: Vector-like array; the samples' weights
def json(a): json_str = json_dumps(a) # Escape all the XML/HTML special characters. escapes = ['<', '>', '&'] for c in escapes: json_str = json_str.replace(c, r'\u%04x' % ord(c)) # now it's safe to use mark_safe return mark_safe(json_str)
Output the json encoding of its argument. This will escape all the HTML/XML special characters with their unicode escapes, so it is safe to be output anywhere except for inside a tag attribute. If the output needs to be put in an attribute, entitize the output of this filter.
def json_twisted_response(f): def wrapper(*args, **kwargs): response = f(*args, **kwargs) response.addCallback(lambda x: json.loads(x)) return response wrapper.func = f wrapper = util.mergeFunctionMetadata(f.func, wrapper) return wrapper
Parse the JSON from an API response. We do this in a decorator so that our Twisted library can reuse the underlying functions
def main(self, argv=None, exit=True): bowl = self.prepare() try: retval = bowl.eat(argv) except SystemExit as exc: if exit: raise else: return exc.args[0] else: if retval is None: retval = 0 if exit: raise SystemExit(retval) else: return retval
Shortcut to prepare a bowl of guacamole and eat it. :param argv: Command line arguments or None. None means that sys.argv is used :param exit: Raise SystemExit after finishing execution :returns: Whatever is returned by eating the guacamole. :raises: Whatever is raised by eating the guacamole. .. note:: This method always either raises an exception or returns an object. The way it behaves depends on the value of the `exit` argument. This method can be used to quickly take a recipe, prepare the guacamole and eat it. It is named main as it is applicable as the main method of an application. The `exit` argument controls if main returns normally or raises SystemExit. By default it will raise SystemExit (it will either wrap the return value with SystemExit or re-raise the SystemExit exception again). If SystemExit is raised but `exit` is False the argument to SystemExit is unwrapped and returned instead.
def dispatch_failed(self, context): traceback.print_exception( context.exc_type, context.exc_value, context.traceback) raise SystemExit(1)
Print the unhandled exception and exit the application.
def variables(template): '''Returns the set of keywords in a uri template''' vars = set() for varlist in TEMPLATE.findall(template): if varlist[0] in OPERATOR: varlist = varlist[1:] varspecs = varlist.split(',') for var in varspecs: # handle prefix values var = var.split(':')[0] # handle composite values if var.endswith('*'): var = var[:-1] vars.add(var) return vars
Returns the set of keywords in a uri template
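An illustrative call, assuming the module-level TEMPLATE and OPERATOR constants implement RFC 6570 syntax:
>>> variables('http://example.com/{user}/search{?q,lang}') == {'user', 'q', 'lang'}
True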
def expand(template, variables): def _sub(match): expression = match.group(1) operator = "" if expression[0] in OPERATOR: operator = expression[0] varlist = expression[1:] else: varlist = expression safe = "" if operator in ["+", "#"]: safe = RESERVED varspecs = varlist.split(",") varnames = [] defaults = {} for varspec in varspecs: default = None explode = False prefix = None if "=" in varspec: varname, default = tuple(varspec.split("=", 1)) else: varname = varspec if varname[-1] == "*": explode = True varname = varname[:-1] elif ":" in varname: try: prefix = int(varname[varname.index(":")+1:]) except ValueError: raise ValueError("non-integer prefix '{0}'".format( varname[varname.index(":")+1:])) varname = varname[:varname.index(":")] if default: defaults[varname] = default varnames.append((varname, explode, prefix)) retval = [] joiner = operator start = operator if operator == "+": start = "" joiner = "," if operator == "#": joiner = "," if operator == "?": joiner = "&" if operator == "&": start = "&" if operator == "": joiner = "," for varname, explode, prefix in varnames: if varname in variables: value = variables[varname] if not value and value != "" and varname in defaults: value = defaults[varname] elif varname in defaults: value = defaults[varname] else: continue expanded = TOSTRING[operator]( varname, value, explode, prefix, operator, safe=safe) if expanded is not None: retval.append(expanded) if len(retval) > 0: return start + joiner.join(retval) else: return "" return TEMPLATE.sub(_sub, template)
Expand template as a URI Template using variables.
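A sketch of the expected RFC 6570 behaviour (this relies on the module's TOSTRING helpers, which are not shown here):
>>> expand('http://example.com/{user}{?q}', {'user': 'bob', 'q': 'dota'})
'http://example.com/bob?q=dota'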
def calculate_mean(samples, weights): r'''Calculate the mean of weighted samples (like the output of an importance-sampling run). :param samples: Matrix-like numpy array; the samples to be used. :param weights: Vector-like numpy array; the (unnormalized) importance weights. ''' assert len(samples) == len(weights), "The number of samples (got %i) must equal the number of weights (got %i)." % (len(samples), len(weights)) return _np.average(samples, axis=0, weights=weights)
Calculate the mean of weighted samples (like the output of an importance-sampling run). :param samples: Matrix-like numpy array; the samples to be used. :param weights: Vector-like numpy array; the (unnormalized) importance weights.
def calculate_covariance(samples, weights): r'''Calculates the covariance matrix of weighted samples (like the output of an importance-sampling run). :param samples: Matrix-like numpy array; the samples to be used. :param weights: Vector-like numpy array; the (unnormalized) importance weights. ''' assert len(samples) == len(weights), "The number of samples (got %i) must equal the number of weights (got %i)." % (len(samples), len(weights)) sum_weights_sq = (weights.sum())**2 sum_sq_weights = (weights**2).sum() mean = calculate_mean(samples, weights) return sum_weights_sq / (sum_weights_sq - sum_sq_weights) * calculate_expectation(samples, weights, lambda x: _np.einsum('i,j', x - mean, x - mean))
Calculates the covariance matrix of weighted samples (like the output of an importance-sampling run). :param samples: Matrix-like numpy array; the samples to be used. :param weights: Vector-like numpy array; the (unnormalized) importance weights.
def clear(self): '''Clear history of samples and other internal variables to free memory. .. note:: The proposal is untouched. ''' self.samples.clear() self.weights.clear() if self.target_values is not None: self.target_values.clear()
Clear history of samples and other internal variables to free memory. .. note:: The proposal is untouched.
def _calculate_weights(self, this_samples, N): this_weights = self.weights.append(N)[:,0] if self.target_values is None: for i in range(N): tmp = self.target(this_samples[i]) - self.proposal.evaluate(this_samples[i]) this_weights[i] = _exp(tmp) else: this_target_values = self.target_values.append(N) for i in range(N): this_target_values[i] = self.target(this_samples[i]) tmp = this_target_values[i] - self.proposal.evaluate(this_samples[i]) this_weights[i] = _exp(tmp)
Calculate and save the weights of a run.
def _get_samples(self, N, trace_sort): # allocate an empty numpy array to store the run and append accept count # (importance sampling accepts all points) this_run = self.samples.append(N) # store the proposed points (weights are still to be calculated) if trace_sort: this_run[:], origin = self.proposal.propose(N, self.rng, trace=True, shuffle=False) return this_run, origin else: this_run[:] = self.proposal.propose(N, self.rng) return this_run
Save N samples from ``self.proposal`` to ``self.samples``. This function does NOT calculate the weights. Return a reference to this run's samples in ``self.samples``. If ``trace_sort`` is True, additionally return an array indicating the responsible component. (MixtureDensity only)
def x_forwarded_for(self): ip = self._request.META.get('REMOTE_ADDR') current_xff = self.headers.get('X-Forwarded-For') return '%s, %s' % (current_xff, ip) if current_xff else ip
X-Forwarded-For header value. This is the amended header so that it contains the previous IP address in the forwarding chain.
def _add_to_docstring(string): '''Private wrapper function. Appends ``string`` to the docstring of the wrapped function. ''' def wrapper(method): if method.__doc__ is not None: method.__doc__ += string else: method.__doc__ = string return method return wrapper
Private wrapper function. Appends ``string`` to the docstring of the wrapped function.
def _normalize_django_header_name(header): # Remove HTTP_ prefix. new_header = header.rpartition('HTTP_')[2] # Camel case and replace _ with - new_header = '-'.join( x.capitalize() for x in new_header.split('_')) return new_header
Unmunge header names modified by Django.
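For example, assuming the method is a staticmethod on HeaderDict (as the from_request classmethod below suggests):
>>> HeaderDict._normalize_django_header_name('HTTP_X_FORWARDED_FOR')
'X-Forwarded-For'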
def from_request(cls, request): request_headers = HeaderDict() other_headers = ['CONTENT_TYPE', 'CONTENT_LENGTH'] for header, value in iteritems(request.META): is_header = header.startswith('HTTP_') or header in other_headers normalized_header = cls._normalize_django_header_name(header) if is_header and value: request_headers[normalized_header] = value return request_headers
Generate a HeaderDict based on a Django request object's META data.
def filter(self, exclude): filtered_headers = HeaderDict() lowercased_ignore_list = [x.lower() for x in exclude] for header, value in iteritems(self): if header.lower() not in lowercased_ignore_list: filtered_headers[header] = value return filtered_headers
Return a HeaderSet excluding the headers in the exclude list.
def crps_gaussian(x, mu, sig, grad=False): x = np.asarray(x) mu = np.asarray(mu) sig = np.asarray(sig) # standardized x sx = (x - mu) / sig # some precomputations to speed up the gradient pdf = _normpdf(sx) cdf = _normcdf(sx) pi_inv = 1. / np.sqrt(np.pi) # the actual crps crps = sig * (sx * (2 * cdf - 1) + 2 * pdf - pi_inv) if grad: dmu = 1 - 2 * cdf dsig = 2 * pdf - pi_inv return crps, np.array([dmu, dsig]) else: return crps
Computes the CRPS of observations x relative to normally distributed forecasts with mean, mu, and standard deviation, sig. CRPS(N(mu, sig^2); x) Formula taken from Equation (5): Calibrated Probabilistic Forecasting Using Ensemble Model Output Statistics and Minimum CRPS Estimation. Gneiting, Raftery, Westveld, Goldman. Monthly Weather Review 2004 http://journals.ametsoc.org/doi/pdf/10.1175/MWR2904.1 Parameters ---------- x : scalar or np.ndarray The observation or set of observations. mu : scalar or np.ndarray The mean of the forecast normal distribution sig : scalar or np.ndarray The standard deviation of the forecast distribution grad : boolean If True the gradient of the CRPS w.r.t. mu and sig is returned along with the CRPS. Returns ------- crps : scalar or np.ndarray or tuple of The CRPS of each observation x relative to mu and sig. The shape of the output array is determined by numpy broadcasting rules. crps_grad : np.ndarray (optional) If grad=True the gradient of the crps is returned as a numpy array [grad_wrt_mu, grad_wrt_sig]. The same broadcasting rules apply.
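A worked spot check: observing the mean of a standard normal forecast gives sx = 0, so CRPS = 2*pdf(0) - 1/sqrt(pi) = 2/sqrt(2*pi) - 1/sqrt(pi), roughly 0.2337:
>>> round(float(crps_gaussian(0.0, mu=0.0, sig=1.0)), 4)
0.2337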
def _discover_bounds(cdf, tol=1e-7): class DistFromCDF(stats.distributions.rv_continuous): def cdf(self, x): return cdf(x) dist = DistFromCDF() # the ppf is the inverse cdf lower = dist.ppf(tol) upper = dist.ppf(1. - tol) return lower, upper
Uses scipy's general continuous distribution methods which compute the ppf from the cdf, then use the ppf to find the lower and upper limits of the distribution.
def crps_quadrature(x, cdf_or_dist, xmin=None, xmax=None, tol=1e-6): return _crps_cdf(x, cdf_or_dist, xmin, xmax, tol)
Compute the continuous ranked probability score (CRPS) for a given forecast distribution (cdf) and observation (x) using numerical quadrature. This implementation allows the computation of CRPS for arbitrary forecast distributions. If gaussianity can be assumed ``crps_gaussian`` is faster. Parameters ---------- x : np.ndarray Observations associated with the forecast distribution cdf_or_dist cdf_or_dist : callable or scipy.stats.distribution Function which returns the cumulative density of the forecast distribution at value x. This can also be an object with a callable cdf() method such as a scipy.stats.distribution object. xmin : np.ndarray or scalar The lower bounds for integration, this is required to perform quadrature. xmax : np.ndarray or scalar The upper bounds for integration, this is required to perform quadrature. tol : float, optional The desired accuracy of the CRPS, larger values will speed up integration. If tol is set to None, bounds errors or integration tolerance errors will be ignored. Returns ------- crps : np.ndarray The continuous ranked probability score of an observation x given forecast distribution.
def clear(self): self._points = _np.empty( (self.prealloc,self.dim) ) self._slice_for_run_nr = [] self.memleft = self.prealloc
Deletes the history
def partition(N, k): '''Distribute ``N`` into ``k`` parts such that each part takes the value ``N//k`` or ``N//k + 1`` where ``//`` denotes integer division; i.e., perform the minimal lexicographic integer partition. Example: N = 5, k = 2 --> return [3, 2] ''' out = [N // k] * k remainder = N % k for i in range(remainder): out[i] += 1 return out
Distribute ``N`` into ``k`` parts such that each part takes the value ``N//k`` or ``N//k + 1`` where ``//`` denotes integer division; i.e., perform the minimal lexicographic integer partition. Example: N = 5, k = 2 --> return [3, 2]
def dispatch(self, request, *args, **kwargs): self.request = DownstreamRequest(request) self.args = args self.kwargs = kwargs self._verify_config() self.middleware = MiddlewareSet(self.proxy_middleware) return self.proxy()
Dispatch all HTTP methods to the proxy.
def proxy(self): headers = self.request.headers.filter(self.ignored_request_headers) qs = self.request.query_string if self.pass_query_string else '' # Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005 if (self.request.META.get('CONTENT_LENGTH', None) == '' and get_django_version() == '1.10'): del self.request.META['CONTENT_LENGTH'] request_kwargs = self.middleware.process_request( self, self.request, method=self.request.method, url=self.proxy_url, headers=headers, data=self.request.body, params=qs, allow_redirects=False, verify=self.verify_ssl, cert=self.cert, timeout=self.timeout) result = request(**request_kwargs) response = HttpResponse(result.content, status=result.status_code) # Attach forwardable headers to response forwardable_headers = HeaderDict(result.headers).filter( self.ignored_upstream_headers) for header, value in iteritems(forwardable_headers): response[header] = value return self.middleware.process_response( self, self.request, result, response)
Retrieve the upstream content and build an HttpResponse.
def shell_out(cmd, stderr=STDOUT, cwd=None): if cwd is None: from os import getcwd cwd = getcwd() # TODO do I need to normalize this on Windows out = check_output(cmd, cwd=cwd, stderr=stderr, universal_newlines=True) return _clean_output(out)
Friendlier version of check_output.
def shell_out_ignore_exitcode(cmd, stderr=STDOUT, cwd=None): try: return shell_out(cmd, stderr=stderr, cwd=cwd) except CalledProcessError as c: return _clean_output(c.output)
Same as shell_out but doesn't raise if the cmd exits badly.
def from_dir(cwd): "Context manager to ensure in the cwd directory." import os curdir = os.getcwd() try: os.chdir(cwd) yield finally: os.chdir(curdir)
Context manager to ensure in the cwd directory.
def merge_function_with_indicator(function, indicator, alternative): '''Returns a function such that a call to it is equivalent to: if indicator(x): return function(x) else: return alternative Note that ``function`` is not called if indicator evaluates to False. :param function: The function to be called when indicator returns True. :param indicator: Bool-returning function; the indicator :param alternative: The object to be returned when indicator returns False ''' if indicator is None: return function else: def merged_function(x): if indicator(x): return function(x) else: return alternative return merged_function
Returns a function such that a call to it is equivalent to: if indicator(x): return function(x) else: return alternative Note that ``function`` is not called if indicator evaluates to False. :param function: The function to be called when indicator returns True. :param indicator: Bool-returning function; the indicator :param alternative: The object to be returned when indicator returns False
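A small usage sketch: restrict numpy's log to positive arguments, returning -inf outside the support:
>>> import numpy as np
>>> safe_log = merge_function_with_indicator(np.log, lambda x: x > 0, -np.inf)
>>> safe_log(-1.0)
-inf
>>> float(safe_log(1.0))
0.0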
def text_filter(regex_base, value): from thumbnails import get_thumbnail regex = regex_base % { 'caption': r'[a-zA-Z0-9\.\,:;/_ \(\)\-\!\?\"]+', 'image': r'[a-zA-Z0-9\.:/_\-\% ]+' } images = re.findall(regex, value) for i in images: image_url = i[1] image = get_thumbnail( image_url, **settings.THUMBNAIL_FILTER_OPTIONS ) value = value.replace(i[1], image.url) return value
A text-filter helper, used in the ``markdown_thumbnails``-filter and ``html_thumbnails``-filter. It can be used to build custom thumbnail text-filters. :param regex_base: A string with a regex that contains ``%(caption)s`` and ``%(image)s`` where the caption and image should be. :param value: String of text in which the source URLs can be found. :return: A string ready to be put in a template.
def eat(self, argv=None): # The setup phase, here KeyboardInterrupt is a silent sign to exit the # application. Any error that happens here will result in a raw # backtrace being printed to the user. try: self.context.argv = argv self._added() self._build_early_parser() self._preparse() self._early_init() self._build_parser() self._parse() self._late_init() except KeyboardInterrupt: self._shutdown() return # The execution phase. Here we differentiate SystemExit from all other # exceptions. SystemExit is just re-raised as that's what any piece of # code can raise to ask to exit the currently running application. All # other exceptions are recorded in the context and the failure-path of # the dispatch is followed. In other case, when there are no # exceptions, the success-path is followed. In both cases, ingredients # are shut down. try: return self._dispatch() except SystemExit: raise except BaseException: (self.context.exc_type, self.context.exc_value, self.context.traceback) = sys.exc_info() self._dispatch_failed() else: self._dispatch_succeeded() finally: self._shutdown()
Eat the guacamole. :param argv: Command line arguments or None. None means that sys.argv is used :return: Whatever is returned by the first ingredient that agrees to perform the command dispatch. The eat method is called to run the application, as if it was invoked from command line directly.
def _dispatch(self): for ingredient in self.ingredients: result = ingredient.dispatch(self.context) if result is not None: return result
Run the dispatch() method on all ingredients.
def clear(self): self.sampler.clear() self.samples_list = self._comm.gather(self.sampler.samples, root=0) if hasattr(self.sampler, 'weights'): self.weights_list = self._comm.gather(self.sampler.weights, root=0) else: self.weights_list = None
Delete the history.
def path(self, path): if os.path.isabs(path): return path return os.path.join(self.location, path)
Creates a path based on the location attribute of the backend and the path argument of the function. If the path argument is an absolute path, it is returned as-is. :param path: The path that should be joined with the backend's location.
def kullback_leibler(c1, c2): d = c2.log_det_sigma - c1.log_det_sigma d += np.trace(c2.inv_sigma.dot(c1.sigma)) mean_diff = c1.mu - c2.mu d += mean_diff.transpose().dot(c2.inv_sigma).dot(mean_diff) d -= len(c1.mu) return 0.5 * d
Kullback-Leibler divergence of two Gaussians, :math:`KL(1||2)`
def _cleanup(self, kill, verbose): if kill: removed_indices = self.g.prune() self.nout -= len(removed_indices) if verbose and removed_indices: print('Removing %s' % removed_indices) for j in removed_indices: self.inv_map.pop(j[0])
Look for dead components (weight=0) and remove them if enabled by ``kill``. Resize storage. Recompute determinant and covariance.
def _distance(self): return np.average(self.min_kl, weights=self.f.weights)
Compute the distance function :math:`d(f,g,\pi)`, Eq. (3)
def _refit(self): # temporary variables for manipulation mu_diff = np.empty_like(self.f.components[0].mu) sigma = np.empty_like(self.f.components[0].sigma) mean = np.empty_like(mu_diff) cov = np.empty_like(sigma) for j, c in enumerate(self.g.components): # stop if inv_map is empty for j-th comp. if not self.inv_map[j]: self.g.weights[j] = 0. continue # (re-)initialize new mean/cov to zero mean[:] = 0.0 cov[:] = 0.0 # compute total weight and mean self.g.weights[j] = self.f.weights[self.inv_map[j]].sum() for i in self.inv_map[j]: mean += self.f.weights[i] * self.f.components[i].mu # rescale by total weight mean /= self.g.weights[j] # update covariance for i in self.inv_map[j]: # mu_diff = mu'_j - mu_i mu_diff[:] = mean mu_diff -= self.f.components[i].mu # sigma = (mu'_j - mu_i) (mu'_j - mu_i)^T sigma[:] = np.outer(mu_diff, mu_diff) # sigma += sigma_i sigma += self.f.components[i].sigma # multiply with alpha_i sigma *= self.f.weights[i] # sigma_j += alpha_i * (sigma_i + (mu'_j - mu_i) (mu'_j - mu_i)^T cov += sigma # 1 / beta_j cov /= self.g.weights[j] # update the Mixture c.update(mean, cov)
Update the output :math:`g` keeping the map :math:`\pi` fixed. Use Eq. (7) and below in [GR04]_
def _regroup(self): # clean up old maps for j in range(self.nout): self.inv_map[j] = [] # find smallest divergence between input component i # and output component j of the cluster mixture density for i in range(self.nin): self.min_kl[i] = np.inf j_min = None for j in range(self.nout): kl = kullback_leibler(self.f.components[i], self.g.components[j]) if kl < self.min_kl[i]: self.min_kl[i] = kl j_min = j assert j_min is not None self.inv_map[j_min].append(i)
Update the map :math:`\pi` keeping the output :math:`g` fixed. Compute the KL between all input and output components.
def run(self, eps=1e-4, kill=True, max_steps=50, verbose=False): old_distance = np.finfo(np.float64).max new_distance = np.finfo(np.float64).max if verbose: print('Starting hierarchical clustering with %d components.' % len(self.g.components)) converged = False for step in range(1, max_steps + 1): self._cleanup(kill, verbose) self._regroup() self._refit() new_distance = self._distance() assert new_distance >= 0, 'Found negative distance %g' % new_distance if verbose: print('Distance in step %d: %g' % (step, new_distance)) if new_distance == old_distance: converged = True if verbose: print('Exact minimum found after %d steps' % step) break rel_change = (old_distance - new_distance) / old_distance assert not (rel_change < -1e-13), 'distance increased' if rel_change < eps and not converged and step > 0: converged = True if verbose and new_distance != old_distance: print('Close enough to local minimum after %d steps' % step) break # save distance for comparison in next step old_distance = new_distance self._cleanup(kill, verbose) if verbose: print('%d components remain.' % len(self.g.components)) if converged: return step
r"""Perform the clustering on the input components updating the initial guess. The result is available in the member ``self.g``. Return the number of iterations at convergence, or None. :param eps: If relative change of distance between current and last step falls below ``eps``, declare convergence: .. math:: 0 < \frac{d^t - d^{t-1}}{d^t} < \varepsilon :param kill: If a component is assigned zero weight (no input components), it is removed. :param max_steps: Perform a maximum number of update steps. :param verbose: Output information on progress of algorithm.
def eventdata(payload): headerinfo, data = payload.split('\n', 1) headers = get_headers(headerinfo) return headers, data
Parse a Supervisor event.
def supervisor_events(stdin, stdout): while True: stdout.write('READY\n') stdout.flush() line = stdin.readline() headers = get_headers(line) payload = stdin.read(int(headers['len'])) event_headers, event_data = eventdata(payload) yield event_headers, event_data stdout.write('RESULT 2\nOK') stdout.flush()
An event stream from Supervisor.
def main(): env = os.environ try: host = env['SYSLOG_SERVER'] port = int(env['SYSLOG_PORT']) socktype = socket.SOCK_DGRAM if env['SYSLOG_PROTO'] == 'udp' \ else socket.SOCK_STREAM except KeyError: sys.exit("SYSLOG_SERVER, SYSLOG_PORT and SYSLOG_PROTO are required.") handler = SysLogHandler( address=(host, port), socktype=socktype, ) handler.setFormatter(PalletFormatter()) for event_headers, event_data in supervisor_events(sys.stdin, sys.stdout): event = logging.LogRecord( name=event_headers['processname'], level=logging.INFO, pathname=None, lineno=0, msg=event_data, args=(), exc_info=None, ) event.process = int(event_headers['pid']) handler.handle(event)
Main application loop.
def formatTime(self, record, datefmt=None): formatted = super(PalletFormatter, self).formatTime( record, datefmt=datefmt) return formatted + '.%03dZ' % record.msecs
Format time, including milliseconds.
def modified_lines_from_udiff(udiff): chunks = re.split(r'\n@@ [^\n]+\n', udiff)[1:] line_numbers = re.findall(r'@@\s[+-]\d+,\d+ \+(\d+)', udiff) line_numbers = list(map(int, line_numbers)) for c, start in zip(chunks, line_numbers): ilines = enumerate((line for line in c.splitlines() if not line.startswith('-')), start=start) added_lines = [i for i, line in ilines if line.startswith('+')] if added_lines: yield (added_lines[0], added_lines[-1])
Extract from a udiff an iterator of tuples of (start, end) line numbers.
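A minimal illustration with a hypothetical one-hunk diff; only the added line (line 2 of the fixed file) is reported:
>>> udiff = '--- a\n+++ b\n@@ -1,2 +1,3 @@\n unchanged\n+added\n context\n'
>>> list(modified_lines_from_udiff(udiff))
[(2, 2)]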
def get_diff(original, fixed, file_name, original_label='original', fixed_label='fixed'): original, fixed = original.splitlines(True), fixed.splitlines(True) newline = '\n' from difflib import unified_diff diff = unified_diff(original, fixed, os.path.join(original_label, file_name), os.path.join(fixed_label, file_name), lineterm=newline) text = '' for line in diff: text += line # Work around missing newline (http://bugs.python.org/issue2142). if not line.endswith(newline): text += newline + r'\ No newline at end of file' + newline return text
Return text of unified diff between original and fixed.
def print_diff(diff, color=True): import colorama if not diff: return if not color: colorama.init = lambda autoreset: None colorama.Fore.RED = '' colorama.Back.RED = '' colorama.Fore.GREEN = '' colorama.deinit = lambda: None colorama.init(autoreset=True) # TODO use context_manager for line in diff.splitlines(): if line.startswith('+') and not line.startswith('+++ '): # Note there shouldn't be trailing whitespace # but may be nice to generalise this print(colorama.Fore.GREEN + line) elif line.startswith('-') and not line.startswith('--- '): split_whitespace = re.split(r'(\s+)$', line) if len(split_whitespace) > 1: # claim it must be 3 line, trailing, _ = split_whitespace else: line, trailing = split_whitespace[0], '' print(colorama.Fore.RED + line, end='') # give trailing whitespace a RED background print(colorama.Back.RED + trailing) elif line == r'\ No newline at end of file': # The assumption here is that there is now a new line... print(colorama.Fore.RED + line) else: print(line) colorama.deinit()
Pretty printing for a diff, if color then we use a simple color scheme (red for removed lines, green for added lines).
def _get_log_rho_metropolis_hastings(self, proposed_point, proposed_eval): return self._get_log_rho_metropolis(proposed_point, proposed_eval) - self.proposal.evaluate(proposed_point, self.current) + self.proposal.evaluate(self.current, proposed_point)
Calculate log(Metropolis ratio times Hastings factor).
def _update_scale_factor(self, accept_rate): '''Private function. Updates the covariance scaling factor ``covar_scale_factor`` according to its limits ''' if accept_rate > self.force_acceptance_max and self.covar_scale_factor < self.covar_scale_factor_max: self.covar_scale_factor *= self.covar_scale_multiplier elif accept_rate < self.force_acceptance_min and self.covar_scale_factor > self.covar_scale_factor_min: self.covar_scale_factor /= self.covar_scale_multiplier
Private function. Updates the covariance scaling factor ``covar_scale_factor`` according to its limits
def get_thumbnail(self, original, size, crop, options): try: image = self.create(original, size, crop, options) except ThumbnailError: image = None finally: self.cleanup(original) return image
Wrapper for .create() with cleanup. :param original: :param size: :param crop: :param options: :return: An image object
def create(self, original, size, crop, options=None): if options is None: options = self.evaluate_options() image = self.engine_load_image(original) image = self.scale(image, size, crop, options) crop = self.parse_crop(crop, self.get_image_size(image), size) image = self.crop(image, size, crop, options) image = self.colormode(image, options) return image
Creates a thumbnail. It loads the image, scales it and crops it. :param original: :param size: :param crop: :param options: :return:
def scale(self, image, size, crop, options): original_size = self.get_image_size(image) factor = self._calculate_scaling_factor(original_size, size, crop is not None) if factor < 1 or options['scale_up']: width = int(original_size[0] * factor) height = int(original_size[1] * factor) image = self.engine_scale(image, width, height) return image
Wrapper for ``engine_scale``, checks if the scaling factor is below one or that scale_up option is set to True before calling ``engine_scale``. :param image: :param size: :param crop: :param options: :return:
def crop(self, image, size, crop, options): if not crop: return image return self.engine_crop(image, size, crop, options)
Wrapper for ``engine_crop``, will return without calling ``engine_crop`` if crop is None. :param image: :param size: :param crop: :param options: :return:
def colormode(self, image, options): mode = options['colormode'] return self.engine_colormode(image, mode)
Wrapper for ``engine_colormode``. :param image: :param options: :return:
def parse_size(size): if size.startswith('x'): return None, int(size.replace('x', '')) if 'x' in size: return int(size.split('x')[0]), int(size.split('x')[1]) return int(size), None
Parses size string into a tuple :param size: String of the form '100', 'x100' or '100x200' :return: Tuple of two integers for width and height :rtype: tuple
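For example:
>>> parse_size('100')
(100, None)
>>> parse_size('x100')
(None, 100)
>>> parse_size('100x200')
(100, 200)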
def parse_crop(self, crop, original_size, size): if crop is None: return None crop = crop.split(' ') if len(crop) == 1: crop = crop[0] x_crop = 50 y_crop = 50 if crop in CROP_ALIASES['x']: x_crop = CROP_ALIASES['x'][crop] elif crop in CROP_ALIASES['y']: y_crop = CROP_ALIASES['y'][crop] x_offset = self.calculate_offset(x_crop, original_size[0], size[0]) y_offset = self.calculate_offset(y_crop, original_size[1], size[1]) return int(x_offset), int(y_offset)
Parses crop into a tuple usable by the crop function. :param crop: String with the crop settings. :param original_size: A tuple of size of the image that should be cropped. :param size: A tuple of the wanted size. :return: Tuple of two integers with crop settings :rtype: tuple
def calculate_offset(percent, original_length, length): return int( max( 0, min(percent * original_length / 100.0, original_length - length / 2) - length / 2) )
Calculates crop offset based on percentage. :param percent: A percentage representing the size of the offset. :param original_length: The original length of the dimension being cropped. :param length: The desired length. :return: The offset in pixels :rtype: int
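Worked example: centering (50%) a 600-pixel crop in a 1000-pixel image gives min(500, 1000 - 300) - 300 = 200:
>>> calculate_offset(50, 1000, 600)
200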
def get_app_template_dir(app_name): if app_name in _cache: return _cache[app_name] template_dir = None for app in settings.INSTALLED_APPS: if app.split('.')[-1] == app_name: # Do not hide import errors; these should never happen at this # point anyway mod = import_module(app) template_dir = join(abspath(dirname(mod.__file__)), 'templates') break _cache[app_name] = template_dir return template_dir
Get the template directory for an application. We do not use django.db.models.get_app, because this will fail if an app does not have any models. Returns a full path, or None if the app was not found.
def get_template_sources(self, template_name, template_dirs=None): if ':' not in template_name: return [] app_name, template_name = template_name.split(":", 1) template_dir = get_app_template_dir(app_name) if template_dir: return [get_template_path(template_dir, template_name, self)] return []
Return the absolute paths to "template_name" in the specified app. If the name does not contain an app name (no colon), an empty list is returned. The parent FilesystemLoader.load_template_source() will take care of the actual loading for us.
def parse_arguments(): parser = argparse.ArgumentParser(prog=sys.argv[0], description='Send Webhooks Channel events to IFTTT', epilog='Visit https://ifttt.com/channels/maker_webhooks for more information') parser.add_argument('--version', action='version', version=pyfttt.__version__) sgroup = parser.add_argument_group(title='sending events') sgroup.add_argument('-k', '--key', metavar='K', default=os.environ.get('IFTTT_API_KEY'), help='IFTTT secret key') sgroup.add_argument('-e', '--event', metavar='E', required=True, help='The name of the event to trigger') sgroup.add_argument('value1', nargs='?', help='Extra data sent with the event (optional)') sgroup.add_argument('value2', nargs='?', help='Extra data sent with the event (optional)') sgroup.add_argument('value3', nargs='?', help='Extra data sent with the event (optional)') return parser.parse_args()
Parse command line arguments
def main(): args = parse_arguments() if args.key is None: print("Error: Must provide IFTTT secret key.") sys.exit(1) try: res = pyfttt.send_event(api_key=args.key, event=args.event, value1=args.value1, value2=args.value2, value3=args.value3) except requests.exceptions.ConnectionError: print("Error: Could not connect to IFTTT") sys.exit(2) except requests.exceptions.HTTPError: print("Error: Received invalid response") sys.exit(3) except requests.exceptions.Timeout: print("Error: Request timed out") sys.exit(4) except requests.exceptions.TooManyRedirects: print("Error: Too many redirects") sys.exit(5) except requests.exceptions.RequestException as reqe: print("Error: {e}".format(e=reqe)) sys.exit(6) if res.status_code != requests.codes.ok: try: j = res.json() except ValueError: print('Error: Could not parse server response. Event not sent') sys.exit(7) for err in j['errors']: print('Error: {}'.format(err['message'])) sys.exit(8)
Main function for pyfttt command line tool
def import_string(dotted_path): try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: raise ImportError('%s doesn\'t look like a valid path' % dotted_path) module = __import__(module_path, fromlist=[class_name]) try: return getattr(module, class_name) except AttributeError: msg = 'Module "%s" does not define a "%s" attribute/class' % ( dotted_path, class_name) raise ImportError(msg)
Import a dotted module path. Returns the attribute/class designated by the last name in the path. Raises ImportError if the import fails.
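For example:
>>> import os.path
>>> import_string('os.path.join') is os.path.join
True
>>> import_string('not_a_path')
Traceback (most recent call last):
    ...
ImportError: not_a_path doesn't look like a valid path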
def argsort_indices(a, axis=-1): a = np.asarray(a) ind = list(np.ix_(*[np.arange(d) for d in a.shape])) ind[axis] = a.argsort(axis) return tuple(ind)
Like argsort, but returns an index suitable for sorting the original array even if that array is multidimensional
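Usage sketch: the returned index tuple sorts a 2-D array row-wise in a single fancy-indexing step:
>>> import numpy as np
>>> a = np.array([[3, 1, 2], [9, 8, 7]])
>>> a[argsort_indices(a)]
array([[1, 2, 3],
       [7, 8, 9]])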
def send_event(api_key, event, value1=None, value2=None, value3=None): url = 'https://maker.ifttt.com/trigger/{e}/with/key/{k}/'.format(e=event, k=api_key) payload = {'value1': value1, 'value2': value2, 'value3': value3} return requests.post(url, data=payload)
Send an event to the IFTTT maker channel. Parameters ---------- api_key : string Your IFTTT API key event : string The name of the IFTTT event to trigger value1 : Optional: Extra data sent with the event (default: None) value2 : Optional: Extra data sent with the event (default: None) value3 : Optional: Extra data sent with the event (default: None)
def get_localized_docstring(obj, domain): if obj.__class__.__doc__ is not None: return inspect.cleandoc( gettext.dgettext(domain, obj.__class__.__doc__))
Get a cleaned-up, localized copy of docstring of this class.
def get_cmd_help(self): try: return self.help except AttributeError: pass try: return get_localized_docstring( self, self.get_gettext_domain() ).splitlines()[0].rstrip('.').lower() except (AttributeError, IndexError, ValueError): pass
Get the single-line help of this command. :returns: ``self.help``, if defined :returns: The first line of the docstring, without the trailing dot, if present. :returns: None, otherwise
def get_cmd_description(self): try: return self.description except AttributeError: pass try: return '\n'.join( get_localized_docstring( self, self.get_gettext_domain() ).splitlines()[1:] ).split('@EPILOG@', 1)[0].strip() except (AttributeError, IndexError, ValueError): pass
Get the leading, multi-line description of this command. :returns: ``self.description``, if defined :returns: A substring of the class docstring between the first line (which is discarded) and the string ``@EPILOG@``, if present, or the end of the docstring, if any :returns: None, otherwise The description string will be displayed after the usage string but before any of the detailed argument descriptions. Please consider following good practice by keeping the description line short enough not to require scrolling but useful enough to provide additional information that cannot be inferred from the name of the command or other arguments. Stating the purpose of the command is highly recommended.
def get_cmd_epilog(self): try: return self.source.epilog except AttributeError: pass try: return '\n'.join( get_localized_docstring( self, self.get_gettext_domain() ).splitlines()[1:] ).split('@EPILOG@', 1)[1].strip() except (AttributeError, IndexError, ValueError): pass
Get the trailing, multi-line description of this command. :returns: ``self.epilog``, if defined :returns: A substring of the class docstring between the string ``@EPILOG@`` and the end of the docstring, if defined :returns: None, otherwise The epilog is similar to the description string but it is instead printed after the section containing detailed descriptions of all of the command line arguments. Please consider following good practice by providing additional details about how the command can be used, perhaps an example or a reference to means of finding additional documentation.
def main(self, argv=None, exit=True): return CommandRecipe(self).main(argv, exit)
Shortcut for running a command. See :meth:`guacamole.recipes.Recipe.main()` for details.
def get_ingredients(self): return [ cmdtree.CommandTreeBuilder(self.command), cmdtree.CommandTreeDispatcher(), argparse.AutocompleteIngredient(), argparse.ParserIngredient(), crash.VerboseCrashHandler(), ansi.ANSIIngredient(), log.Logging(), ]
Get a list of ingredients for guacamole.
def register_arguments(self, parser): parser.add_argument('x', type=int, help='the first value') parser.add_argument('y', type=int, help='the second value')
Guacamole method used by the argparse ingredient. :param parser: Argument parser (from :mod:`argparse`) specific to this command.
def invoked(self, ctx): print("{} + {} = {}".format( ctx.args.x, ctx.args.y, ctx.args.x + ctx.args.y))
Guacamole method used by the command ingredient. :param ctx: The guacamole context object. Context provides access to all features of guacamole. The argparse ingredient adds the ``args`` attribute to it. That attribute contains the result of parsing command line arguments. :returns: The return code of the command. Guacamole translates ``None`` to a successful exit status (return code zero).
def hsv(h, s, v): if h < 0 or h > 360: raise ValueError("h out of range: {}".format(h)) if s < 0 or s > 1: raise ValueError("s out of range: {}".format(s)) if v < 0 or v > 1: raise ValueError("v out of range: {}".format(v)) c = v * s # chroma h1 = h / 60 x = c * (1 - abs(h1 % 2 - 1)) if 0 <= h1 < 1: r1, g1, b1 = (c, x, 0) elif 1 <= h1 < 2: r1, g1, b1 = (x, c, 0) elif 2 <= h1 < 3: r1, g1, b1 = (0, c, x) elif 3 <= h1 < 4: r1, g1, b1 = (0, x, c) elif 4 <= h1 < 5: r1, g1, b1 = (x, 0, c) else: r1, g1, b1 = (c, 0, x) m = v - c r, g, b = r1 + m, g1 + m, b1 + m return int(r * 255), int(g * 255), int(b * 255)
Convert HSV (hue, saturation, value) to RGB.
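Spot checks at the colour-wheel primaries (h in degrees, s and v in [0, 1]):
>>> hsv(0, 1, 1)
(255, 0, 0)
>>> hsv(120, 1, 1)
(0, 255, 0)
>>> hsv(240, 1, 1)
(0, 0, 255)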
def invoked(self, ctx): if not ctx.ansi.is_enabled: print("You need color support to use this demo") else: print(ctx.ansi.cmd('erase_display')) self._demo_fg_color(ctx) self._demo_bg_color(ctx) self._demo_bg_indexed(ctx) self._demo_rgb(ctx) self._demo_style(ctx)
Method called when the command is invoked.
def get(self, thumbnail_name): if isinstance(thumbnail_name, list): thumbnail_name = '/'.join(thumbnail_name) return self._get(thumbnail_name)
Wrapper for ``_get``, which joins thumbnail_name into a string if necessary before calling ``_get``. :rtype: Thumbnail