text_prompt (string, lengths 100–17.7k, ⌀) | code_prompt (string, lengths 7–9.86k, ⌀)
---|---|
<SYSTEM_TASK:>
Solves a let-form by calling RHS with nested scope.
<END_TASK>
<USER_TASK:>
Description:
def solve_let(expr, vars):
"""Solves a let-form by calling RHS with nested scope.""" |
lhs_value = solve(expr.lhs, vars).value
if not isinstance(lhs_value, structured.IStructured):
raise errors.EfilterTypeError(
root=expr.lhs, query=expr.original,
message="The LHS of 'let' must evaluate to an IStructured. Got %r."
% (lhs_value,))
return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)) |
<SYSTEM_TASK:>
Filter values on the LHS by evaluating RHS with each value.
<END_TASK>
<USER_TASK:>
Description:
def solve_filter(expr, vars):
"""Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
""" |
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
def lazy_filter():
for lhs_value in repeated.getvalues(lhs_values):
if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value:
yield lhs_value
return Result(repeated.lazy(lazy_filter), ()) |
<SYSTEM_TASK:>
Sort values on the LHS by the value they yield when passed to RHS.
<END_TASK>
<USER_TASK:>
Description:
def solve_sort(expr, vars):
"""Sort values on the LHS by the value they yield when passed to RHS.""" |
lhs_values = repeated.getvalues(__solve_for_repeated(expr.lhs, vars)[0])
sort_expression = expr.rhs
def _key_func(x):
return solve(sort_expression, __nest_scope(expr.lhs, vars, x)).value
results = ordered.ordered(lhs_values, key_func=_key_func)
return Result(repeated.meld(*results), ()) |
<SYSTEM_TASK:>
Return True if RHS evaluates to a true value with each state of LHS.
<END_TASK>
<USER_TASK:>
Description:
def solve_each(expr, vars):
"""Return True if RHS evaluates to a true value with each state of LHS.
If LHS evaluates to a normal IAssociative object then this is the same as
a regular let-form, except the return value is always a boolean. If LHS
evaluates to a repeated var (see efilter.protocols.repeated) of
IAssociative objects then RHS will be evaluated with each state and True
will be returned only if each result is true.
""" |
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
for lhs_value in repeated.getvalues(lhs_values):
result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
if not result.value:
# Each is required to return an actual boolean.
return result._replace(value=False)
return Result(True, ()) |
<SYSTEM_TASK:>
Typecheck whether LHS is type on the RHS.
<END_TASK>
<USER_TASK:>
Description:
def solve_isinstance(expr, vars):
"""Typecheck whether LHS is type on the RHS.""" |
lhs = solve(expr.lhs, vars)
try:
t = solve(expr.rhs, vars).value
except errors.EfilterKeyError:
t = None
if t is None:
raise errors.EfilterTypeError(
root=expr.rhs, query=expr.source,
message="Cannot find type named %r." % expr.rhs.value)
if not isinstance(t, type):
raise errors.EfilterTypeError(
root=expr.rhs, query=expr.source,
message="%r is not a type and cannot be used with 'isa'." % (t,))
return Result(protocol.implements(lhs.value, t), ()) |
<SYSTEM_TASK:>
mod_root
<END_TASK>
<USER_TASK:>
Description:
def set_version(mod_root):
"""
mod_root:
a VERSION file containing the version string is created in mod_root
during installation. That file is used at runtime to get the version
information.
""" |
try:
version_base = None
version_detail = None
# get version from './VERSION'
src_root = os.path.dirname(__file__)
if not src_root:
src_root = '.'
with open(src_root + '/VERSION', 'r') as f:
version_base = f.readline().strip()
# attempt to get version detail information from git
# We only do that though if we are in a repo root dir,
# ie. if 'git rev-parse --show-prefix' returns an empty string --
# otherwise we get confused if the virtualenv (ve) lives beneath another
# repository, and the pip version in use puts its install tmp dir in the ve space
# instead of /tmp (which seems to happen with some pip/setuptools
# versions).
p = sp.Popen('cd %s ; '
'test -z `git rev-parse --show-prefix` || exit -1; '
'tag=`git describe --tags --always` 2>/dev/null ; '
'branch=`git branch | grep -e "^*" | cut -f 2- -d " "` 2>/dev/null ; '
'echo $tag@$branch' % src_root,
stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
version_detail = str(p.communicate()[0].strip())
version_detail = version_detail.replace('detached from ', 'detached-')
# remove all non-alphanumeric (and then some) chars
version_detail = re.sub('[/ ]+', '-', version_detail)
version_detail = re.sub('[^a-zA-Z0-9_+@.-]+', '', version_detail)
if p.returncode != 0 or \
version_detail == '@' or \
'git-error' in version_detail or \
'not-a-git-repo' in version_detail or \
'not-found' in version_detail or \
'fatal' in version_detail :
version = version_base
elif '@' not in version_base:
version = '%s-%s' % (version_base, version_detail)
else:
version = version_base
# make sure the version files exist for the runtime version inspection
path = '%s/%s' % (src_root, mod_root)
with open(path + "/VERSION", "w") as f:
f.write(version + "\n")
sdist_name = "%s-%s.tar.gz" % (name, version)
sdist_name = sdist_name.replace('/', '-')
sdist_name = sdist_name.replace('@', '-')
sdist_name = sdist_name.replace('#', '-')
sdist_name = sdist_name.replace('_', '-')
if '--record' in sys.argv or \
'bdist_egg' in sys.argv or \
'bdist_wheel' in sys.argv :
# pip install stage 2 or easy_install stage 1
#
# pip install will untar the sdist in a tmp tree. In that tmp
# tree, we won't be able to derive git version tags -- so we pack the
# formerly derived version as ./VERSION
shutil.move("VERSION", "VERSION.bak") # backup version
shutil.copy("%s/VERSION" % path, "VERSION") # use full version instead
os.system ("python setup.py sdist") # build sdist
shutil.copy('dist/%s' % sdist_name,
'%s/%s' % (mod_root, sdist_name)) # copy into tree
shutil.move("VERSION.bak", "VERSION") # restore version
with open(path + "/SDIST", "w") as f:
f.write(sdist_name + "\n")
return version_base, version_detail, sdist_name
except Exception as e :
raise RuntimeError('Could not extract/set version: %s' % e) |
<SYSTEM_TASK:>
Whether name should be installed
<END_TASK>
<USER_TASK:>
Description:
def isgood(name):
""" Whether name should be installed """ |
if not isbad(name):
if name.endswith('.py') or name.endswith('.json') or name.endswith('.tar'):
return True
return False |
<SYSTEM_TASK:>
Return the repeated value, or the first value if there's only one.
<END_TASK>
<USER_TASK:>
Description:
def meld(*values):
"""Return the repeated value, or the first value if there's only one.
This is a convenience function, equivalent to calling
getvalue(repeated(x)) to get x.
This function skips over instances of None in values (None is not allowed
in repeated variables).
Examples:
meld("foo", "bar") # => ListRepetition("foo", "bar")
meld("foo", "foo") # => ListRepetition("foo", "foo")
meld("foo", None) # => "foo"
meld(None) # => None
""" |
values = [x for x in values if x is not None]
if not values:
return None
result = repeated(*values)
if isrepeating(result):
return result
return getvalue(result) |
<SYSTEM_TASK:>
Return the single value of x or raise TypeError if more than one value.
<END_TASK>
<USER_TASK:>
Description:
def getvalue(x):
"""Return the single value of x or raise TypError if more than one value.""" |
if isrepeating(x):
raise TypeError(
"Ambiguous call to getvalue for %r which has more than one value."
% x)
for value in getvalues(x):
return value |
<SYSTEM_TASK:>
Convert current Task into a dictionary
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
"""
Convert current Task into a dictionary
:return: python dictionary
""" |
task_desc_as_dict = {
'uid': self._uid,
'name': self._name,
'state': self._state,
'state_history': self._state_history,
'pre_exec': self._pre_exec,
'executable': self._executable,
'arguments': self._arguments,
'post_exec': self._post_exec,
'cpu_reqs': self._cpu_reqs,
'gpu_reqs': self._gpu_reqs,
'lfs_per_process': self._lfs_per_process,
'upload_input_data': self._upload_input_data,
'copy_input_data': self._copy_input_data,
'link_input_data': self._link_input_data,
'move_input_data': self._move_input_data,
'copy_output_data': self._copy_output_data,
'move_output_data': self._move_output_data,
'download_output_data': self._download_output_data,
'stdout': self._stdout,
'stderr': self._stderr,
'exit_code': self._exit_code,
'path': self._path,
'tag': self._tag,
'parent_stage': self._p_stage,
'parent_pipeline': self._p_pipeline,
}
return task_desc_as_dict |
<SYSTEM_TASK:>
Match a case-insensitive keyword consisting of multiple tokens.
<END_TASK>
<USER_TASK:>
Description:
def multi_keyword(tokens, keyword_parts):
"""Match a case-insensitive keyword consisting of multiple tokens.""" |
tokens = iter(tokens)
matched_tokens = []
limit = len(keyword_parts)
for idx in six.moves.range(limit):
try:
token = next(tokens)
except StopIteration:
return
if (not token or token.name != "symbol" or
token.value.lower() != keyword_parts[idx]):
return
matched_tokens.append(token)
return TokenMatch(None, token.value, matched_tokens) |
<SYSTEM_TASK:>
Match a suffix of an operator.
<END_TASK>
<USER_TASK:>
Description:
def suffix(tokens, operator_table):
"""Match a suffix of an operator.""" |
operator, matched_tokens = operator_table.suffix.match(tokens)
if operator:
return TokenMatch(operator, None, matched_tokens) |
<SYSTEM_TASK:>
An expression is an atom or an infix expression.
<END_TASK>
<USER_TASK:>
Description:
def expression(self, previous_precedence=0):
"""An expression is an atom or an infix expression.
Grammar (sort of, actually a precedence-climbing parser):
expression = atom [ binary_operator expression ] .
Args:
previous_precedence: What operator precedence should we start with?
""" |
lhs = self.atom()
return self.operator(lhs, previous_precedence) |
<SYSTEM_TASK:>
Accept the next binary operator only if it's of higher precedence.
<END_TASK>
<USER_TASK:>
Description:
def accept_operator(self, precedence):
"""Accept the next binary operator only if it's of higher precedence.""" |
match = grammar.infix(self.tokens)
if not match:
return
if match.operator.precedence < precedence:
return
# The next thing is an operator that we want. Now match it for real.
return self.tokens.accept(grammar.infix) |
<SYSTEM_TASK:>
Climb operator precedence as long as there are operators.
<END_TASK>
<USER_TASK:>
Description:
def operator(self, lhs, min_precedence):
"""Climb operator precedence as long as there are operators.
This function implements a basic precedence climbing parser to deal
with binary operators in a sane fashion. The outer loop will keep
spinning as long as the next token is an operator with a precedence
of at least 'min_precedence', parsing operands as atoms (which,
in turn, recurse into 'expression' which recurses back into 'operator').
This supports both left- and right-associativity. The only part of the
code that's not a regular precedence-climber deals with mixfix
operators. A mixfix operator in DottySQL consists of an infix part
and a suffix (they are still binary, they just have a terminator).
""" |
# Spin as long as the next token is an operator of higher
# precedence. (This may not do anything, which is fine.)
while self.accept_operator(precedence=min_precedence):
operator = self.tokens.matched.operator
# If we're parsing a mixfix operator we can keep going until
# the suffix.
if operator.suffix:
rhs = self.expression()
self.tokens.expect(common_grammar.match_tokens(operator.suffix))
rhs.end = self.tokens.matched.end
elif operator.name == ".":
# The dot operator changes the meaning of RHS.
rhs = self.dot_rhs()
else:
# The right hand side is an atom, which might turn out to be
# an expression. Isn't recursion exciting?
rhs = self.atom()
# Keep going as long as the next token is an infix operator of
# higher precedence.
next_min_precedence = operator.precedence
if operator.assoc == "left":
next_min_precedence += 1
while self.tokens.match(grammar.infix):
if (self.tokens.matched.operator.precedence
< next_min_precedence):
break
rhs = self.operator(rhs,
self.tokens.matched.operator.precedence)
lhs = operator.handler(lhs, rhs, start=lhs.start, end=rhs.end,
source=self.original)
return lhs |
<SYSTEM_TASK:>
First part of an SQL query.
<END_TASK>
<USER_TASK:>
Description:
def select(self):
"""First part of an SQL query.""" |
# Try to match the asterisk, any or list of vars.
if self.tokens.accept(grammar.select_any):
return self.select_any()
if self.tokens.accept(grammar.select_all):
# The FROM after SELECT * is required.
self.tokens.expect(grammar.select_from)
return self.select_from()
return self.select_what() |
<SYSTEM_TASK:>
Tries to guess what variable name 'expr' ends in.
<END_TASK>
<USER_TASK:>
Description:
def _guess_name_of(self, expr):
"""Tries to guess what variable name 'expr' ends in.
This is a heuristic that roughly emulates what most SQL databases
name columns, based on selected variable names or applied functions.
""" |
if isinstance(expr, ast.Var):
return expr.value
if isinstance(expr, ast.Resolve):
# We know the RHS of resolve is a Literal because that's what
# Parser.dot_rhs does.
return expr.rhs.value
if isinstance(expr, ast.Select) and isinstance(expr.rhs, ast.Literal):
name = self._guess_name_of(expr.lhs)
if name is not None:
return "%s_%s" % (name, expr.rhs.value)
if isinstance(expr, ast.Apply) and isinstance(expr.func, ast.Var):
return expr.func.value |
<SYSTEM_TASK:>
Parse the function application subgrammar.
<END_TASK>
<USER_TASK:>
Description:
def application(self, func):
"""Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
""" |
start = self.tokens.matched.start
if self.tokens.accept(common_grammar.rparen):
# That was easy.
return ast.Apply(func, start=start, end=self.tokens.matched.end,
source=self.original)
arguments = [self.expression()]
while self.tokens.accept(common_grammar.comma):
arguments.append(self.expression())
self.tokens.expect(common_grammar.rparen)
return ast.Apply(func, *arguments, start=start,
end=self.tokens.matched.end, source=self.original) |
<SYSTEM_TASK:>
If the row only has one column, return that value; otherwise raise.
<END_TASK>
<USER_TASK:>
Description:
def get_singleton(self):
"""If the row only has one column, return that value; otherwise raise.
Raises:
ValueError, if count of columns is not 1.
""" |
only_value = None
for value in six.itervalues(self.ordered_dict):
# This loop will raise if it runs more than once.
if only_value is not None:
raise ValueError("%r is not a singleton." % self)
only_value = value
if only_value is self.__UnsetSentinel or only_value is None:
raise ValueError("%r is empty." % self)
return only_value |
<SYSTEM_TASK:>
Does the object 'obj' implement the 'protocol'?
<END_TASK>
<USER_TASK:>
Description:
def implements(obj, protocol):
"""Does the object 'obj' implement the 'prococol'?""" |
if isinstance(obj, type):
raise TypeError("First argument to implements must be an instance. "
"Got %r." % obj)
return isinstance(obj, protocol) or issubclass(AnyType, protocol) |
<SYSTEM_TASK:>
Does the type 'cls' participate in the 'protocol'?
<END_TASK>
<USER_TASK:>
Description:
def isa(cls, protocol):
"""Does the type 'cls' participate in the 'protocol'?""" |
if not isinstance(cls, type):
raise TypeError("First argument to isa must be a type. Got %s." %
repr(cls))
if not isinstance(protocol, type):
raise TypeError(("Second argument to isa must be a type or a Protocol. "
"Got an instance of %r.") % type(protocol))
return issubclass(cls, protocol) or issubclass(AnyType, protocol) |
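A minimal sketch of how the two checks differ, assuming ``implements``, ``isa`` and efilter's ``AnyType`` are in scope; ``ISized`` is a hypothetical abc-based protocol used only for illustration.
import abc

class ISized(abc.ABC):            # hypothetical protocol, not part of efilter
    pass

ISized.register(list)             # declare that list participates in ISized

isa(list, ISized)                 # -> True (type-level check)
implements([1, 2, 3], ISized)     # -> True (instance-level check)
implements(list, ISized)          # raises TypeError: a type is not an instance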
<SYSTEM_TASK:>
Assert that protocol 'cls' is implemented for type 'for_type'.
<END_TASK>
<USER_TASK:>
Description:
def implemented(cls, for_type):
"""Assert that protocol 'cls' is implemented for type 'for_type'.
This will cause 'for_type' to be registered with the protocol 'cls'.
Subsequently, protocol.isa(for_type, cls) will return True, as will
isinstance, issubclass and others.
Raises:
TypeError if 'for_type' doesn't implement all required functions.
""" |
for function in cls.required():
if not function.implemented_for_type(for_type):
raise TypeError(
"%r doesn't implement %r so it cannot participate in "
"the protocol %r." %
(for_type, function.func.__name__, cls))
cls.register(for_type) |
<SYSTEM_TASK:>
Automatically generate implementations for a type.
<END_TASK>
<USER_TASK:>
Description:
def implicit_static(cls, for_type=None, for_types=None):
"""Automatically generate implementations for a type.
Implement the protocol for the 'for_type' type by dispatching each
member function of the protocol to an instance method of the same name
declared on the type 'for_type'.
Arguments:
for_type: The type to implicitly implement the protocol with.
Raises:
TypeError if not all implementations are provided by 'for_type'.
""" |
for type_ in cls.__get_type_args(for_type, for_types):
implementations = {}
for function in cls.required():
method = getattr(type_, function.__name__, None)
if not callable(method):
raise TypeError(
"%s.implicit invokation on type %r is missing instance "
"method %r."
% (cls.__name__, type_, function.__name__))
implementations[function] = method
for function in cls.optional():
method = getattr(type_, function.__name__, None)
if callable(method):
implementations[function] = method
return cls.implement(for_type=type_,
implementations=implementations) |
<SYSTEM_TASK:>
Return a function that calls method 'func_name' on objects.
<END_TASK>
<USER_TASK:>
Description:
def _build_late_dispatcher(func_name):
"""Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args.
""" |
def _late_dynamic_dispatcher(obj, *args):
method = getattr(obj, func_name, None)
if not callable(method):
raise NotImplementedError(
"Instance method %r is not implemented by %r." % (
func_name, obj))
return method(*args)
return _late_dynamic_dispatcher |
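A short usage sketch, assuming the helper above is callable directly; ``Greeter`` exists only for this example.
class Greeter(object):
    def greet(self, name):
        return 'hello %s' % name

greet = _build_late_dispatcher('greet')
greet(Greeter(), 'world')   # -> 'hello world'
greet(42, 'world')          # raises NotImplementedError: int has no 'greet' method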
<SYSTEM_TASK:>
Automatically generate late dynamic dispatchers to type.
<END_TASK>
<USER_TASK:>
Description:
def implicit_dynamic(cls, for_type=None, for_types=None):
"""Automatically generate late dynamic dispatchers to type.
This is similar to 'implicit_static', except instead of binding the
instance methods, it generates a dispatcher that will call whatever
instance method of the same name happens to be available at time of
dispatch.
This has the obvious advantage of supporting arbitrary subclasses, but
can do no verification at bind time.
Arguments:
for_type: The type to implicitly implement the protocol with.
""" |
for type_ in cls.__get_type_args(for_type, for_types):
implementations = {}
for function in cls.functions():
implementations[function] = cls._build_late_dispatcher(
func_name=function.__name__)
cls.implement(for_type=type_, implementations=implementations) |
<SYSTEM_TASK:>
Provide protocol implementation for a type.
<END_TASK>
<USER_TASK:>
Description:
def implement(cls, implementations, for_type=None, for_types=None):
"""Provide protocol implementation for a type.
Register all implementations of multimethod functions in this
protocol and add the type into the abstract base class of the
protocol.
Arguments:
implementations: A dict of (function, implementation), where each
function is multimethod and each implementation is a callable.
for_type: The concrete type implementations apply to.
for_types: Same as for_type, but takes a tuple of types.
You may not supply both for_type and for_types for obvious reasons.
Raises:
ValueError for arguments.
TypeError if not all implementations are provided or if there
are issues related to polymorphism (e.g. attempting to
implement a non-multimethod function).
""" |
for type_ in cls.__get_type_args(for_type, for_types):
cls._implement_for_type(for_type=type_,
implementations=implementations) |
<SYSTEM_TASK:>
Parse one of the rules as either objectfilter or dottysql.
<END_TASK>
<USER_TASK:>
Description:
def _parse_query(self, source):
"""Parse one of the rules as either objectfilter or dottysql.
Example:
_parse_query("5 + 5")
# Returns Sum(Literal(5), Literal(5))
Arguments:
source: A rule in either objectfilter or dottysql syntax.
Returns:
The AST to represent the rule.
""" |
if self.OBJECTFILTER_WORDS.search(source):
syntax_ = "objectfilter"
else:
syntax_ = None # Default it is.
return query.Query(source, syntax=syntax_) |
<SYSTEM_TASK:>
Parse the tagfile and yield tuples of tag_name, list of rule ASTs.
<END_TASK>
<USER_TASK:>
Description:
def _parse_tagfile(self):
"""Parse the tagfile and yield tuples of tag_name, list of rule ASTs.""" |
rules = None
tag = None
for line in self.original:
match = self.TAG_DECL_LINE.match(line)
if match:
if tag and rules:
yield tag, rules
rules = []
tag = match.group(1)
continue
match = self.TAG_RULE_LINE.match(line)
if match:
source = match.group(1)
rules.append(self._parse_query(source)) |
<SYSTEM_TASK:>
Normalize both sides, but don't eliminate the expression.
<END_TASK>
<USER_TASK:>
Description:
def normalize(expr):
"""Normalize both sides, but don't eliminate the expression.""" |
lhs = normalize(expr.lhs)
rhs = normalize(expr.rhs)
return type(expr)(lhs, rhs, start=lhs.start, end=rhs.end) |
<SYSTEM_TASK:>
Pass through n-ary expressions, and eliminate empty branches.
<END_TASK>
<USER_TASK:>
Description:
def normalize(expr):
"""Pass through n-ary expressions, and eliminate empty branches.
Variadic and binary expressions recursively visit all their children.
If all children are eliminated then the parent expression is also
eliminated:
(& [removed] [removed]) => [removed]
If only one child is left, it is promoted to replace the parent node:
(& True) => True
""" |
children = []
for child in expr.children:
branch = normalize(child)
if branch is None:
continue
if type(branch) is type(expr):
children.extend(branch.children)
else:
children.append(branch)
if len(children) == 0:
return None
if len(children) == 1:
return children[0]
return type(expr)(*children, start=children[0].start,
end=children[-1].end) |
<SYSTEM_TASK:>
Returns a set of the metric slugs for the given category
<END_TASK>
<USER_TASK:>
Description:
def _category_slugs(self, category):
"""Returns a set of the metric slugs for the given category""" |
key = self._category_key(category)
slugs = self.r.smembers(key)
return slugs |
<SYSTEM_TASK:>
Returns a generator of all possible granularities based on the
<END_TASK>
<USER_TASK:>
Description:
def _granularities(self):
"""Returns a generator of all possible granularities based on the
MIN_GRANULARITY and MAX_GRANULARITY settings.
""" |
keep = False
for g in GRANULARITIES:
if g == app_settings.MIN_GRANULARITY and not keep:
keep = True
elif g == app_settings.MAX_GRANULARITY and keep:
keep = False
yield g
if keep:
yield g |
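A standalone sketch of the same windowing behaviour, under the assumption that GRANULARITIES is ordered from finest to coarsest and that MIN_GRANULARITY/MAX_GRANULARITY are 'daily' and 'monthly' (illustrative values only):
GRANULARITIES = ('seconds', 'minutes', 'hours', 'daily', 'weekly', 'monthly', 'yearly')

def granularity_window(minimum, maximum):
    keep = False
    for g in GRANULARITIES:
        if g == minimum and not keep:
            keep = True
        elif g == maximum and keep:
            keep = False
            yield g          # the maximum granularity is still included
        if keep:
            yield g

list(granularity_window('daily', 'monthly'))   # -> ['daily', 'weekly', 'monthly']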
<SYSTEM_TASK:>
Builds an OrderedDict of metric keys and patterns for the given slug
<END_TASK>
<USER_TASK:>
Description:
def _build_key_patterns(self, slug, date):
"""Builds an OrderedDict of metric keys and patterns for the given slug
and date.""" |
# we want to keep the order, from smallest to largest granularity
patts = OrderedDict()
metric_key_patterns = self._metric_key_patterns()
for g in self._granularities():
date_string = date.strftime(metric_key_patterns[g]["date_format"])
patts[g] = metric_key_patterns[g]["key"].format(slug, date_string)
return patts |
<SYSTEM_TASK:>
Builds redis keys used to store metrics.
<END_TASK>
<USER_TASK:>
Description:
def _build_keys(self, slug, date=None, granularity='all'):
"""Builds redis keys used to store metrics.
* ``slug`` -- a slug used for a metric, e.g. "user-signups"
* ``date`` -- (optional) A ``datetime.datetime`` object used to
generate the time period for the metric. If omitted, the current date
and time (in UTC) will be used.
* ``granularity`` -- Must be one of: "all" (default), "yearly",
"monthly", "weekly", "daily", "hourly", "minutes", or "seconds".
Returns a list of strings.
""" |
slug = slugify(slug) # Ensure slugs have a consistent format
if date is None:
date = datetime.utcnow()
patts = self._build_key_patterns(slug, date)
if granularity == "all":
return list(patts.values())
return [patts[granularity]] |
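A hypothetical usage sketch (``R`` stands for the metrics class these methods belong to, mirroring the ``r = R()`` usage in the get_metric_history docstring further down); with every granularity enabled, the returned keys follow the patterns documented in metric() below:
from datetime import datetime

r = R()
r._build_keys('user-signups', date=datetime(2012, 12, 24, 13, 30, 45))
# -> ['m:user-signups:s:2012-12-24-13-30-45',   # second
#     'm:user-signups:i:2012-12-24-13-30',      # minute
#     'm:user-signups:h:2012-12-24-13',         # hour
#     'm:user-signups:2012-12-24',              # day
#     'm:user-signups:w:2012-52',               # week
#     'm:user-signups:m:2012-12',               # month
#     'm:user-signups:y:2012']                  # year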
<SYSTEM_TASK:>
Removes all keys for the given ``slug``.
<END_TASK>
<USER_TASK:>
Description:
def delete_metric(self, slug):
"""Removes all keys for the given ``slug``.""" |
# To remove all keys for a slug, I need to retrieve them all from
# the set of metric keys. This uses the redis "keys" command, which is
# inefficient, but this shouldn't be used all that often.
prefix = "m:{0}:*".format(slug)
keys = self.r.keys(prefix)
self.r.delete(*keys) # Remove the metric data
# Finally, remove the slug from the set
self.r.srem(self._metric_slugs_key, slug) |
<SYSTEM_TASK:>
Records a metric, creating it if it doesn't exist or incrementing it
<END_TASK>
<USER_TASK:>
Description:
def metric(self, slug, num=1, category=None, expire=None, date=None):
"""Records a metric, creating it if it doesn't exist or incrementing it
if it does. All metrics are prefixed with 'm', and automatically
aggregate for Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``num`` -- Set or Increment the metric by this number; default is 1.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
""" |
# Add the slug to the set of metric slugs
self.r.sadd(self._metric_slugs_key, slug)
if category:
self._categorize(slug, category)
# Increment keys. NOTE: current redis-py (2.7.2) doesn't include an
# incrby method; .incr accepts a second ``amount`` parameter.
keys = self._build_keys(slug, date=date)
# Use a pipeline to speed up incrementing multiple keys
pipe = self.r.pipeline()
for key in keys:
pipe.incr(key, num)
if expire:
pipe.expire(key, expire)
pipe.execute() |
<SYSTEM_TASK:>
Get the current values for a metric.
<END_TASK>
<USER_TASK:>
Description:
def get_metric(self, slug):
"""Get the current values for a metric.
Returns a dictionary with metric values accumulated for the seconds,
minutes, hours, day, week, month, and year.
""" |
results = OrderedDict()
granularities = self._granularities()
keys = self._build_keys(slug)
for granularity, key in zip(granularities, keys):
results[granularity] = self.r.get(key)
return results |
<SYSTEM_TASK:>
Get the metrics for multiple slugs.
<END_TASK>
<USER_TASK:>
Description:
def get_metrics(self, slug_list):
"""Get the metrics for multiple slugs.
Returns a list of two-tuples containing the metric slug and a
dictionary like the one returned by ``get_metric``::
(
some-metric, {
'seconds': 0, 'minutes': 0, 'hours': 0,
'day': 0, 'week': 0, 'month': 0, 'year': 0
}
)
""" |
# meh. I should have been consistent here, but I'm lazy, so support these
# value names instead of granularity names, but respect the min/max
# granularity settings.
keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']
key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}
keys = [key_mapping[gran] for gran in self._granularities()]
results = []
for slug in slug_list:
metrics = self.r.mget(*self._build_keys(slug))
if any(metrics): # Only if we have data.
results.append((slug, dict(zip(keys, metrics))))
return results |
<SYSTEM_TASK:>
Get metrics belonging to the given category
<END_TASK>
<USER_TASK:>
Description:
def get_category_metrics(self, category):
"""Get metrics belonging to the given category""" |
slug_list = self._category_slugs(category)
return self.get_metrics(slug_list) |
<SYSTEM_TASK:>
Removes the category from Redis. This doesn't touch the metrics;
<END_TASK>
<USER_TASK:>
Description:
def delete_category(self, category):
"""Removes the category from Redis. This doesn't touch the metrics;
they simply become uncategorized.""" |
# Remove mapping of metrics-to-category
category_key = self._category_key(category)
self.r.delete(category_key)
# Remove category from Set
self.r.srem(self._categories_key, category) |
<SYSTEM_TASK:>
Get history for one or more metrics.
<END_TASK>
<USER_TASK:>
Description:
def get_metric_history(self, slugs, since=None, to=None, granularity='daily'):
"""Get history for one or more metrics.
* ``slugs`` -- a slug OR a list of slugs
* ``since`` -- the date from which we start pulling metrics
* ``to`` -- the date until which we pull metrics
* ``granularity`` -- seconds, minutes, hourly,
daily, weekly, monthly, yearly
Returns a list of tuples containing the Redis key and the associated
metric::
r = R()
r.get_metric_history('test', granularity='weekly')
[
('m:test:w:2012-52', '15'),
]
To get history for multiple metrics, just provide a list of slugs::
metrics = ['test', 'other']
r.get_metric_history(metrics, granularity='weekly')
[
('m:test:w:2012-52', '15'),
('m:other:w:2012-52', '42'),
]
""" |
if not type(slugs) == list:
slugs = [slugs]
# Build the set of Redis keys that we need to get.
keys = []
for slug in slugs:
for date in self._date_range(granularity, since, to):
keys += self._build_keys(slug, date, granularity)
keys = list(dedupe(keys))
# Fetch our data, replacing any None-values with zeros
results = [0 if v is None else v for v in self.r.mget(keys)]
results = zip(keys, results)
return sorted(results, key=lambda t: t[0]) |
<SYSTEM_TASK:>
Set the value for a Gauge.
<END_TASK>
<USER_TASK:>
Description:
def gauge(self, slug, current_value):
"""Set the value for a Gauge.
* ``slug`` -- the unique identifier (or key) for the Gauge
* ``current_value`` -- the value that the gauge should display
""" |
k = self._gauge_key(slug)
self.r.sadd(self._gauge_slugs_key, slug) # keep track of all Gauges
self.r.set(k, current_value) |
<SYSTEM_TASK:>
Removes all gauges with the given ``slug``.
<END_TASK>
<USER_TASK:>
Description:
def delete_gauge(self, slug):
"""Removes all gauges with the given ``slug``.""" |
key = self._gauge_key(slug)
self.r.delete(key) # Remove the Gauge
self.r.srem(self._gauge_slugs_key, slug) |
<SYSTEM_TASK:>
Include a Donut Chart for the specified Gauge.
<END_TASK>
<USER_TASK:>
Description:
def gauge(slug, maximum=9000, size=200, coerce='float'):
"""Include a Donut Chart for the specified Gauge.
* ``slug`` -- the unique slug for the Gauge.
* ``maximum`` -- The maximum value for the gauge (default is 9000)
* ``size`` -- The size (in pixels) of the gauge (default is 200)
* ``coerce`` -- type to which gauge values should be coerced. The default
is float. Use ``{% gauge some_slug coerce='int' %}`` to coerce to integer
""" |
coerce_options = {'float': float, 'int': int, 'str': str}
coerce = coerce_options.get(coerce, float)
redis = get_r()
value = coerce(redis.get_gauge(slug))
if value < maximum and coerce == float:
diff = round(maximum - value, 2)
elif value < maximum:
diff = maximum - value
else:
diff = 0
return {
'slug': slug,
'current_value': value,
'max_value': maximum,
'size': size,
'diff': diff,
} |
<SYSTEM_TASK:>
Template Tag to display a metric's history.
<END_TASK>
<USER_TASK:>
Description:
def metric_history(slug, granularity="daily", since=None, to=None,
with_data_table=False):
"""Template Tag to display a metric's history.
* ``slug`` -- the metric's unique slug
* ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
* ``since`` -- a datetime object or a string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``to`` -- the date until which we pull metrics
* ``with_data_table`` -- if True, prints the raw data in a table.
""" |
r = get_r()
try:
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
if to and len(to) == 10: # yyyy-mm-dd
to = datetime.strptime(to, "%Y-%m-%d")
elif to and len(to) == 19: # yyyy-mm-dd HH:MM:ss
to = datetime.strptime(to, "%Y-%m-%d %H:%M:%S")
except (TypeError, ValueError):
# assume we got a datetime object or leave since = None
pass
metric_history = r.get_metric_history(
slugs=slug,
since=since,
to=to,
granularity=granularity
)
return {
'since': since,
'to': to,
'slug': slug,
'granularity': granularity,
'metric_history': metric_history,
'with_data_table': with_data_table,
} |
<SYSTEM_TASK:>
Template Tag to display multiple metrics.
<END_TASK>
<USER_TASK:>
Description:
def aggregate_detail(slug_list, with_data_table=False):
"""Template Tag to display multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``with_data_table`` -- if True, prints the raw data in a table.
""" |
r = get_r()
metrics_data = []
granularities = r._granularities()
# XXX converting granularities into their key-name for metrics.
keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']
key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}
keys = [key_mapping[gran] for gran in granularities]
# Our metrics data is of the form:
#
# (slug, {time_period: value, ... }).
#
# Let's convert this to (slug, list_of_values) so that the list of
# values is in the same order as the granularities
for slug, data in r.get_metrics(slug_list):
values = [data[t] for t in keys]
metrics_data.append((slug, values))
return {
'chart_id': "metric-aggregate-{0}".format("-".join(slug_list)),
'slugs': slug_list,
'metrics': metrics_data,
'with_data_table': with_data_table,
'granularities': [g.title() for g in keys],
} |
<SYSTEM_TASK:>
Template Tag to display history for multiple metrics.
<END_TASK>
<USER_TASK:>
Description:
def aggregate_history(slugs, granularity="daily", since=None, with_data_table=False):
"""Template Tag to display history for multiple metrics.
* ``slugs`` -- A list of slugs to display
* ``granularity`` -- the granularity: seconds, minutes, hourly,
daily, weekly, monthly, yearly
* ``since`` -- a datetime object or a string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``with_data_table`` -- if True, prints the raw data in a table.
""" |
r = get_r()
slugs = list(slugs)
try:
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
except (TypeError, ValueError):
# assume we got a datetime object or leave since = None
pass
history = r.get_metric_history_chart_data(
slugs=slugs,
since=since,
granularity=granularity
)
return {
'chart_id': "metric-aggregate-history-{0}".format("-".join(slugs)),
'slugs': slugs,
'since': since,
'granularity': granularity,
'metric_history': history,
'with_data_table': with_data_table,
} |
<SYSTEM_TASK:>
Create an EFILTER-callable version of function 'func'.
<END_TASK>
<USER_TASK:>
Description:
def user_func(func, arg_types=None, return_type=None):
"""Create an EFILTER-callable version of function 'func'.
As a security precaution, EFILTER will not execute Python callables
unless they implement the IApplicative protocol. There is a perfectly good
implementation of this protocol in the standard library and user functions
can inherit from it.
This will declare a subclass of the standard library TypedFunction and
return an instance of it that EFILTER will happily call.
Arguments:
func: A Python callable that will serve as the implementation.
arg_types (optional): A tuple of argument types. If the function takes
keyword arguments, they must still have a defined order.
return_type (optional): The type the function returns.
Returns:
An instance of a custom subclass of efilter.stdlib.core.TypedFunction.
Examples:
def my_callback(tag):
print("I got %r" % tag)
api.apply("if True then my_callback('Hello World!')",
vars={
"my_callback": api.user_func(my_callback)
})
# This should print "I got 'Hello World!'".
""" |
class UserFunction(std_core.TypedFunction):
name = func.__name__
def __call__(self, *args, **kwargs):
return func(*args, **kwargs)
@classmethod
def reflect_static_args(cls):
return arg_types
@classmethod
def reflect_static_return(cls):
return return_type
return UserFunction() |
<SYSTEM_TASK:>
Determine the type of the query's output without actually running it.
<END_TASK>
<USER_TASK:>
Description:
def infer(query, replacements=None, root_type=None,
libs=("stdcore", "stdmath")):
"""Determine the type of the query's output without actually running it.
Arguments:
query: A query object or string with the query.
replacements: Built-time parameters to the query, either as dict or as
an array (for positional interpolation).
root_type: The types of variables to be supplied to the query inference.
libs: What standard libraries should be taken into account for the
inference.
Returns:
The type of the query's output, if it can be determined. If undecidable,
returns efilter.protocol.AnyType.
NOTE: The inference returns the type of a row in the results, not of the
actual Python object returned by 'apply'. For example, if a query
returns multiple rows, each one of which is an integer, the type of the
output is considered to be int, not a collection of rows.
Examples:
infer("5 + 5") # -> INumber
infer("SELECT * FROM people WHERE age > 10") # -> AnyType
# If root_type implements the IStructured reflection API:
infer("SELECT * FROM people WHERE age > 10", root_type=...) # -> dict
""" |
# Always make the scope stack start with stdcore.
if root_type:
type_scope = scope.ScopeStack(std_core.MODULE, root_type)
else:
type_scope = scope.ScopeStack(std_core.MODULE)
stdcore_included = False
for lib in libs:
if lib == "stdcore":
stdcore_included = True
continue
module = std_core.LibraryModule.ALL_MODULES.get(lib)
if not module:
raise TypeError("No standard library module %r." % lib)
type_scope = scope.ScopeStack(module, type_scope)
if not stdcore_included:
raise TypeError("'stdcore' must always be included.")
query = q.Query(query, params=replacements)
return infer_type.infer_type(query, type_scope) |
<SYSTEM_TASK:>
Yield objects from 'data' that match the 'query'.
<END_TASK>
<USER_TASK:>
Description:
def search(query, data, replacements=None):
"""Yield objects from 'data' that match the 'query'.""" |
query = q.Query(query, params=replacements)
for entry in data:
if solve.solve(query, entry).value:
yield entry |
<SYSTEM_TASK:>
Look ahead, doesn't affect current_token and next_token.
<END_TASK>
<USER_TASK:>
Description:
def peek(self, steps=1):
"""Look ahead, doesn't affect current_token and next_token.""" |
try:
tokens = iter(self)
for _ in six.moves.range(steps):
next(tokens)
return next(tokens)
except StopIteration:
return None |
<SYSTEM_TASK:>
Returns the next logical token, advancing the tokenizer.
<END_TASK>
<USER_TASK:>
Description:
def next_token(self):
"""Returns the next logical token, advancing the tokenizer.""" |
if self.lookahead:
self.current_token = self.lookahead.popleft()
return self.current_token
self.current_token = self._parse_next_token()
return self.current_token |
<SYSTEM_TASK:>
Will parse patterns until it gets to the next token or EOF.
<END_TASK>
<USER_TASK:>
Description:
def _parse_next_token(self):
"""Will parse patterns until it gets to the next token or EOF.""" |
while self._position < self.limit:
token = self._next_pattern()
if token:
return token
return None |
<SYSTEM_TASK:>
Parses the next pattern by matching each in turn.
<END_TASK>
<USER_TASK:>
Description:
def _next_pattern(self):
"""Parses the next pattern by matching each in turn.""" |
current_state = self.state_stack[-1]
position = self._position
for pattern in self.patterns:
if current_state not in pattern.states:
continue
m = pattern.regex.match(self.source, position)
if not m:
continue
position = m.end()
token = None
if pattern.next_state:
self.state_stack.append(pattern.next_state)
if pattern.action:
callback = getattr(self, pattern.action, None)
if callback is None:
raise RuntimeError(
"No method defined for pattern action %s!" %
pattern.action)
if "token" in m.groups():
value = m.group("token")
else:
value = m.group(0)
token = callback(string=value, match=m,
pattern=pattern)
self._position = position
return token
self._error("Don't know how to match next. Did you forget quotes?",
start=self._position, end=self._position + 1) |
<SYSTEM_TASK:>
Raise a nice error, with the token highlighted.
<END_TASK>
<USER_TASK:>
Description:
def _error(self, message, start, end=None):
"""Raise a nice error, with the token highlighted.""" |
raise errors.EfilterParseError(
source=self.source, start=start, end=end, message=message) |
<SYSTEM_TASK:>
Emits a token using the current pattern match and pattern label.
<END_TASK>
<USER_TASK:>
Description:
def emit(self, string, match, pattern, **_):
"""Emits a token using the current pattern match and pattern label.""" |
return grammar.Token(name=pattern.name, value=string,
start=match.start(), end=match.end()) |
<SYSTEM_TASK:>
Get version string by parsing PKG-INFO.
<END_TASK>
<USER_TASK:>
Description:
def get_pkg_version():
"""Get version string by parsing PKG-INFO.""" |
try:
with open("PKG-INFO", "r") as fp:
rgx = re.compile(r"Version: (\d+)")
for line in fp.readlines():
match = rgx.match(line)
if match:
return match.group(1)
except IOError:
return None |
<SYSTEM_TASK:>
Generates a version string.
<END_TASK>
<USER_TASK:>
Description:
def get_version(dev_version=False):
"""Generates a version string.
Arguments:
dev_version: Generate a verbose development version from git commits.
Examples:
1.1
1.1.dev43 # If 'dev_version' was passed.
""" |
if dev_version:
version = git_dev_version()
if not version:
raise RuntimeError("Could not generate dev version from git.")
return version
return "1!%d.%d" % (MAJOR, MINOR) |
<SYSTEM_TASK:>
Yields all the values from 'generator_func' and type-checks.
<END_TASK>
<USER_TASK:>
Description:
def getvalues(self):
"""Yields all the values from 'generator_func' and type-checks.
Yields:
Whatever 'generator_func' yields.
Raises:
TypeError: if subsequent values are of a different type than first
value.
ValueError: if subsequent iteration returns a different number of
values than the first iteration over the generator. (This would
mean 'generator_func' is not stable.)
""" |
idx = 0
generator = self._generator_func()
first_value = next(generator)
self._value_type = type(first_value)
yield first_value
for idx, value in enumerate(generator):
if not isinstance(value, self._value_type):
raise TypeError(
"All values of a repeated var must be of the same type."
" First argument was of type %r, but argument %r is of"
" type %r." %
(self._value_type, value, repeated.value_type(value)))
self._watermark = max(self._watermark, idx + 1)
yield value
# Iteration stopped - check if we're at the previous watermark and raise
# if not.
if idx + 1 < self._watermark:
raise ValueError(
"LazyRepetition %r was previously able to iterate its"
" generator up to idx %d, but this time iteration stopped after"
" idx %d! Generator function %r is not stable." %
(self, self._watermark, idx + 1, self._generator_func))
# Watermark is higher than previous count! Generator function returned
# more values this time than last time.
if self._count is not None and self._watermark >= self._count:
raise ValueError(
"LazyRepetition %r previously iterated only up to idx %d but"
" was now able to reach idx %d! Generator function %r is not"
" stable." %
(self, self._count - 1, idx + 1, self._generator_func))
# We've finished iteration - cache count. After this the count will be
# watermark + 1 forever.
self._count = self._watermark + 1 |
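The stability requirement can be shown with a generator function that shrinks between passes. This is a hedged sketch: it assumes LazyRepetition is constructed directly from a generator function, which is not shown in this excerpt.
import itertools

sizes = itertools.count(3, -1)            # 3, then 2, then 1, ...

def unstable():
    return iter(range(next(sizes)))       # yields fewer values on every call

rep = LazyRepetition(unstable)            # hypothetical constructor
list(rep.getvalues())                     # first pass: [0, 1, 2], watermark becomes 2
list(rep.getvalues())                     # second pass stops early -> ValueError (not stable)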
<SYSTEM_TASK:>
Print a detailed audit of all calls to this function.
<END_TASK>
<USER_TASK:>
Description:
def call_audit(func):
"""Print a detailed audit of all calls to this function.""" |
def audited_func(*args, **kwargs):
import traceback
stack = traceback.extract_stack()
r = func(*args, **kwargs)
func_name = func.__name__
print("@depth %d, trace %s -> %s(*%r, **%r) => %r" % (
len(stack),
" -> ".join("%s:%d:%s" % x[0:3] for x in stack[-5:-2]),
func_name,
args,
kwargs,
r))
return r
return audited_func |
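A brief usage sketch; ``add`` is a throwaway function defined only for this example:
@call_audit
def add(x, y):
    return x + y

add(2, 3)
# prints something along the lines of:
# @depth 14, trace script.py:10:<module> -> add(*(2, 3), **{}) => 5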
<SYSTEM_TASK:>
Prefer one type over another type, all else being equivalent.
<END_TASK>
<USER_TASK:>
Description:
def prefer_type(self, prefer, over):
"""Prefer one type over another type, all else being equivalent.
With abstract base classes (Python's abc module) it is possible for
a type to appear to be a subclass of another type without the supertype
appearing in the subtype's MRO. As such, the supertype has no order
with respect to other supertypes, and this may lead to ambiguity if two
implementations are provided for unrelated abstract types.
In such cases, it is possible to disambiguate by explicitly telling the
function to prefer one type over the other.
Arguments:
prefer: Preferred type (class).
over: The type we don't like (class).
Raises:
ValueError: In case of logical conflicts.
""" |
self._write_lock.acquire()
try:
if self._preferred(preferred=over, over=prefer):
raise ValueError(
"Type %r is already preferred over %r." % (over, prefer))
prefs = self._prefer_table.setdefault(prefer, set())
prefs.add(over)
finally:
self._write_lock.release() |
<SYSTEM_TASK:>
Finds the best implementation of this function given a type.
<END_TASK>
<USER_TASK:>
Description:
def _find_and_cache_best_function(self, dispatch_type):
"""Finds the best implementation of this function given a type.
This function caches the result, and uses locking for thread safety.
Returns:
Implementing function, in below order of preference:
1. Explicitly registered implementations (through
multimethod.implement) for types that 'dispatch_type' either is
or inherits from directly.
2. Explicitly registered implementations accepting an abstract type
(interface) in which dispatch_type participates (through
abstract_type.register() or the convenience methods).
3. Default behavior of the multimethod function. This will usually
raise a NotImplementedError, by convention.
Raises:
TypeError: If two implementing functions are registered for
different abstract types, and 'dispatch_type' participates in
both, and no order of preference was specified using
prefer_type.
""" |
result = self._dispatch_table.get(dispatch_type)
if result:
return result
# The outer try ensures the lock is always released.
with self._write_lock:
try:
dispatch_mro = dispatch_type.mro()
except TypeError:
# Not every type has an MRO.
dispatch_mro = ()
best_match = None
result_type = None
for candidate_type, candidate_func in self.implementations:
if not issubclass(dispatch_type, candidate_type):
# Skip implementations that are obviously unrelated.
continue
try:
# The candidate implementation may be for a type that's
# actually in the MRO, or it may be for an abstract type.
match = dispatch_mro.index(candidate_type)
except ValueError:
# This means we have an implementation for an abstract
# type, which ranks below all concrete types.
match = None
if best_match is None:
if result and match is None:
# Already have a result, and no order of preference.
# This is probably because the type is a member of two
# abstract types and we have separate implementations
# for those two abstract types.
if self._preferred(candidate_type, over=result_type):
result = candidate_func
result_type = candidate_type
elif self._preferred(result_type, over=candidate_type):
# No need to update anything.
pass
else:
raise TypeError(
"Two candidate implementations found for "
"multimethod function %s (dispatch type %s) "
"and neither is preferred." %
(self.func_name, dispatch_type))
else:
result = candidate_func
result_type = candidate_type
best_match = match
if (match or 0) < (best_match or 0):
result = candidate_func
result_type = candidate_type
best_match = match
self._dispatch_table[dispatch_type] = result
return result |
<SYSTEM_TASK:>
Return a decorator that will register the implementation.
<END_TASK>
<USER_TASK:>
Description:
def implementation(self, for_type=None, for_types=None):
"""Return a decorator that will register the implementation.
Example:
@multimethod
def add(x, y):
pass
@add.implementation(for_type=int)
def add(x, y):
return x + y
@add.implementation(for_type=SomeType)
def add(x, y):
return int(x) + int(y)
""" |
for_types = self.__get_types(for_type, for_types)
def _decorator(implementation):
self.implement(implementation, for_types=for_types)
return self
return _decorator |
<SYSTEM_TASK:>
Registers an implementing function for for_type.
<END_TASK>
<USER_TASK:>
Description:
def implement(self, implementation, for_type=None, for_types=None):
"""Registers an implementing function for for_type.
Arguments:
implementation: Callable implementation for this type.
for_type: The type this implementation applies to.
for_types: Same as for_type, but takes a tuple of types.
for_type and for_types cannot both be passed (for obvious reasons.)
Raises:
ValueError
""" |
unbound_implementation = self.__get_unbound_function(implementation)
for_types = self.__get_types(for_type, for_types)
for t in for_types:
self._write_lock.acquire()
try:
self.implementations.append((t, unbound_implementation))
finally:
self._write_lock.release() |
<SYSTEM_TASK:>
Includes the Gauge slugs and data in the context.
<END_TASK>
<USER_TASK:>
Description:
def get_context_data(self, **kwargs):
"""Includes the Gauge slugs and data in the context.""" |
data = super(GaugesView, self).get_context_data(**kwargs)
data.update({'gauges': get_r().gauge_slugs()})
return data |
<SYSTEM_TASK:>
Pull the metrics from the submitted form, and store them as a
<END_TASK>
<USER_TASK:>
Description:
def form_valid(self, form):
"""Pull the metrics from the submitted form, and store them as a
list of strings in ``self.metric_slugs``.
""" |
self.metric_slugs = [k.strip() for k in form.cleaned_data['metrics']]
return super(AggregateFormView, self).form_valid(form) |
<SYSTEM_TASK:>
See if this view was called with a specified category.
<END_TASK>
<USER_TASK:>
Description:
def get(self, *args, **kwargs):
"""See if this view was called with a specified category.""" |
self.initial = {"category_name": kwargs.get('category_name', None)}
return super(CategoryFormView, self).get(*args, **kwargs) |
<SYSTEM_TASK:>
Rerun sets the state of the Pipeline to scheduling so that the Pipeline
<END_TASK>
<USER_TASK:>
Description:
def rerun(self):
"""
Rerun sets the state of the Pipeline to scheduling so that the Pipeline
can be checked for new stages
""" |
self._state = states.SCHEDULING
self._completed_flag = threading.Event()
print('Pipeline %s in %s state' % (self._uid, self._state)) |
<SYSTEM_TASK:>
Create a Pipeline from a dictionary. The change is in inplace.
<END_TASK>
<USER_TASK:>
Description:
def from_dict(self, d):
"""
Create a Pipeline from a dictionary. The change is in inplace.
:argument: python dictionary
:return: None
""" |
if 'uid' in d:
if d['uid']:
self._uid = d['uid']
if 'name' in d:
if d['name']:
self._name = d['name']
if 'state' in d:
if isinstance(d['state'], str) or isinstance(d['state'], unicode):
if d['state'] in states._pipeline_state_values.keys():
self._state = d['state']
else:
raise ValueError(obj=self._uid,
attribute='state',
expected_value=states._pipeline_state_values.keys(),
actual_value=d['state'])
else:
raise TypeError(entity='state', expected_type=str,
actual_type=type(d['state']))
else:
self._state = states.INITIAL
if 'state_history' in d:
if isinstance(d['state_history'], list):
self._state_history = d['state_history']
else:
raise TypeError(entity='state_history', expected_type=list, actual_type=type(
d['state_history']))
if 'completed' in d:
if isinstance(d['completed'], bool):
if d['completed']:
self._completed_flag.set()
else:
raise TypeError(entity='completed', expected_type=bool,
actual_type=type(d['completed'])) |
<SYSTEM_TASK:>
Decorator for retrying method calls, based on instance parameters.
<END_TASK>
<USER_TASK:>
Description:
def auto_retry(fun):
"""Decorator for retrying method calls, based on instance parameters.""" |
@functools.wraps(fun)
def decorated(instance, *args, **kwargs):
"""Wrapper around a decorated function."""
cfg = instance._retry_config
remaining_tries = cfg.retry_attempts
current_wait = cfg.retry_wait
retry_backoff = cfg.retry_backoff
last_error = None
while remaining_tries >= 0:
try:
return fun(instance, *args, **kwargs)
except socket.error as e:
last_error = e
instance._retry_logger.warning('Connection failed: %s', e)
remaining_tries -= 1
if remaining_tries == 0:
# Last attempt
break
# Wait a bit
time.sleep(current_wait)
current_wait *= retry_backoff
# All attempts failed, let's raise the last error.
raise last_error
return decorated |
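A hypothetical usage sketch, assuming the decorator above is in scope: the decorated method's instance must expose ``_retry_config`` (with ``retry_attempts``, ``retry_wait``, ``retry_backoff``) and ``_retry_logger``; all names below are illustrative.
import collections
import logging
import socket

RetryConfig = collections.namedtuple(
    'RetryConfig', 'retry_attempts retry_wait retry_backoff')

class Client(object):
    def __init__(self):
        self._retry_config = RetryConfig(retry_attempts=3, retry_wait=0.5,
                                         retry_backoff=2)
        self._retry_logger = logging.getLogger('client')

    @auto_retry
    def ping(self):
        raise socket.error('connection refused')   # always fails in this sketch

Client().ping()   # retried after 0.5s and 1.0s waits; the last error is re-raised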
<SYSTEM_TASK:>
Generate an isocurve from vertex data in a surface mesh.
<END_TASK>
<USER_TASK:>
Description:
def iso_mesh_line(vertices, tris, vertex_data, levels):
"""Generate an isocurve from vertex data in a surface mesh.
Parameters
----------
vertices : ndarray, shape (Nv, 3)
Vertex coordinates.
tris : ndarray, shape (Nf, 3)
Indices of triangular element into the vertices array.
vertex_data : ndarray, shape (Nv,)
data at vertex.
levels : ndarray, shape (Nl,)
Levels at which to generate an isocurve
Returns
-------
lines : ndarray, shape (Nvout, 3)
Vertex coordinates for line points
connects : ndarray, shape (Ne, 2)
Indices of line element into the vertex array.
vertex_level: ndarray, shape (Nvout,)
level for each vertex in lines
Notes
-----
Uses a marching squares algorithm to generate the isolines.
""" |
lines = None
connects = None
vertex_level = None
level_index = None
if not all([isinstance(x, np.ndarray) for x in (vertices, tris,
vertex_data, levels)]):
raise ValueError('all inputs must be numpy arrays')
if vertices.shape[1] <= 3:
verts = vertices
elif vertices.shape[1] == 4:
verts = vertices[:, :-1]
else:
verts = None
if (verts is not None and tris.shape[1] == 3 and
vertex_data.shape[0] == verts.shape[0]):
edges = np.vstack((tris.reshape((-1)),
np.roll(tris, -1, axis=1).reshape((-1)))).T
edge_datas = vertex_data[edges]
edge_coors = verts[edges].reshape(tris.shape[0]*3, 2, 3)
for lev in levels:
# index to select edges whose endpoints straddle the level
# (one endpoint below lev, the other at or above it)
index = (edge_datas >= lev)
index = index[:, 0] ^ index[:, 1] # xor calculation
# Select the crossing edges
edge_datas_Ok = edge_datas[index, :]
xyz = edge_coors[index]
# Linear interpolation
ratio = np.array([(lev - edge_datas_Ok[:, 0]) /
(edge_datas_Ok[:, 1] - edge_datas_Ok[:, 0])])
point = xyz[:, 0, :] + ratio.T * (xyz[:, 1, :] - xyz[:, 0, :])
nbr = point.shape[0]//2
if connects is not None:
connect = np.arange(0, nbr*2).reshape((nbr, 2)) + \
len(lines)
connects = np.append(connects, connect, axis=0)
lines = np.append(lines, point, axis=0)
vertex_level = np.append(vertex_level,
np.zeros(len(point)) +
lev)
level_index = np.append(level_index, np.array(len(point)))
else:
lines = point
connects = np.arange(0, nbr*2).reshape((nbr, 2))
vertex_level = np.zeros(len(point)) + lev
level_index = np.array(len(point))
vertex_level = vertex_level.reshape((vertex_level.size, 1))
return lines, connects, vertex_level, level_index |
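A small usage sketch (numpy imported as np, as the implementation above already assumes): a single triangle with scalar data at its vertices, cut at the 0.5 level.
import numpy as np

verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
tris = np.array([[0, 1, 2]])
data = np.array([0., 1., 0.])
lines, connects, vertex_level, level_index = iso_mesh_line(
    verts, tris, data, np.array([0.5]))
# lines holds the two points where the data crosses 0.5 along the triangle's
# edges, (0.5, 0, 0) and (0.5, 0.5, 0); connects is [[0, 1]].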
<SYSTEM_TASK:>
Set the color
<END_TASK>
<USER_TASK:>
Description:
def set_color(self, color):
"""Set the color
Parameters
----------
color : instance of Color
The color to use.
""" |
if color is not None:
self._color_lev = color
self._need_color_update = True
self.update() |
<SYSTEM_TASK:>
compute LineVisual color from level index and corresponding level
<END_TASK>
<USER_TASK:>
Description:
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding level
color
""" |
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) + colors[i])
self._cl = np.vstack(level_color) |
<SYSTEM_TASK:>
Remove the layer artist for good
<END_TASK>
<USER_TASK:>
Description:
def remove(self):
"""
Remove the layer artist for good
""" |
self._multivol.deallocate(self.id)
ARRAY_CACHE.pop(self.id, None)
PIXEL_CACHE.pop(self.id, None) |
<SYSTEM_TASK:>
Inject functions and constants from PyOpenGL but leave out the
<END_TASK>
<USER_TASK:>
Description:
def _inject():
""" Inject functions and constants from PyOpenGL but leave out the
names that are deprecated or that we provide in our API.
""" |
# Get namespaces
NS = globals()
GLNS = _GL.__dict__
# Get names that we use in our API
used_names = []
used_names.extend([names[0] for names in _pyopengl2._functions_to_import])
used_names.extend([name for name in _pyopengl2._used_functions])
NS['_used_names'] = used_names
#
used_constants = set(_constants.__dict__)
# Count
injected_constants = 0
injected_functions = 0
for name in dir(_GL):
if name.startswith('GL_'):
# todo: find list of deprecated constants
if name not in used_constants:
NS[name] = GLNS[name]
injected_constants += 1
elif name.startswith('gl'):
# Functions
if (name + ',') in _deprecated_functions:
pass # Function is deprecated
elif name in used_names:
pass # Function is in our GL ES 2.0 API
else:
NS[name] = GLNS[name]
injected_functions += 1 |
<SYSTEM_TASK:>
Alternative to `imp.find_module` that can also search in subpackages.
<END_TASK>
<USER_TASK:>
Description:
def _find_module(name, path=None):
"""
Alternative to `imp.find_module` that can also search in subpackages.
""" |
parts = name.split('.')
for part in parts:
if path is not None:
path = [path]
fh, path, descr = imp.find_module(part, path)
if fh is not None and part != parts[-1]:
fh.close()
return fh, path, descr |
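For illustration, the helper resolves a dotted module name one part at a time; this sketch assumes a Python version that still ships the deprecated imp module:

# locate the 'xml.sax' subpackage by walking one dotted part at a time
fh, path, descr = _find_module('xml.sax')
print(path)  # filesystem location of the xml.sax package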
<SYSTEM_TASK:>
Triangulate a set of vertices
<END_TASK>
<USER_TASK:>
Description:
def triangulate(vertices):
"""Triangulate a set of vertices
Parameters
----------
vertices : array-like
The vertices.
Returns
-------
vertices : array-like
The vertices.
triangles : array-like
The triangles.
""" |
n = len(vertices)
vertices = np.asarray(vertices)
zmean = vertices[:, 2].mean()
vertices_2d = vertices[:, :2]
segments = np.repeat(np.arange(n + 1), 2)[1:-1]
segments[-2:] = n - 1, 0
if _TRIANGLE_AVAILABLE:
vertices_2d, triangles = _triangulate_cpp(vertices_2d, segments)
else:
vertices_2d, triangles = _triangulate_python(vertices_2d, segments)
vertices = np.empty((len(vertices_2d), 3))
vertices[:, :2] = vertices_2d
vertices[:, 2] = zmean
return vertices, triangles |
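A hypothetical usage sketch: triangulating a planar square (the exact triangle indices depend on whether the optional triangle backend is available):

import numpy as np

square = np.array([[0., 0., 0.],
                   [1., 0., 0.],
                   [1., 1., 0.],
                   [0., 1., 0.]])
verts, tris = triangulate(square)
# verts stay in the original z=0 plane; tris indexes triangles covering the square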
<SYSTEM_TASK:>
Given a triangle, return the edge that is opposite point i.
<END_TASK>
<USER_TASK:>
Description:
def _edge_opposite_point(self, tri, i):
""" Given a triangle, return the edge that is opposite point i.
Vertices are returned in the same orientation as in tri.
""" |
ind = tri.index(i)
return (tri[(ind+1) % 3], tri[(ind+2) % 3]) |
<SYSTEM_TASK:>
Return a dictionary containing, for each edge in self.edges, a list
<END_TASK>
<USER_TASK:>
Description:
def _find_edge_intersections(self):
"""
Return a dictionary containing, for each edge in self.edges, a list
of the positions at which the edge should be split.
""" |
edges = self.pts[self.edges]
cuts = {} # { edge: [(intercept, point), ...], ... }
for i in range(edges.shape[0]-1):
# intersection of edge i onto all others
int1 = self._intersect_edge_arrays(edges[i:i+1], edges[i+1:])
# intersection of all edges onto edge i
int2 = self._intersect_edge_arrays(edges[i+1:], edges[i:i+1])
# select for pairs that intersect
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
mask1 = (int1 >= 0) & (int1 <= 1)
mask2 = (int2 >= 0) & (int2 <= 1)
mask3 = mask1 & mask2 # all intersections
finally:
np.seterr(**err)
# compute points of intersection
inds = np.argwhere(mask3)[:, 0]
if len(inds) == 0:
continue
h = int2[inds][:, np.newaxis]
pts = (edges[i, 0][np.newaxis, :] * (1.0 - h) +
edges[i, 1][np.newaxis, :] * h)
# record for all edges the location of cut points
edge_cuts = cuts.setdefault(i, [])
for j, ind in enumerate(inds):
if 0 < int2[ind] < 1:
edge_cuts.append((int2[ind], pts[j]))
if 0 < int1[ind] < 1:
other_cuts = cuts.setdefault(ind+i+1, [])
other_cuts.append((int1[ind], pts[j]))
# sort all cut lists by intercept, remove duplicates
for k, v in cuts.items():
v.sort(key=lambda x: x[0])
for i in range(len(v)-2, -1, -1):
if v[i][0] == v[i+1][0]:
v.pop(i+1)
return cuts |
<SYSTEM_TASK:>
Entry point of the IPython extension
<END_TASK>
<USER_TASK:>
Description:
def load_ipython_extension(ipython):
""" Entry point of the IPython extension
Parameters
----------
IPython : IPython interpreter
An instance of the IPython interpreter that is handed
over to the extension
""" |
import IPython
# don't continue if IPython version is < 3.0
ipy_version = LooseVersion(IPython.__version__)
if ipy_version < LooseVersion("3.0.0"):
ipython.write_err("Your IPython version is older than "
"version 3.0.0, the minimum for Vispy's "
"IPython backend. Please upgrade your IPython "
"version.")
return
_load_webgl_backend(ipython) |
<SYSTEM_TASK:>
Non-uniform scaling along the x, y, and z axes
<END_TASK>
<USER_TASK:>
Description:
def scale(s, dtype=None):
"""Non-uniform scaling along the x, y, and z axes
Parameters
----------
s : array-like, shape (3,)
Scaling in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the scaling.
""" |
assert len(s) == 3
return np.array(np.diag(np.concatenate([s, (1.,)])), dtype) |
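A quick sanity check of the scaling matrix under the row-vector convention used by these transforms (the scale factors are illustrative):

import numpy as np

M = scale([2., 3., 1.])
point = np.array([1., 1., 1., 1.])  # homogeneous coordinates
print(point.dot(M))  # -> [2. 3. 1. 1.]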
<SYSTEM_TASK:>
The 4x4 rotation matrix for rotation about a vector.
<END_TASK>
<USER_TASK:>
Description:
def rotate(angle, axis, dtype=None):
"""The 3x3 rotation matrix for rotation about a vector.
Parameters
----------
angle : float
The angle of rotation, in degrees.
axis : ndarray
The x, y, z coordinates of the axis direction vector.
Returns
-------
M : ndarray
Transformation matrix (4x4) describing the rotation.
""" |
angle = np.radians(angle)
assert len(axis) == 3
x, y, z = axis / np.linalg.norm(axis)
c, s = math.cos(angle), math.sin(angle)
cx, cy, cz = (1 - c) * x, (1 - c) * y, (1 - c) * z
M = np.array([[cx * x + c, cy * x - z * s, cz * x + y * s, .0],
[cx * y + z * s, cy * y + c, cz * y - x * s, 0.],
[cx * z - y * s, cy * z + x * s, cz * z + c, 0.],
[0., 0., 0., 1.]], dtype).T
return M |
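Illustrative use: rotating the x unit vector by 90 degrees about the z axis maps it onto the y axis (row-vector convention, hence the final .T in the implementation):

import numpy as np

R = rotate(90, [0., 0., 1.])
v = np.array([1., 0., 0., 1.])  # homogeneous x unit vector
print(np.round(v.dot(R), 6))   # -> [0. 1. 0. 1.]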
<SYSTEM_TASK:>
Create perspective projection matrix
<END_TASK>
<USER_TASK:>
Description:
def perspective(fovy, aspect, znear, zfar):
"""Create perspective projection matrix
Parameters
----------
fovy : float
The field of view along the y axis.
aspect : float
Aspect ratio of the view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : ndarray
Perspective projection matrix (4x4).
""" |
assert(znear != zfar)
h = math.tan(fovy / 360.0 * math.pi) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar) |
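A hypothetical call for a 45-degree vertical field of view and a 4:3 viewport; the matrix itself is assembled by the frustum() helper referenced above:

proj = perspective(45.0, 800.0 / 600.0, 0.1, 100.0)
# proj is a 4x4 ndarray suitable as an OpenGL projection matrix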
<SYSTEM_TASK:>
Find a 3D transformation matrix that maps points1 onto points2.
<END_TASK>
<USER_TASK:>
Description:
def affine_map(points1, points2):
""" Find a 3D transformation matrix that maps points1 onto points2.
Arguments are specified as arrays of four 3D coordinates, shape (4, 3).
""" |
A = np.ones((4, 4))
A[:, :3] = points1
B = np.ones((4, 4))
B[:, :3] = points2
# solve 3 sets of linear equations to determine
# transformation matrix elements
matrix = np.eye(4)
for i in range(3):
# solve Ax = B; x is one row of the desired transformation matrix
matrix[i] = np.linalg.solve(A, B[:, i])
return matrix |
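A small sketch with made-up points: mapping a tetrahedron onto a scaled and shifted copy of itself, then verifying the fit in homogeneous coordinates:

import numpy as np

p1 = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
p2 = p1 * 2.0 + np.array([5., 0., 0.])      # scaled, then translated
M = affine_map(p1, p2)
homo = np.hstack([p1, np.ones((4, 1))])      # homogeneous source points
assert np.allclose(homo.dot(M.T)[:, :3], p2)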
<SYSTEM_TASK:>
Add a final message; flush the message list if no parent profiler.
<END_TASK>
<USER_TASK:>
Description:
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
""" |
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._new_msg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush() |
<SYSTEM_TASK:>
Transform vispy specific command line args to vispy config.
<END_TASK>
<USER_TASK:>
Description:
def _parse_command_line_arguments():
""" Transform vispy specific command line args to vispy config.
Put into a function so that any variables don't leak into the vispy namespace.
""" |
global config
# Get command line args for vispy
argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',
'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',
'vispy-dpi=', 'vispy-audit-tests']
try:
opts, args = getopt.getopt(sys.argv[1:], '', argnames)
except getopt.GetoptError:
opts = []
# Use them to set the config values
for o, a in opts:
if o.startswith('--vispy'):
if o == '--vispy-backend':
config['default_backend'] = a
logger.info('vispy backend: %s', a)
elif o == '--vispy-gl-debug':
config['gl_debug'] = True
elif o == '--vispy-glir-file':
config['glir_file'] = a
elif o == '--vispy-log':
if ',' in a:
verbose, match = a.split(',')
else:
verbose = a
match = None
config['logging_level'] = a
set_log_level(verbose, match)
elif o == '--vispy-profile':
config['profile'] = a
elif o == '--vispy-cprofile':
_enable_profiling()
elif o == '--vispy-help':
print(VISPY_HELP)
elif o == '--vispy-dpi':
config['dpi'] = int(a)
elif o == '--vispy-audit-tests':
config['audit_tests'] = True
else:
logger.warning("Unsupported vispy flag: %s" % o) |
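For illustration only, the flags listed in argnames can be exercised by seeding sys.argv before the helper runs (the application name is hypothetical; in practice vispy parses the real command line at import time):

import sys

sys.argv = ['my_vispy_app.py', '--vispy-backend=pyqt5',
            '--vispy-log=debug,vispy', '--vispy-dpi=96']
_parse_command_line_arguments()
# afterwards config['default_backend'] == 'pyqt5', config['dpi'] == 96,
# and set_log_level('debug', 'vispy') has been applied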
<SYSTEM_TASK:>
Helper to get the default directory for storing vispy data
<END_TASK>
<USER_TASK:>
Description:
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data""" |
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path |
<SYSTEM_TASK:>
Save configuration keys to vispy config file
<END_TASK>
<USER_TASK:>
Description:
def save_config(**kwargs):
"""Save configuration keys to vispy config file
Parameters
----------
**kwargs : keyword arguments
Key/value pairs to save to the config file.
""" |
if kwargs == {}:
kwargs = config._config
current_config = _load_config()
current_config.update(**kwargs)
# write to disk
fname = _get_config_fname()
if fname is None:
raise RuntimeError('config filename could not be determined')
if not op.isdir(op.dirname(fname)):
os.mkdir(op.dirname(fname))
with open(fname, 'w') as fid:
json.dump(current_config, fid, sort_keys=True, indent=0) |
<SYSTEM_TASK:>
Set vispy data download directory
<END_TASK>
<USER_TASK:>
Description:
def set_data_dir(directory=None, create=False, save=False):
"""Set vispy data download directory
Parameters
----------
directory : str | None
The directory to use.
create : bool
If True, create directory if it doesn't exist.
save : bool
If True, save the configuration to the vispy config.
""" |
if directory is None:
directory = _data_path
if _data_path is None:
raise IOError('default path cannot be determined, please '
'set it manually (directory != None)')
if not op.isdir(directory):
if not create:
raise IOError('directory "%s" does not exist, perhaps try '
'create=True to create it?' % directory)
os.mkdir(directory)
config.update(data_path=directory)
if save:
save_config(data_path=directory) |
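A hypothetical call pointing the data directory at a scratch location, creating it on the fly and persisting the choice via save_config():

set_data_dir('/tmp/vispy_data', create=True, save=True)
# config['data_path'] now points at /tmp/vispy_data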
<SYSTEM_TASK:>
Start profiling and register callback to print stats when the program
<END_TASK>
<USER_TASK:>
Description:
def _enable_profiling():
""" Start profiling and register callback to print stats when the program
exits.
""" |
import cProfile
import atexit
global _profiler
_profiler = cProfile.Profile()
_profiler.enable()
atexit.register(_profile_atexit) |
<SYSTEM_TASK:>
Get relevant system and debugging information
<END_TASK>
<USER_TASK:>
Description:
def sys_info(fname=None, overwrite=False):
"""Get relevant system and debugging information
Parameters
----------
fname : str | None
Filename to dump info to. Use None to simply print.
overwrite : bool
If True, overwrite file (if it exists).
Returns
-------
out : str
The system information as a string.
""" |
if fname is not None and op.isfile(fname) and not overwrite:
raise IOError('file exists, use overwrite=True to overwrite')
out = ''
try:
# Nest all imports here to avoid any circular imports
from ..app import use_app, Canvas
from ..app.backends import BACKEND_NAMES
from ..gloo import gl
from ..testing import has_backend
# get default app
with use_log_level('warning'):
app = use_app(call_reuse=False) # suppress messages
out += 'Platform: %s\n' % platform.platform()
out += 'Python: %s\n' % str(sys.version).replace('\n', ' ')
out += 'Backend: %s\n' % app.backend_name
for backend in BACKEND_NAMES:
if backend.startswith('ipynb_'):
continue
with use_log_level('warning', print_msg=False):
which = has_backend(backend, out=['which'])[1]
out += '{0:<9} {1}\n'.format(backend + ':', which)
out += '\n'
# We need an OpenGL context to get GL info
canvas = Canvas('Test', (10, 10), show=False, app=app)
canvas._backend._vispy_set_current()
out += 'GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),)
x_ = gl.GL_MAX_TEXTURE_SIZE
out += 'MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),)
out += 'Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)
canvas.close()
except Exception: # don't stop printing info
out += '\nInfo-gathering error:\n%s' % traceback.format_exc()
pass
if fname is not None:
with open(fname, 'w') as fid:
fid.write(out)
return out |
<SYSTEM_TASK:>
Compact vertices and indices within given tolerance
<END_TASK>
<USER_TASK:>
Description:
def compact(vertices, indices, tolerance=1e-3):
""" Compact vertices and indices within given tolerance """ |
# Transform vertices into a structured array for np.unique to work
n = len(vertices)
V = np.zeros(n, dtype=[("pos", np.float32, 3)])
V["pos"][:, 0] = vertices[:, 0]
V["pos"][:, 1] = vertices[:, 1]
V["pos"][:, 2] = vertices[:, 2]
epsilon = tolerance
decimals = int(np.log(epsilon)/np.log(1/10.))
# Round all vertices within given decimals
V_ = np.zeros_like(V)
X = V["pos"][:, 0].round(decimals=decimals)
X[np.where(abs(X) < epsilon)] = 0
V_["pos"][:, 0] = X
Y = V["pos"][:, 1].round(decimals=decimals)
Y[np.where(abs(Y) < epsilon)] = 0
V_["pos"][:, 1] = Y
Z = V["pos"][:, 2].round(decimals=decimals)
Z[np.where(abs(Z) < epsilon)] = 0
V_["pos"][:, 2] = Z
# Find the unique vertices AND the mapping
U, RI = np.unique(V_, return_inverse=True)
# Translate indices from original vertices into the reduced set (U)
indices = indices.ravel()
I_ = indices.copy().ravel()
for i in range(len(indices)):
I_[i] = RI[indices[i]]
I_ = I_.reshape(len(indices)//3, 3)
# Return the reduced vertex set, the translated indices, and the mapping
# that allows going from U back to V
return U.view(np.float32).reshape(len(U), 3), I_, RI |
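An illustrative call on two triangles that share an edge but duplicate its vertices; the shared vertices collapse to single entries (values invented for the example):

import numpy as np

verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.],
                  [1., 0., 0.], [0., 1., 0.], [1., 1., 0.]], dtype=np.float32)
tris = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.uint32)
V, I, mapping = compact(verts, tris)
# V holds 4 unique vertices, I still describes 2 triangles,
# and mapping[i] is the index of original vertex i within V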
<SYSTEM_TASK:>
Compute normals over a triangulated surface
<END_TASK>
<USER_TASK:>
Description:
def normals(vertices, indices):
"""
Compute normals over a triangulated surface
Parameters
----------
vertices : ndarray (n,3)
triangles vertices
indices : ndarray (p,3)
triangles indices
""" |
# Compact similar vertices
vertices, indices, mapping = compact(vertices, indices)
T = vertices[indices]
N = np.cross(T[:, 1] - T[:, 0], T[:, 2]-T[:, 0])
L = np.sqrt(np.sum(N * N, axis=1))
L[L == 0] = 1.0 # prevent divide-by-zero
N /= L[:, np.newaxis]
normals = np.zeros_like(vertices)
normals[indices[:, 0]] += N
normals[indices[:, 1]] += N
normals[indices[:, 2]] += N
L = np.sqrt(np.sum(normals*normals, axis=1))
L[L == 0] = 1.0
normals /= L[:, np.newaxis]
return normals[mapping] |
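As a sanity check, a single counter-clockwise triangle in the z=0 plane should yield per-vertex normals along +z (example data, not from the source):

import numpy as np

verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]], dtype=np.float32)
tris = np.array([[0, 1, 2]], dtype=np.uint32)
n = normals(verts, tris)
# n has shape (3, 3); every row is approximately [0., 0., 1.]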
<SYSTEM_TASK:>
Create the native widget if not already done so. If the widget
<END_TASK>
<USER_TASK:>
Description:
def create_native(self):
""" Create the native widget if not already done so. If the widget
is already created, this function does nothing.
""" |
if self._backend is not None:
return
# Make sure that the app is active
assert self._app.native
# Instantiate the backend with the right class
self._app.backend_module.CanvasBackend(self, **self._backend_kwargs)
# self._backend = set by BaseCanvasBackend
self._backend_kwargs = None # Clean up
# Connect to draw event (append to the end)
# Process GLIR commands at each paint event
self.events.draw.connect(self.context.flush_commands, position='last')
if self._autoswap:
self.events.draw.connect((self, 'swap_buffers'),
ref=True, position='last') |
<SYSTEM_TASK:>
Connect a function to an event
<END_TASK>
<USER_TASK:>
Description:
def connect(self, fun):
""" Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
""" |
# Get and check name
name = fun.__name__
if not name.startswith('on_'):
raise ValueError('When connecting a function based on its name, '
'the name should start with "on_"')
eventname = name[3:]
# Get emitter
try:
emitter = self.events[eventname]
except KeyError:
raise ValueError(
'Event "%s" not available on this canvas.' %
eventname)
# Connect
emitter.connect(fun) |
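As the docstring notes, this method is typically used as a decorator; a sketch assuming canvas is an instance of this Canvas class:

@canvas.connect
def on_draw(event):
    # registered with the canvas's 'draw' emitter because of the 'on_' prefix
    pass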
<SYSTEM_TASK:>
Show or hide the canvas
<END_TASK>
<USER_TASK:>
Description:
def show(self, visible=True, run=False):
"""Show or hide the canvas
Parameters
----------
visible : bool
Make the canvas visible.
run : bool
Run the backend event loop.
""" |
self._backend._vispy_set_visible(visible)
if run:
self.app.run() |
<SYSTEM_TASK:>
Close the canvas
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Close the canvas
Notes
-----
This will usually destroy the GL context. For Qt, the context
(and widget) will be destroyed only if the widget is top-level.
To avoid having the widget destroyed (more like standard Qt
behavior), consider making the widget a sub-widget.
""" |
if self._backend is not None and not self._closed:
self._closed = True
self.events.close()
self._backend._vispy_close()
forget_canvas(self) |
<SYSTEM_TASK:>
Update the fps estimate after every measurement window
<END_TASK>
<USER_TASK:>
Description:
def _update_fps(self, event):
"""Update the fps after every window""" |
self._frame_count += 1
diff = time() - self._basetime
if (diff > self._fps_window):
self._fps = self._frame_count / diff
self._basetime = time()
self._frame_count = 0
self._fps_callback(self.fps) |
<SYSTEM_TASK:>
Measure the current FPS
<END_TASK>
<USER_TASK:>
Description:
def measure_fps(self, window=1, callback='%1.1f FPS'):
"""Measure the current FPS
Sets the update window, connects the draw event to update_fps
and sets the callback function.
Parameters
----------
window : float
The time-window (in seconds) to calculate FPS. Default 1.0.
callback : function | str
The function to call with the float FPS value, or the string
to be formatted with the fps value and then printed. The
default is ``'%1.1f FPS'``. If callback evaluates to False, the
FPS measurement is stopped.
""" |
# Connect update_fps function to draw
self.events.draw.disconnect(self._update_fps)
if callback:
if isinstance(callback, string_types):
callback_str = callback # because callback gets overwritten
def callback(x):
print(callback_str % x)
self._fps_window = window
self.events.draw.connect(self._update_fps)
self._fps_callback = callback
else:
self._fps_callback = None |
<SYSTEM_TASK:>
Render the canvas to an offscreen buffer and return the image
<END_TASK>
<USER_TASK:>
Description:
def render(self):
""" Render the canvas to an offscreen buffer and return the image
array.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
""" |
self.set_current()
size = self.physical_size
fbo = FrameBuffer(color=RenderBuffer(size[::-1]),
depth=RenderBuffer(size[::-1]))
try:
fbo.activate()
self.events.draw()
return fbo.read()
finally:
fbo.deactivate() |